vendor: bump to Kube 1.9/master

Signed-off-by: Antonio Murdaca <runcom@redhat.com>
Antonio Murdaca 2017-11-13 11:33:25 +01:00
parent 7076c73172
commit 7a675ccd92
202 changed files with 8543 additions and 7270 deletions


@@ -4,7 +4,8 @@
   git:
     repo: "https://github.com/runcom/kubernetes.git"
     dest: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes"
-    version: "cri-o-patched-1.8"
+    # based on kube v1.9.0-alpha.2, update as needed
+    version: "cri-o-patched-1.9"
     force: "{{ force_clone | default(False) | bool}}"
 - name: install etcd


@@ -1,9 +1,14 @@
 ---
+- name: ensure Golang dir is empty first
+  file:
+    path: /usr/local/go
+    state: absent
 - name: fetch Golang
   unarchive:
     remote_src: yes
-    src: https://storage.googleapis.com/golang/go1.8.4.linux-amd64.tar.gz
+    src: "https://storage.googleapis.com/golang/go{{ version }}.linux-amd64.tar.gz"
     dest: /usr/local
 - name: link go toolchain


@@ -10,6 +10,8 @@
 - name: install Golang tools
   include: golang.yml
+  vars:
+    version: "1.8.4"
 - name: clone build and install bats
   include: "build/bats.yml"
@@ -54,6 +56,10 @@
   tags:
     - e2e
   tasks:
+    - name: install Golang tools
+      include: golang.yml
+      vars:
+        version: "1.9.2"
     - name: clone build and install kubernetes
       include: "build/kubernetes.yml"
       vars:


@@ -1,11 +1,11 @@
-k8s.io/kubernetes v1.8.1 https://github.com/kubernetes/kubernetes
-k8s.io/client-go release-5.0 https://github.com/kubernetes/client-go
-k8s.io/apimachinery release-1.8 https://github.com/kubernetes/apimachinery
-k8s.io/apiserver release-1.8 https://github.com/kubernetes/apiserver
+k8s.io/kubernetes v1.9.0-alpha.2 https://github.com/kubernetes/kubernetes
+k8s.io/client-go master https://github.com/kubernetes/client-go
+k8s.io/apimachinery master https://github.com/kubernetes/apimachinery
+k8s.io/apiserver master https://github.com/kubernetes/apiserver
 k8s.io/utils 4fe312863be2155a7b68acd2aff1c9221b24e68c https://github.com/kubernetes/utils
-k8s.io/api release-1.8 https://github.com/kubernetes/api
+k8s.io/api master https://github.com/kubernetes/api
 k8s.io/kube-openapi abfc5fbe1cf87ee697db107fdfd24c32fe4397a8 https://github.com/kubernetes/kube-openapi
-k8s.io/apiextensions-apiserver release-1.8 https://github.com/kubernetes/apiextensions-apiserver
+k8s.io/apiextensions-apiserver master https://github.com/kubernetes/apiextensions-apiserver
 #
 github.com/googleapis/gnostic 0c5108395e2debce0d731cf0287ddf7242066aba
 github.com/gregjones/httpcache 787624de3eb7bd915c329cba748687a3b22666a6


@@ -1,119 +0,0 @@
## Description
This library provides basic building blocks for building advanced console UIs.
Initially created for [Gor](http://github.com/buger/gor).
Full API documentation: http://godoc.org/github.com/buger/goterm
## Basic usage
Full screen console app, printing current time:
```go
import (
tm "github.com/buger/goterm"
"time"
)
func main() {
tm.Clear() // Clear current screen
for {
// By moving cursor to top-left position we ensure that console output
// will be overwritten each time, instead of adding new.
tm.MoveCursor(1,1)
tm.Println("Current Time:", time.Now().Format(time.RFC1123))
tm.Flush() // Call it every time at the end of rendering
time.Sleep(time.Second)
}
}
```
This can be seen in [examples/time_example.go](examples/time_example.go). To
run it yourself, go into your `$GOPATH/src/github.com/buger/goterm` directory
and run `go run ./examples/time_example.go`
Print red bold message on white background:
```go
tm.Println(tm.Background(tm.Color(tm.Bold("Important header"), tm.RED), tm.WHITE))
```
Create box and move it to center of the screen:
```go
tm.Clear()
// Create Box with 30% width of current screen, and height of 20 lines
box := tm.NewBox(30|tm.PCT, 20, 0)
// Add some content to the box
// Note that you can add ANY content, even tables
fmt.Fprint(box, "Some box content")
// Move Box to approx center of the screen
tm.Print(tm.MoveTo(box.String(), 40|tm.PCT, 40|tm.PCT))
tm.Flush()
```
This can be found in [examples/box_example.go](examples/box_example.go).
Draw table:
```go
// Based on http://golang.org/pkg/text/tabwriter
totals := tm.NewTable(0, 10, 5, ' ', 0)
fmt.Fprintf(totals, "Time\tStarted\tActive\tFinished\n")
fmt.Fprintf(totals, "%s\t%d\t%d\t%d\n", "All", started, started-finished, finished)
tm.Println(totals)
tm.Flush()
```
This can be found in [examples/table_example.go](examples/table_example.go).
## Line charts
Chart example:
![screen shot 2013-07-09 at 5 05 37 pm](https://f.cloud.github.com/assets/14009/767676/e3dd35aa-e887-11e2-9cd2-f6451eb26adc.png)
```go
import (
	tm "github.com/buger/goterm"
	"math"
)

chart := tm.NewLineChart(100, 20)
data := new(tm.DataTable)
data.AddColumn("Time")
data.AddColumn("Sin(x)")
data.AddColumn("Cos(x+1)")

for i := 0.1; i < 10; i += 0.1 {
	data.AddRow(i, math.Sin(i), math.Cos(i+1))
}
tm.Println(chart.Draw(data))
```
This can be found in [examples/chart_example.go](examples/chart_example.go).
Drawing 2 separate graphs with different scales. Each graph has its own Y axis.
```go
chart.Flags = tm.DRAW_INDEPENDENT
```
Drawing a graph with a relative scale (the graph is drawn starting from the minimum value instead of zero):
```go
chart.Flags = tm.DRAW_RELATIVE
```

vendor/github.com/buger/goterm/box.go (generated, vendored)

@@ -1,122 +0,0 @@
package goterm
import (
"bytes"
"strings"
)
const DEFAULT_BORDER = "- │ ┌ ┐ └ ┘"
// Box allows you to create independent parts of screen, with its own buffer and borders.
// Can be used for creating modal windows
//
// Generates boxes like this:
// ┌--------┐
// │hello │
// │world │
// │ │
// └--------┘
//
type Box struct {
Buf *bytes.Buffer
Width int
Height int
// To get even padding: PaddingX ~= PaddingY*4
PaddingX int
PaddingY int
// Should contain 6 border pieces separated by spaces
//
// Example border:
// "- │ ┌ ┐ └ ┘"
Border string
Flags int // Not used now
}
// Create new Box.
// Width and height can be relative:
//
// // Create a box with 50% width of the current screen and 10 lines height
// box := tm.NewBox(50|tm.PCT, 10, 0)
//
func NewBox(width, height int, flags int) *Box {
width, height = GetXY(width, height)
box := new(Box)
box.Buf = new(bytes.Buffer)
box.Width = width
box.Height = height
box.Border = DEFAULT_BORDER
box.PaddingX = 1
box.PaddingY = 0
box.Flags = flags
return box
}
func (b *Box) Write(p []byte) (int, error) {
return b.Buf.Write(p)
}
// Render Box
func (b *Box) String() (out string) {
borders := strings.Split(b.Border, " ")
lines := strings.Split(b.Buf.String(), "\n")
// Border + padding
prefix := borders[1] + strings.Repeat(" ", b.PaddingX)
suffix := strings.Repeat(" ", b.PaddingX) + borders[1]
offset := b.PaddingY + 1 // 1 is border width
// Content width without borders and padding
contentWidth := b.Width - (b.PaddingX+1)*2
for y := 0; y < b.Height; y++ {
var line string
switch {
// Draw borders for first line
case y == 0:
line = borders[2] + strings.Repeat(borders[0], b.Width-2) + borders[3]
// Draw borders for last line
case y == (b.Height - 1):
line = borders[4] + strings.Repeat(borders[0], b.Width-2) + borders[5]
// Draw top and bottom padding
case y <= b.PaddingY || y >= (b.Height-b.PaddingY):
line = borders[1] + strings.Repeat(" ", b.Width-2) + borders[1]
// Render content
default:
if len(lines) > y-offset {
line = lines[y-offset]
} else {
line = ""
}
if len(line) > contentWidth-1 {
// If line is too large limit it
line = line[0:contentWidth]
} else {
// If line is too small enlarge it by adding spaces
line = line + strings.Repeat(" ", contentWidth-len(line))
}
line = prefix + line + suffix
}
// Don't add newline for last element
if y != b.Height-1 {
line = line + "\n"
}
out += line
}
return out
}


@@ -1,328 +0,0 @@
package goterm
import (
"fmt"
"math"
"strings"
)
const (
AXIS_LEFT = iota
AXIS_RIGHT
)
const (
DRAW_INDEPENDENT = 1 << iota
DRAW_RELATIVE
)
type DataTable struct {
columns []string
rows [][]float64
}
func (d *DataTable) AddColumn(name string) {
d.columns = append(d.columns, name)
}
func (d *DataTable) AddRow(elms ...float64) {
d.rows = append(d.rows, elms)
}
type Chart interface {
Draw(data DataTable, flags int) string
}
type LineChart struct {
Buf []string
chartBuf []string
data *DataTable
Width int
Height int
chartHeight int
chartWidth int
paddingX int
paddingY int
Flags int
}
func genBuf(size int) []string {
buf := make([]string, size)
for i := 0; i < size; i++ {
buf[i] = " "
}
return buf
}
// Format float
func ff(num interface{}) string {
return fmt.Sprintf("%.1f", num)
}
func NewLineChart(width, height int) *LineChart {
chart := new(LineChart)
chart.Width = width
chart.Height = height
chart.Buf = genBuf(width * height)
// axis lines + axis text
chart.paddingY = 2
return chart
}
func (c *LineChart) DrawAxes(maxX, minX, maxY, minY float64, index int) {
side := AXIS_LEFT
if c.Flags&DRAW_INDEPENDENT != 0 {
if index%2 == 0 {
side = AXIS_RIGHT
}
c.DrawLine(c.paddingX-1, 1, c.Width-c.paddingX, 1, "-")
} else {
c.DrawLine(c.paddingX-1, 1, c.Width-1, 1, "-")
}
if side == AXIS_LEFT {
c.DrawLine(c.paddingX-1, 1, c.paddingX-1, c.Height-1, "│")
} else {
c.DrawLine(c.Width-c.paddingX, 1, c.Width-c.paddingX, c.Height-1, "│")
}
left := 0
if side == AXIS_RIGHT {
left = c.Width - c.paddingX + 1
}
if c.Flags&DRAW_RELATIVE != 0 {
c.writeText(ff(minY), left, 1)
} else {
if minY > 0 {
c.writeText("0", left, 1)
} else {
c.writeText(ff(minY), left, 1)
}
}
c.writeText(ff(maxY), left, c.Height-1)
c.writeText(ff(minX), c.paddingX, 0)
x_col := c.data.columns[0]
c.writeText(c.data.columns[0], c.Width/2-len(x_col)/2, 1)
if c.Flags&DRAW_INDEPENDENT != 0 || len(c.data.columns) < 3 {
col := c.data.columns[index]
for idx, char := range strings.Split(col, "") {
start_from := c.Height/2 + len(col)/2 - idx
if side == AXIS_LEFT {
c.writeText(char, c.paddingX-1, start_from)
} else {
c.writeText(char, c.Width-c.paddingX, start_from)
}
}
}
if c.Flags&DRAW_INDEPENDENT != 0 {
c.writeText(ff(maxX), c.Width-c.paddingX-len(ff(maxX)), 0)
} else {
c.writeText(ff(maxX), c.Width-len(ff(maxX)), 0)
}
}
func (c *LineChart) writeText(text string, x, y int) {
coord := y*c.Width + x
for idx, char := range strings.Split(text, "") {
c.Buf[coord+idx] = char
}
}
func (c *LineChart) Draw(data *DataTable) (out string) {
var scaleY, scaleX float64
c.data = data
if c.Flags&DRAW_INDEPENDENT != 0 && len(data.columns) > 3 {
fmt.Println("Error: Can't use DRAW_INDEPENDENT for more then 2 graphs")
return ""
}
charts := len(data.columns) - 1
prevPoint := [2]int{-1, -1}
maxX, minX, maxY, minY := getBoundaryValues(data, -1)
c.paddingX = int(math.Max(float64(len(ff(minY))), float64(len(ff(maxY))))) + 1
c.chartHeight = c.Height - c.paddingY
if c.Flags&DRAW_INDEPENDENT != 0 {
c.chartWidth = c.Width - 2*c.paddingX
} else {
c.chartWidth = c.Width - c.paddingX - 1
}
scaleX = float64(c.chartWidth) / (maxX - minX)
if c.Flags&DRAW_RELATIVE != 0 || minY < 0 {
scaleY = float64(c.chartHeight) / (maxY - minY)
} else {
scaleY = float64(c.chartHeight) / maxY
}
for i := 1; i < charts+1; i++ {
if c.Flags&DRAW_INDEPENDENT != 0 {
maxX, minX, maxY, minY = getBoundaryValues(data, i)
scaleX = float64(c.chartWidth-1) / (maxX - minX)
scaleY = float64(c.chartHeight) / maxY
if c.Flags&DRAW_RELATIVE != 0 || minY < 0 {
scaleY = float64(c.chartHeight) / (maxY - minY)
}
}
symbol := Color("•", i)
c_data := getChartData(data, i)
for _, point := range c_data {
x := int((point[0]-minX)*scaleX) + c.paddingX
y := int((point[1])*scaleY) + c.paddingY
if c.Flags&DRAW_RELATIVE != 0 || minY < 0 {
y = int((point[1]-minY)*scaleY) + c.paddingY
}
if prevPoint[0] == -1 {
prevPoint[0] = x
prevPoint[1] = y
}
if prevPoint[0] <= x {
c.DrawLine(prevPoint[0], prevPoint[1], x, y, symbol)
}
prevPoint[0] = x
prevPoint[1] = y
}
c.DrawAxes(maxX, minX, maxY, minY, i)
}
for row := c.Height - 1; row >= 0; row-- {
out += strings.Join(c.Buf[row*c.Width:(row+1)*c.Width], "") + "\n"
}
return
}
func (c *LineChart) DrawLine(x0, y0, x1, y1 int, symbol string) {
drawLine(x0, y0, x1, y1, func(x, y int) {
coord := y*c.Width + x
if coord > 0 && coord < len(c.Buf) {
c.Buf[coord] = symbol
}
})
}
func getBoundaryValues(data *DataTable, index int) (maxX, minX, maxY, minY float64) {
maxX = data.rows[0][0]
minX = data.rows[0][0]
maxY = data.rows[0][1]
minY = data.rows[0][1]
for _, r := range data.rows {
maxX = math.Max(maxX, r[0])
minX = math.Min(minX, r[0])
for idx, c := range r {
if idx > 0 {
if index == -1 || index == idx {
maxY = math.Max(maxY, c)
minY = math.Min(minY, c)
}
}
}
}
if maxY > 0 {
maxY = maxY * 1.1
} else {
maxY = maxY * 0.9
}
if minY > 0 {
minY = minY * 0.9
} else {
minY = minY * 1.1
}
return
}
// DataTable can contain data for multiple graphs, we need to extract only 1
func getChartData(data *DataTable, index int) (out [][]float64) {
for _, r := range data.rows {
out = append(out, []float64{r[0], r[index]})
}
return
}
// Algorithm for drawing line between two points
//
// http://en.wikipedia.org/wiki/Bresenham's_line_algorithm
func drawLine(x0, y0, x1, y1 int, plot func(int, int)) {
dx := x1 - x0
if dx < 0 {
dx = -dx
}
dy := y1 - y0
if dy < 0 {
dy = -dy
}
var sx, sy int
if x0 < x1 {
sx = 1
} else {
sx = -1
}
if y0 < y1 {
sy = 1
} else {
sy = -1
}
err := dx - dy
for {
plot(x0, y0)
if x0 == x1 && y0 == y1 {
break
}
e2 := 2 * err
if e2 > -dy {
err -= dy
x0 += sx
}
if e2 < dx {
err += dx
y0 += sy
}
}
}


@@ -1,34 +0,0 @@
package goterm
import (
"bytes"
"text/tabwriter"
)
// Tabwriter with own buffer:
//
// totals := tm.NewTable(0, 10, 5, ' ', 0)
// fmt.Fprintf(totals, "Time\tStarted\tActive\tFinished\n")
// fmt.Fprintf(totals, "%s\t%d\t%d\t%d\n", "All", started, started-finished, finished)
// tm.Println(totals)
//
// Based on http://golang.org/pkg/text/tabwriter
type Table struct {
tabwriter.Writer
Buf *bytes.Buffer
}
// Same as here http://golang.org/pkg/text/tabwriter/#Writer.Init
func NewTable(minwidth, tabwidth, padding int, padchar byte, flags uint) *Table {
tbl := new(Table)
tbl.Buf = new(bytes.Buffer)
tbl.Init(tbl.Buf, minwidth, tabwidth, padding, padchar, flags)
return tbl
}
func (t *Table) String() string {
t.Flush()
return t.Buf.String()
}


@@ -1,258 +0,0 @@
// Provides basic building blocks for advanced console UI
//
// Coordinate system:
//
// 1/1---X---->
// |
// Y
// |
// v
//
// Documentation for ANSI codes: http://en.wikipedia.org/wiki/ANSI_escape_code#Colors
//
// Inspired by: http://www.darkcoding.net/software/pretty-command-line-console-output-on-unix-in-python-and-go-lang/
package goterm
import (
"bufio"
"bytes"
"fmt"
"os"
"strings"
)
// Reset all custom styles
const RESET = "\033[0m"
// Reset to default color
const RESET_COLOR = "\033[32m"
// Return cursor to start of line and clean it
const RESET_LINE = "\r\033[K"
// List of possible colors
const (
BLACK = iota
RED
GREEN
YELLOW
BLUE
MAGENTA
CYAN
WHITE
)
var Output *bufio.Writer = bufio.NewWriter(os.Stdout)
func getColor(code int) string {
return fmt.Sprintf("\033[3%dm", code)
}
func getBgColor(code int) string {
return fmt.Sprintf("\033[4%dm", code)
}
// Set percent flag: num | PCT
//
// Check percent flag: num & PCT
//
// Reset percent flag: num & 0xFF
const shift = uint(^uint(0)>>63) << 4
const PCT = 0x8000 << shift
type winsize struct {
Row uint16
Col uint16
Xpixel uint16
Ypixel uint16
}
// Global screen buffer
// It's not recommended to write to the buffer directly; use the package Print, Printf, and Println functions instead.
var Screen *bytes.Buffer = new(bytes.Buffer)
// Get relative or absolute coordinates
// To get relative, set PCT flag to number:
//
// // Get 10% of total width to `x` and 20 to y
// x, y = tm.GetXY(10|tm.PCT, 20)
//
func GetXY(x int, y int) (int, int) {
if y == -1 {
y = CurrentHeight() + 1
}
if x&PCT != 0 {
x = int((x & 0xFF) * Width() / 100)
}
if y&PCT != 0 {
y = int((y & 0xFF) * Height() / 100)
}
return x, y
}
type sf func(int, string) string
// Apply given transformation func for each line in string
func applyTransform(str string, transform sf) (out string) {
out = ""
for idx, line := range strings.Split(str, "\n") {
out += transform(idx, line)
}
return
}
// Clear screen
func Clear() {
Output.WriteString("\033[2J")
}
// Move cursor to given position
func MoveCursor(x int, y int) {
fmt.Fprintf(Screen, "\033[%d;%dH", x, y)
}
// Move cursor up relative the current position
func MoveCursorUp(bias int) {
fmt.Fprintf(Screen, "\033[%dA", bias)
}
// Move cursor down relative the current position
func MoveCursorDown(bias int) {
fmt.Fprintf(Screen, "\033[%dB", bias)
}
// Move cursor forward relative the current position
func MoveCursorForward(bias int) {
fmt.Fprintf(Screen, "\033[%dC", bias)
}
// Move cursor backward relative the current position
func MoveCursorBackward(bias int) {
fmt.Fprintf(Screen, "\033[%dD", bias)
}
// Move string to position
func MoveTo(str string, x int, y int) (out string) {
x, y = GetXY(x, y)
return applyTransform(str, func(idx int, line string) string {
return fmt.Sprintf("\033[%d;%dH%s", y+idx, x, line)
})
}
// Return carriage to start of line
func ResetLine(str string) (out string) {
return applyTransform(str, func(idx int, line string) string {
return fmt.Sprintf(RESET_LINE, line)
})
}
// Make bold
func Bold(str string) string {
return applyTransform(str, func(idx int, line string) string {
return fmt.Sprintf("\033[1m%s\033[0m", line)
})
}
// Apply given color to string:
//
// tm.Color("RED STRING", tm.RED)
//
func Color(str string, color int) string {
return applyTransform(str, func(idx int, line string) string {
return fmt.Sprintf("%s%s%s", getColor(color), line, RESET)
})
}
func Highlight(str, substr string, color int) string {
hiSubstr := Color(substr, color)
return strings.Replace(str, substr, hiSubstr, -1)
}
func HighlightRegion(str string, from, to, color int) string {
return str[:from] + Color(str[from:to], color) + str[to:]
}
// Change background color of string:
//
// tm.Background("string", tm.RED)
//
func Background(str string, color int) string {
return applyTransform(str, func(idx int, line string) string {
return fmt.Sprintf("%s%s%s", getBgColor(color), line, RESET)
})
}
// Get console width
func Width() int {
ws, err := getWinsize()
if err != nil {
return -1
}
return int(ws.Col)
}
// Get console height
func Height() int {
ws, err := getWinsize()
if err != nil {
return -1
}
return int(ws.Row)
}
// Get current height. Line count in Screen buffer.
func CurrentHeight() int {
return strings.Count(Screen.String(), "\n")
}
// Flush buffer and ensure that it will not overflow screen
func Flush() {
for idx, str := range strings.Split(Screen.String(), "\n") {
if idx > Height() {
return
}
Output.WriteString(str + "\n")
}
Output.Flush()
Screen.Reset()
}
func Print(a ...interface{}) (n int, err error) {
return fmt.Fprint(Screen, a...)
}
func Println(a ...interface{}) (n int, err error) {
return fmt.Fprintln(Screen, a...)
}
func Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(Screen, format, a...)
}
func Context(data string, idx, max int) string {
var start, end int
if len(data[:idx]) < (max / 2) {
start = 0
} else {
start = idx - max/2
}
if len(data)-idx < (max / 2) {
end = len(data) - 1
} else {
end = idx + max/2
}
return data[start:end]
}


@@ -1,12 +0,0 @@
// +build windows plan9 solaris
package goterm
func getWinsize() (*winsize, error) {
ws := new(winsize)
ws.Col = 80
ws.Row = 24
return ws, nil
}


@@ -1,36 +0,0 @@
// +build !windows,!plan9,!solaris
package goterm
import (
"fmt"
"os"
"runtime"
"syscall"
"unsafe"
)
func getWinsize() (*winsize, error) {
ws := new(winsize)
var _TIOCGWINSZ int64
switch runtime.GOOS {
case "linux":
_TIOCGWINSZ = 0x5413
case "darwin":
_TIOCGWINSZ = 1074295912
}
r1, _, errno := syscall.Syscall(syscall.SYS_IOCTL,
uintptr(syscall.Stdin),
uintptr(_TIOCGWINSZ),
uintptr(unsafe.Pointer(ws)),
)
if int(r1) == -1 {
fmt.Println("Error:", os.NewSyscallError("GetWinsize", errno))
return nil, os.NewSyscallError("GetWinsize", errno)
}
return ws, nil
}


@@ -1,86 +0,0 @@
package sysregistries
import (
"github.com/BurntSushi/toml"
"github.com/containers/image/types"
"io/ioutil"
"path/filepath"
)
// systemRegistriesConfPath is the path to the system-wide registry configuration file
// and is used to add/subtract potential registries for obtaining images.
// You can override this at build time with
// -ldflags '-X github.com/containers/image/sysregistries.systemRegistriesConfPath=$your_path'
var systemRegistriesConfPath = builtinRegistriesConfPath
// builtinRegistriesConfPath is the path to registry configuration file
// DO NOT change this, instead see systemRegistriesConfPath above.
const builtinRegistriesConfPath = "/etc/containers/registries.conf"
type registries struct {
Registries []string `toml:"registries"`
}
type tomlConfig struct {
Registries struct {
Search registries `toml:"search"`
Insecure registries `toml:"insecure"`
Block registries `toml:"block"`
} `toml:"registries"`
}
// Reads the global registry file from the filesystem. Returns
// a byte array
func readRegistryConf(ctx *types.SystemContext) ([]byte, error) {
dirPath := systemRegistriesConfPath
if ctx != nil {
if ctx.SystemRegistriesConfPath != "" {
dirPath = ctx.SystemRegistriesConfPath
} else if ctx.RootForImplicitAbsolutePaths != "" {
dirPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
}
}
configBytes, err := ioutil.ReadFile(dirPath)
return configBytes, err
}
// For mocking in unittests
var readConf = readRegistryConf
// Loads the registry configuration file from the filesystem and
// then unmarshals it. Returns the unmarshalled object.
func loadRegistryConf(ctx *types.SystemContext) (*tomlConfig, error) {
config := &tomlConfig{}
configBytes, err := readConf(ctx)
if err != nil {
return nil, err
}
err = toml.Unmarshal(configBytes, &config)
return config, err
}
// GetRegistries returns an array of strings that contain the names
// of the registries as defined in the system-wide
// registries file. It returns an empty array if none are
// defined
func GetRegistries(ctx *types.SystemContext) ([]string, error) {
config, err := loadRegistryConf(ctx)
if err != nil {
return nil, err
}
return config.Registries.Search.Registries, nil
}
// GetInsecureRegistries returns an array of strings that contain the names
// of the insecure registries as defined in the system-wide
// registries file. It returns an empty array if none are
// defined
func GetInsecureRegistries(ctx *types.SystemContext) ([]string, error) {
config, err := loadRegistryConf(ctx)
if err != nil {
return nil, err
}
return config.Registries.Insecure.Registries, nil
}
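
For orientation only (an editorial sketch, not part of the diff): a caller of the sysregistries API above might look like the following. The import path is assumed from the -ldflags note in the file; passing nil for the SystemContext falls back to the built-in /etc/containers/registries.conf.

```go
package main

import (
	"fmt"

	"github.com/containers/image/sysregistries" // assumed path, per the -ldflags comment above
)

func main() {
	// A nil SystemContext means the default /etc/containers/registries.conf is read.
	search, err := sysregistries.GetRegistries(nil)
	if err != nil {
		fmt.Println("reading registries.conf:", err)
		return
	}
	insecure, err := sysregistries.GetInsecureRegistries(nil)
	if err != nil {
		fmt.Println("reading registries.conf:", err)
		return
	}
	fmt.Println("search registries:", search)
	fmt.Println("insecure registries:", insecure)
}
```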


@@ -1,22 +0,0 @@
Copyright (c) 2017 Ernest Micklei
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@@ -1,83 +0,0 @@
# go-restful-swagger12
[![Build Status](https://travis-ci.org/emicklei/go-restful-swagger12.png)](https://travis-ci.org/emicklei/go-restful-swagger12)
[![GoDoc](https://godoc.org/github.com/emicklei/go-restful-swagger12?status.svg)](https://godoc.org/github.com/emicklei/go-restful-swagger12)
How to use Swagger UI with go-restful
=
Get the Swagger UI sources (version 1.2 only)
git clone https://github.com/wordnik/swagger-ui.git
The project contains a "dist" folder.
Its contents has all the Swagger UI files you need.
The `index.html` has a `url` set to `http://petstore.swagger.wordnik.com/api/api-docs`.
You need to change that to match your WebService JSON endpoint, e.g. `http://localhost:8080/apidocs.json`
Now, you can install the Swagger WebService for serving the Swagger specification in JSON.
config := swagger.Config{
WebServices: restful.RegisteredWebServices(),
ApiPath: "/apidocs.json",
SwaggerPath: "/apidocs/",
SwaggerFilePath: "/Users/emicklei/Projects/swagger-ui/dist"}
swagger.InstallSwaggerService(config)
Documenting Structs
--
Currently there are 2 ways to document your structs in the go-restful Swagger.
###### By using struct tags
- Use tag "description" to annotate a struct field with a description to show in the UI
- Use tag "modelDescription" to annotate the struct itself with a description to show in the UI. The tag can be added in an field of the struct and in case that there are multiple definition, they will be appended with an empty line.
###### By using the SwaggerDoc method
Here is an example with an `Address` struct and the documentation for each of the fields. The `""` is a special entry for **documenting the struct itself**.
type Address struct {
Country string `json:"country,omitempty"`
PostCode int `json:"postcode,omitempty"`
}
func (Address) SwaggerDoc() map[string]string {
return map[string]string{
"": "Address doc",
"country": "Country doc",
"postcode": "PostCode doc",
}
}
This example will generate a JSON like this
{
"Address": {
"id": "Address",
"description": "Address doc",
"properties": {
"country": {
"type": "string",
"description": "Country doc"
},
"postcode": {
"type": "integer",
"format": "int32",
"description": "PostCode doc"
}
}
}
}
**Very Important Notes:**
- `SwaggerDoc()` is using a **NON-Pointer** receiver (e.g. func (Address) and not func (*Address))
- The returned map should use as key the name of the field as defined in the JSON parameter (e.g. `"postcode"` and not `"PostCode"`)
Notes
--
- The Nickname of an Operation is automatically set by finding the name of the function. You can override it using RouteBuilder.Operation(..)
- The WebServices field of swagger.Config can be used to control which services you want to expose and document; you can have multiple configs and therefore multiple endpoints.
© 2017, ernestmicklei.com. MIT License. Contributions welcome.


@@ -1,64 +0,0 @@
package swagger
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"encoding/json"
)
// ApiDeclarationList maintains an ordered list of ApiDeclaration.
type ApiDeclarationList struct {
List []ApiDeclaration
}
// At returns the ApiDeclaration by its path unless absent, then ok is false
func (l *ApiDeclarationList) At(path string) (a ApiDeclaration, ok bool) {
for _, each := range l.List {
if each.ResourcePath == path {
return each, true
}
}
return a, false
}
// Put adds or replaces a ApiDeclaration with this name
func (l *ApiDeclarationList) Put(path string, a ApiDeclaration) {
// maybe replace existing
for i, each := range l.List {
if each.ResourcePath == path {
// replace
l.List[i] = a
return
}
}
// add
l.List = append(l.List, a)
}
// Do enumerates all the properties, each with its assigned name
func (l *ApiDeclarationList) Do(block func(path string, decl ApiDeclaration)) {
for _, each := range l.List {
block(each.ResourcePath, each)
}
}
// MarshalJSON writes the ApiDeclarationList as if it was a map[string]ApiDeclaration
func (l ApiDeclarationList) MarshalJSON() ([]byte, error) {
var buf bytes.Buffer
encoder := json.NewEncoder(&buf)
buf.WriteString("{\n")
for i, each := range l.List {
buf.WriteString("\"")
buf.WriteString(each.ResourcePath)
buf.WriteString("\": ")
encoder.Encode(each)
if i < len(l.List)-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("}")
return buf.Bytes(), nil
}


@@ -1,46 +0,0 @@
package swagger
import (
"net/http"
"reflect"
"github.com/emicklei/go-restful"
)
// PostBuildDeclarationMapFunc can be used to modify the api declaration map.
type PostBuildDeclarationMapFunc func(apiDeclarationMap *ApiDeclarationList)
// MapSchemaFormatFunc can be used to modify typeName at definition time.
type MapSchemaFormatFunc func(typeName string) string
// MapModelTypeNameFunc can be used to return the desired typeName for a given
// type. It will return false if the default name should be used.
type MapModelTypeNameFunc func(t reflect.Type) (string, bool)
type Config struct {
// url where the services are available, e.g. http://localhost:8080
// if left empty then the basePath of Swagger is taken from the actual request
WebServicesUrl string
// path where the JSON api is available, e.g. /apidocs
ApiPath string
// [optional] path where the swagger UI will be served, e.g. /swagger
SwaggerPath string
// [optional] location of folder containing Swagger HTML5 application index.html
SwaggerFilePath string
// api listing is constructed from this list of restful WebServices.
WebServices []*restful.WebService
// will serve all static content (scripts,pages,images)
StaticHandler http.Handler
// [optional] by default CORS (Cross-Origin Resource Sharing) is enabled.
DisableCORS bool
// Top-level API version. Is reflected in the resource listing.
ApiVersion string
// If set then call this handler after building the complete ApiDeclaration Map
PostBuildHandler PostBuildDeclarationMapFunc
// Swagger global info struct
Info Info
// [optional] If set, model builder should call this handler to get additional typename-to-swagger-format-field conversion.
SchemaFormatHandler MapSchemaFormatFunc
// [optional] If set, model builder should call this handler to retrieve the name for a given type.
ModelTypeNameHandler MapModelTypeNameFunc
}


@@ -1,467 +0,0 @@
package swagger
import (
"encoding/json"
"reflect"
"strings"
)
// ModelBuildable is used for extending Structs that need more control over
// how the Model appears in the Swagger api declaration.
type ModelBuildable interface {
PostBuildModel(m *Model) *Model
}
type modelBuilder struct {
Models *ModelList
Config *Config
}
type documentable interface {
SwaggerDoc() map[string]string
}
// Check if this structure has a method with signature func (<theModel>) SwaggerDoc() map[string]string
// If it exists, retrieve the documentation and overwrite all struct tag descriptions
func getDocFromMethodSwaggerDoc2(model reflect.Type) map[string]string {
if docable, ok := reflect.New(model).Elem().Interface().(documentable); ok {
return docable.SwaggerDoc()
}
return make(map[string]string)
}
// addModelFrom creates and adds a Model to the builder and detects and calls
// the post build hook for customizations
func (b modelBuilder) addModelFrom(sample interface{}) {
if modelOrNil := b.addModel(reflect.TypeOf(sample), ""); modelOrNil != nil {
// allow customizations
if buildable, ok := sample.(ModelBuildable); ok {
modelOrNil = buildable.PostBuildModel(modelOrNil)
b.Models.Put(modelOrNil.Id, *modelOrNil)
}
}
}
func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model {
// Turn pointers into simpler types so further checks are
// correct.
if st.Kind() == reflect.Ptr {
st = st.Elem()
}
modelName := b.keyFrom(st)
if nameOverride != "" {
modelName = nameOverride
}
// no models needed for primitive types
if b.isPrimitiveType(modelName) {
return nil
}
// golang encoding/json package says array and slice values encode as
// JSON arrays, except that []byte encodes as a base64-encoded string.
// If we see a []byte here, treat it as a primitive type (string)
// and deal with it in buildArrayTypeProperty.
if (st.Kind() == reflect.Slice || st.Kind() == reflect.Array) &&
st.Elem().Kind() == reflect.Uint8 {
return nil
}
// see if we already have visited this model
if _, ok := b.Models.At(modelName); ok {
return nil
}
sm := Model{
Id: modelName,
Required: []string{},
Properties: ModelPropertyList{}}
// reference the model before further initializing (enables recursive structs)
b.Models.Put(modelName, sm)
// check for slice or array
if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
b.addModel(st.Elem(), "")
return &sm
}
// check for structure or primitive type
if st.Kind() != reflect.Struct {
return &sm
}
fullDoc := getDocFromMethodSwaggerDoc2(st)
modelDescriptions := []string{}
for i := 0; i < st.NumField(); i++ {
field := st.Field(i)
jsonName, modelDescription, prop := b.buildProperty(field, &sm, modelName)
if len(modelDescription) > 0 {
modelDescriptions = append(modelDescriptions, modelDescription)
}
// add if not omitted
if len(jsonName) != 0 {
// update description
if fieldDoc, ok := fullDoc[jsonName]; ok {
prop.Description = fieldDoc
}
// update Required
if b.isPropertyRequired(field) {
sm.Required = append(sm.Required, jsonName)
}
sm.Properties.Put(jsonName, prop)
}
}
// We always overwrite documentation if SwaggerDoc method exists
// "" is special for documenting the struct itself
if modelDoc, ok := fullDoc[""]; ok {
sm.Description = modelDoc
} else if len(modelDescriptions) != 0 {
sm.Description = strings.Join(modelDescriptions, "\n")
}
// update model builder with completed model
b.Models.Put(modelName, sm)
return &sm
}
func (b modelBuilder) isPropertyRequired(field reflect.StructField) bool {
required := true
if jsonTag := field.Tag.Get("json"); jsonTag != "" {
s := strings.Split(jsonTag, ",")
if len(s) > 1 && s[1] == "omitempty" {
return false
}
}
return required
}
func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, modelName string) (jsonName, modelDescription string, prop ModelProperty) {
jsonName = b.jsonNameOfField(field)
if len(jsonName) == 0 {
// empty name signals skip property
return "", "", prop
}
if field.Name == "XMLName" && field.Type.String() == "xml.Name" {
// property is metadata for the xml.Name attribute, can be skipped
return "", "", prop
}
if tag := field.Tag.Get("modelDescription"); tag != "" {
modelDescription = tag
}
prop.setPropertyMetadata(field)
if prop.Type != nil {
return jsonName, modelDescription, prop
}
fieldType := field.Type
// check if type is doing its own marshalling
marshalerType := reflect.TypeOf((*json.Marshaler)(nil)).Elem()
if fieldType.Implements(marshalerType) {
var pType = "string"
if prop.Type == nil {
prop.Type = &pType
}
if prop.Format == "" {
prop.Format = b.jsonSchemaFormat(b.keyFrom(fieldType))
}
return jsonName, modelDescription, prop
}
// check if annotation says it is a string
if jsonTag := field.Tag.Get("json"); jsonTag != "" {
s := strings.Split(jsonTag, ",")
if len(s) > 1 && s[1] == "string" {
stringt := "string"
prop.Type = &stringt
return jsonName, modelDescription, prop
}
}
fieldKind := fieldType.Kind()
switch {
case fieldKind == reflect.Struct:
jsonName, prop := b.buildStructTypeProperty(field, jsonName, model)
return jsonName, modelDescription, prop
case fieldKind == reflect.Slice || fieldKind == reflect.Array:
jsonName, prop := b.buildArrayTypeProperty(field, jsonName, modelName)
return jsonName, modelDescription, prop
case fieldKind == reflect.Ptr:
jsonName, prop := b.buildPointerTypeProperty(field, jsonName, modelName)
return jsonName, modelDescription, prop
case fieldKind == reflect.String:
stringt := "string"
prop.Type = &stringt
return jsonName, modelDescription, prop
case fieldKind == reflect.Map:
// if it's a map, it's unstructured, and swagger 1.2 can't handle it
objectType := "object"
prop.Type = &objectType
return jsonName, modelDescription, prop
}
fieldTypeName := b.keyFrom(fieldType)
if b.isPrimitiveType(fieldTypeName) {
mapped := b.jsonSchemaType(fieldTypeName)
prop.Type = &mapped
prop.Format = b.jsonSchemaFormat(fieldTypeName)
return jsonName, modelDescription, prop
}
modelType := b.keyFrom(fieldType)
prop.Ref = &modelType
if fieldType.Name() == "" { // override type of anonymous structs
nestedTypeName := modelName + "." + jsonName
prop.Ref = &nestedTypeName
b.addModel(fieldType, nestedTypeName)
}
return jsonName, modelDescription, prop
}
func hasNamedJSONTag(field reflect.StructField) bool {
parts := strings.Split(field.Tag.Get("json"), ",")
if len(parts) == 0 {
return false
}
for _, s := range parts[1:] {
if s == "inline" {
return false
}
}
return len(parts[0]) > 0
}
func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonName string, model *Model) (nameJson string, prop ModelProperty) {
prop.setPropertyMetadata(field)
// Check for type override in tag
if prop.Type != nil {
return jsonName, prop
}
fieldType := field.Type
// check for anonymous
if len(fieldType.Name()) == 0 {
// anonymous
anonType := model.Id + "." + jsonName
b.addModel(fieldType, anonType)
prop.Ref = &anonType
return jsonName, prop
}
if field.Name == fieldType.Name() && field.Anonymous && !hasNamedJSONTag(field) {
// embedded struct
sub := modelBuilder{new(ModelList), b.Config}
sub.addModel(fieldType, "")
subKey := sub.keyFrom(fieldType)
// merge properties from sub
subModel, _ := sub.Models.At(subKey)
subModel.Properties.Do(func(k string, v ModelProperty) {
model.Properties.Put(k, v)
// if subModel says this property is required then include it
required := false
for _, each := range subModel.Required {
if k == each {
required = true
break
}
}
if required {
model.Required = append(model.Required, k)
}
})
// add all new referenced models
sub.Models.Do(func(key string, sub Model) {
if key != subKey {
if _, ok := b.Models.At(key); !ok {
b.Models.Put(key, sub)
}
}
})
// empty name signals skip property
return "", prop
}
// simple struct
b.addModel(fieldType, "")
var pType = b.keyFrom(fieldType)
prop.Ref = &pType
return jsonName, prop
}
func (b modelBuilder) buildArrayTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
// check for type override in tags
prop.setPropertyMetadata(field)
if prop.Type != nil {
return jsonName, prop
}
fieldType := field.Type
if fieldType.Elem().Kind() == reflect.Uint8 {
stringt := "string"
prop.Type = &stringt
return jsonName, prop
}
var pType = "array"
prop.Type = &pType
isPrimitive := b.isPrimitiveType(fieldType.Elem().Name())
elemTypeName := b.getElementTypeName(modelName, jsonName, fieldType.Elem())
prop.Items = new(Item)
if isPrimitive {
mapped := b.jsonSchemaType(elemTypeName)
prop.Items.Type = &mapped
} else {
prop.Items.Ref = &elemTypeName
}
// add|overwrite model for element type
if fieldType.Elem().Kind() == reflect.Ptr {
fieldType = fieldType.Elem()
}
if !isPrimitive {
b.addModel(fieldType.Elem(), elemTypeName)
}
return jsonName, prop
}
func (b modelBuilder) buildPointerTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
prop.setPropertyMetadata(field)
// Check for type override in tags
if prop.Type != nil {
return jsonName, prop
}
fieldType := field.Type
// override type of pointer to list-likes
if fieldType.Elem().Kind() == reflect.Slice || fieldType.Elem().Kind() == reflect.Array {
var pType = "array"
prop.Type = &pType
isPrimitive := b.isPrimitiveType(fieldType.Elem().Elem().Name())
elemName := b.getElementTypeName(modelName, jsonName, fieldType.Elem().Elem())
if isPrimitive {
primName := b.jsonSchemaType(elemName)
prop.Items = &Item{Ref: &primName}
} else {
prop.Items = &Item{Ref: &elemName}
}
if !isPrimitive {
// add|overwrite model for element type
b.addModel(fieldType.Elem().Elem(), elemName)
}
} else {
// non-array, pointer type
fieldTypeName := b.keyFrom(fieldType.Elem())
var pType = b.jsonSchemaType(fieldTypeName) // no star, include pkg path
if b.isPrimitiveType(fieldTypeName) {
prop.Type = &pType
prop.Format = b.jsonSchemaFormat(fieldTypeName)
return jsonName, prop
}
prop.Ref = &pType
elemName := ""
if fieldType.Elem().Name() == "" {
elemName = modelName + "." + jsonName
prop.Ref = &elemName
}
b.addModel(fieldType.Elem(), elemName)
}
return jsonName, prop
}
func (b modelBuilder) getElementTypeName(modelName, jsonName string, t reflect.Type) string {
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
if t.Name() == "" {
return modelName + "." + jsonName
}
return b.keyFrom(t)
}
func (b modelBuilder) keyFrom(st reflect.Type) string {
key := st.String()
if b.Config != nil && b.Config.ModelTypeNameHandler != nil {
if name, ok := b.Config.ModelTypeNameHandler(st); ok {
key = name
}
}
if len(st.Name()) == 0 { // unnamed type
// Swagger UI has special meaning for [
key = strings.Replace(key, "[]", "||", -1)
}
return key
}
// see also https://golang.org/ref/spec#Numeric_types
func (b modelBuilder) isPrimitiveType(modelName string) bool {
if len(modelName) == 0 {
return false
}
return strings.Contains("uint uint8 uint16 uint32 uint64 int int8 int16 int32 int64 float32 float64 bool string byte rune time.Time", modelName)
}
// jsonNameOfField returns the name of the field as it should appear in JSON format
// An empty string indicates that this field is not part of the JSON representation
func (b modelBuilder) jsonNameOfField(field reflect.StructField) string {
if jsonTag := field.Tag.Get("json"); jsonTag != "" {
s := strings.Split(jsonTag, ",")
if s[0] == "-" {
// empty name signals skip property
return ""
} else if s[0] != "" {
return s[0]
}
}
return field.Name
}
// see also http://json-schema.org/latest/json-schema-core.html#anchor8
func (b modelBuilder) jsonSchemaType(modelName string) string {
schemaMap := map[string]string{
"uint": "integer",
"uint8": "integer",
"uint16": "integer",
"uint32": "integer",
"uint64": "integer",
"int": "integer",
"int8": "integer",
"int16": "integer",
"int32": "integer",
"int64": "integer",
"byte": "integer",
"float64": "number",
"float32": "number",
"bool": "boolean",
"time.Time": "string",
}
mapped, ok := schemaMap[modelName]
if !ok {
return modelName // use as is (custom or struct)
}
return mapped
}
func (b modelBuilder) jsonSchemaFormat(modelName string) string {
if b.Config != nil && b.Config.SchemaFormatHandler != nil {
if mapped := b.Config.SchemaFormatHandler(modelName); mapped != "" {
return mapped
}
}
schemaMap := map[string]string{
"int": "int32",
"int32": "int32",
"int64": "int64",
"byte": "byte",
"uint": "integer",
"uint8": "byte",
"float64": "double",
"float32": "float",
"time.Time": "date-time",
"*time.Time": "date-time",
}
mapped, ok := schemaMap[modelName]
if !ok {
return "" // no format
}
return mapped
}


@@ -1,86 +0,0 @@
package swagger
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"encoding/json"
)
// NamedModel associates a name with a Model (not using its Id)
type NamedModel struct {
Name string
Model Model
}
// ModelList encapsulates a list of NamedModel (association)
type ModelList struct {
List []NamedModel
}
// Put adds or replaces a Model by its name
func (l *ModelList) Put(name string, model Model) {
for i, each := range l.List {
if each.Name == name {
// replace
l.List[i] = NamedModel{name, model}
return
}
}
// add
l.List = append(l.List, NamedModel{name, model})
}
// At returns a Model by its name, ok is false if absent
func (l *ModelList) At(name string) (m Model, ok bool) {
for _, each := range l.List {
if each.Name == name {
return each.Model, true
}
}
return m, false
}
// Do enumerates all the models, each with its assigned name
func (l *ModelList) Do(block func(name string, value Model)) {
for _, each := range l.List {
block(each.Name, each.Model)
}
}
// MarshalJSON writes the ModelList as if it was a map[string]Model
func (l ModelList) MarshalJSON() ([]byte, error) {
var buf bytes.Buffer
encoder := json.NewEncoder(&buf)
buf.WriteString("{\n")
for i, each := range l.List {
buf.WriteString("\"")
buf.WriteString(each.Name)
buf.WriteString("\": ")
encoder.Encode(each.Model)
if i < len(l.List)-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("}")
return buf.Bytes(), nil
}
// UnmarshalJSON reads back a ModelList. This is an expensive operation.
func (l *ModelList) UnmarshalJSON(data []byte) error {
raw := map[string]interface{}{}
json.NewDecoder(bytes.NewReader(data)).Decode(&raw)
for k, v := range raw {
// produces JSON bytes for each value
data, err := json.Marshal(v)
if err != nil {
return err
}
var m Model
json.NewDecoder(bytes.NewReader(data)).Decode(&m)
l.Put(k, m)
}
return nil
}


@@ -1,81 +0,0 @@
package swagger
import (
"reflect"
"strings"
)
func (prop *ModelProperty) setDescription(field reflect.StructField) {
if tag := field.Tag.Get("description"); tag != "" {
prop.Description = tag
}
}
func (prop *ModelProperty) setDefaultValue(field reflect.StructField) {
if tag := field.Tag.Get("default"); tag != "" {
prop.DefaultValue = Special(tag)
}
}
func (prop *ModelProperty) setEnumValues(field reflect.StructField) {
// We use | to separate the enum values. This value is chosen
// since it's unlikely to be useful in actual enumeration values.
if tag := field.Tag.Get("enum"); tag != "" {
prop.Enum = strings.Split(tag, "|")
}
}
func (prop *ModelProperty) setMaximum(field reflect.StructField) {
if tag := field.Tag.Get("maximum"); tag != "" {
prop.Maximum = tag
}
}
func (prop *ModelProperty) setType(field reflect.StructField) {
if tag := field.Tag.Get("type"); tag != "" {
// Check if the first two characters of the type tag are
// intended to emulate slice/array behaviour.
//
// If type is intended to be a slice/array then add the
// overridden type to the array item instead of the main property
if len(tag) > 2 && tag[0:2] == "[]" {
pType := "array"
prop.Type = &pType
prop.Items = new(Item)
iType := tag[2:]
prop.Items.Type = &iType
return
}
prop.Type = &tag
}
}
func (prop *ModelProperty) setMinimum(field reflect.StructField) {
if tag := field.Tag.Get("minimum"); tag != "" {
prop.Minimum = tag
}
}
func (prop *ModelProperty) setUniqueItems(field reflect.StructField) {
tag := field.Tag.Get("unique")
switch tag {
case "true":
v := true
prop.UniqueItems = &v
case "false":
v := false
prop.UniqueItems = &v
}
}
func (prop *ModelProperty) setPropertyMetadata(field reflect.StructField) {
prop.setDescription(field)
prop.setEnumValues(field)
prop.setMinimum(field)
prop.setMaximum(field)
prop.setUniqueItems(field)
prop.setDefaultValue(field)
prop.setType(field)
}


@@ -1,87 +0,0 @@
package swagger
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"encoding/json"
)
// NamedModelProperty associates a name to a ModelProperty
type NamedModelProperty struct {
Name string
Property ModelProperty
}
// ModelPropertyList encapsulates a list of NamedModelProperty (association)
type ModelPropertyList struct {
List []NamedModelProperty
}
// At returns the ModelProperty by its name unless absent, then ok is false
func (l *ModelPropertyList) At(name string) (p ModelProperty, ok bool) {
for _, each := range l.List {
if each.Name == name {
return each.Property, true
}
}
return p, false
}
// Put adds or replaces a ModelProperty with this name
func (l *ModelPropertyList) Put(name string, prop ModelProperty) {
// maybe replace existing
for i, each := range l.List {
if each.Name == name {
// replace
l.List[i] = NamedModelProperty{Name: name, Property: prop}
return
}
}
// add
l.List = append(l.List, NamedModelProperty{Name: name, Property: prop})
}
// Do enumerates all the properties, each with its assigned name
func (l *ModelPropertyList) Do(block func(name string, value ModelProperty)) {
for _, each := range l.List {
block(each.Name, each.Property)
}
}
// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty
func (l ModelPropertyList) MarshalJSON() ([]byte, error) {
var buf bytes.Buffer
encoder := json.NewEncoder(&buf)
buf.WriteString("{\n")
for i, each := range l.List {
buf.WriteString("\"")
buf.WriteString(each.Name)
buf.WriteString("\": ")
encoder.Encode(each.Property)
if i < len(l.List)-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("}")
return buf.Bytes(), nil
}
// UnmarshalJSON reads back a ModelPropertyList. This is an expensive operation.
func (l *ModelPropertyList) UnmarshalJSON(data []byte) error {
raw := map[string]interface{}{}
json.NewDecoder(bytes.NewReader(data)).Decode(&raw)
for k, v := range raw {
// produces JSON bytes for each value
data, err := json.Marshal(v)
if err != nil {
return err
}
var m ModelProperty
json.NewDecoder(bytes.NewReader(data)).Decode(&m)
l.Put(k, m)
}
return nil
}


@@ -1,36 +0,0 @@
package swagger
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import "github.com/emicklei/go-restful"
type orderedRouteMap struct {
elements map[string][]restful.Route
keys []string
}
func newOrderedRouteMap() *orderedRouteMap {
return &orderedRouteMap{
elements: map[string][]restful.Route{},
keys: []string{},
}
}
func (o *orderedRouteMap) Add(key string, route restful.Route) {
routes, ok := o.elements[key]
if ok {
routes = append(routes, route)
o.elements[key] = routes
return
}
o.elements[key] = []restful.Route{route}
o.keys = append(o.keys, key)
}
func (o *orderedRouteMap) Do(block func(key string, routes []restful.Route)) {
for _, k := range o.keys {
block(k, o.elements[k])
}
}


@@ -1,185 +0,0 @@
// Package swagger implements the structures of the Swagger
// https://github.com/wordnik/swagger-spec/blob/master/versions/1.2.md
package swagger
const swaggerVersion = "1.2"
// 4.3.3 Data Type Fields
type DataTypeFields struct {
Type *string `json:"type,omitempty"` // if Ref not used
Ref *string `json:"$ref,omitempty"` // if Type not used
Format string `json:"format,omitempty"`
DefaultValue Special `json:"defaultValue,omitempty"`
Enum []string `json:"enum,omitempty"`
Minimum string `json:"minimum,omitempty"`
Maximum string `json:"maximum,omitempty"`
Items *Item `json:"items,omitempty"`
UniqueItems *bool `json:"uniqueItems,omitempty"`
}
type Special string
// 4.3.4 Items Object
type Item struct {
Type *string `json:"type,omitempty"`
Ref *string `json:"$ref,omitempty"`
Format string `json:"format,omitempty"`
}
// 5.1 Resource Listing
type ResourceListing struct {
SwaggerVersion string `json:"swaggerVersion"` // e.g 1.2
Apis []Resource `json:"apis"`
ApiVersion string `json:"apiVersion"`
Info Info `json:"info"`
Authorizations []Authorization `json:"authorizations,omitempty"`
}
// 5.1.2 Resource Object
type Resource struct {
Path string `json:"path"` // relative or absolute, must start with /
Description string `json:"description"`
}
// 5.1.3 Info Object
type Info struct {
Title string `json:"title"`
Description string `json:"description"`
TermsOfServiceUrl string `json:"termsOfServiceUrl,omitempty"`
Contact string `json:"contact,omitempty"`
License string `json:"license,omitempty"`
LicenseUrl string `json:"licenseUrl,omitempty"`
}
// 5.1.5
type Authorization struct {
Type string `json:"type"`
PassAs string `json:"passAs"`
Keyname string `json:"keyname"`
Scopes []Scope `json:"scopes"`
GrantTypes []GrantType `json:"grandTypes"`
}
// 5.1.6, 5.2.11
type Scope struct {
// Required. The name of the scope.
Scope string `json:"scope"`
// Recommended. A short description of the scope.
Description string `json:"description"`
}
// 5.1.7
type GrantType struct {
Implicit Implicit `json:"implicit"`
AuthorizationCode AuthorizationCode `json:"authorization_code"`
}
// 5.1.8 Implicit Object
type Implicit struct {
// Required. The login endpoint definition.
loginEndpoint LoginEndpoint `json:"loginEndpoint"`
// An optional alternative name to standard "access_token" OAuth2 parameter.
TokenName string `json:"tokenName"`
}
// 5.1.9 Authorization Code Object
type AuthorizationCode struct {
TokenRequestEndpoint TokenRequestEndpoint `json:"tokenRequestEndpoint"`
TokenEndpoint TokenEndpoint `json:"tokenEndpoint"`
}
// 5.1.10 Login Endpoint Object
type LoginEndpoint struct {
// Required. The URL of the authorization endpoint for the implicit grant flow. The value SHOULD be in a URL format.
Url string `json:"url"`
}
// 5.1.11 Token Request Endpoint Object
type TokenRequestEndpoint struct {
// Required. The URL of the authorization endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
Url string `json:"url"`
// An optional alternative name to standard "client_id" OAuth2 parameter.
ClientIdName string `json:"clientIdName"`
// An optional alternative name to the standard "client_secret" OAuth2 parameter.
ClientSecretName string `json:"clientSecretName"`
}
// 5.1.12 Token Endpoint Object
type TokenEndpoint struct {
// Required. The URL of the token endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
Url string `json:"url"`
// An optional alternative name to standard "access_token" OAuth2 parameter.
TokenName string `json:"tokenName"`
}
// 5.2 API Declaration
type ApiDeclaration struct {
SwaggerVersion string `json:"swaggerVersion"`
ApiVersion string `json:"apiVersion"`
BasePath string `json:"basePath"`
ResourcePath string `json:"resourcePath"` // must start with /
Info Info `json:"info"`
Apis []Api `json:"apis,omitempty"`
Models ModelList `json:"models,omitempty"`
Produces []string `json:"produces,omitempty"`
Consumes []string `json:"consumes,omitempty"`
Authorizations []Authorization `json:"authorizations,omitempty"`
}
// 5.2.2 API Object
type Api struct {
Path string `json:"path"` // relative or absolute, must start with /
Description string `json:"description"`
Operations []Operation `json:"operations,omitempty"`
}
// 5.2.3 Operation Object
type Operation struct {
DataTypeFields
Method string `json:"method"`
Summary string `json:"summary,omitempty"`
Notes string `json:"notes,omitempty"`
Nickname string `json:"nickname"`
Authorizations []Authorization `json:"authorizations,omitempty"`
Parameters []Parameter `json:"parameters"`
ResponseMessages []ResponseMessage `json:"responseMessages,omitempty"` // optional
Produces []string `json:"produces,omitempty"`
Consumes []string `json:"consumes,omitempty"`
Deprecated string `json:"deprecated,omitempty"`
}
// 5.2.4 Parameter Object
type Parameter struct {
DataTypeFields
ParamType string `json:"paramType"` // path,query,body,header,form
Name string `json:"name"`
Description string `json:"description"`
Required bool `json:"required"`
AllowMultiple bool `json:"allowMultiple"`
}
// 5.2.5 Response Message Object
type ResponseMessage struct {
Code int `json:"code"`
Message string `json:"message"`
ResponseModel string `json:"responseModel,omitempty"`
}
// 5.2.6, 5.2.7 Models Object
type Model struct {
Id string `json:"id"`
Description string `json:"description,omitempty"`
Required []string `json:"required,omitempty"`
Properties ModelPropertyList `json:"properties"`
SubTypes []string `json:"subTypes,omitempty"`
Discriminator string `json:"discriminator,omitempty"`
}
// 5.2.8 Properties Object
type ModelProperty struct {
DataTypeFields
Description string `json:"description,omitempty"`
}
// 5.2.10
type Authorizations map[string]Authorization


@@ -1,21 +0,0 @@
package swagger
type SwaggerBuilder struct {
SwaggerService
}
func NewSwaggerBuilder(config Config) *SwaggerBuilder {
return &SwaggerBuilder{*newSwaggerService(config)}
}
func (sb SwaggerBuilder) ProduceListing() ResourceListing {
return sb.SwaggerService.produceListing()
}
func (sb SwaggerBuilder) ProduceAllDeclarations() map[string]ApiDeclaration {
return sb.SwaggerService.produceAllDeclarations()
}
func (sb SwaggerBuilder) ProduceDeclarations(route string) (*ApiDeclaration, bool) {
return sb.SwaggerService.produceDeclarations(route)
}


@@ -1,443 +0,0 @@
package swagger
import (
"fmt"
"github.com/emicklei/go-restful"
// "github.com/emicklei/hopwatch"
"net/http"
"reflect"
"sort"
"strings"
"github.com/emicklei/go-restful/log"
)
type SwaggerService struct {
config Config
apiDeclarationMap *ApiDeclarationList
}
func newSwaggerService(config Config) *SwaggerService {
sws := &SwaggerService{
config: config,
apiDeclarationMap: new(ApiDeclarationList)}
// Build all ApiDeclarations
for _, each := range config.WebServices {
rootPath := each.RootPath()
// skip the api service itself
if rootPath != config.ApiPath {
if rootPath == "" || rootPath == "/" {
// use routes
for _, route := range each.Routes() {
entry := staticPathFromRoute(route)
_, exists := sws.apiDeclarationMap.At(entry)
if !exists {
sws.apiDeclarationMap.Put(entry, sws.composeDeclaration(each, entry))
}
}
} else { // use root path
sws.apiDeclarationMap.Put(each.RootPath(), sws.composeDeclaration(each, each.RootPath()))
}
}
}
// if specified then call the PostBuildHandler
if config.PostBuildHandler != nil {
config.PostBuildHandler(sws.apiDeclarationMap)
}
return sws
}
// LogInfo is the function that is called when this package needs to log. It defaults to log.Printf
var LogInfo = func(format string, v ...interface{}) {
// use the restful package-wide logger
log.Printf(format, v...)
}
// InstallSwaggerService adds the WebService that provides the API documentation of all services
// conforming to the Swagger documentation specification. (https://github.com/wordnik/swagger-core/wiki).
func InstallSwaggerService(aSwaggerConfig Config) {
RegisterSwaggerService(aSwaggerConfig, restful.DefaultContainer)
}
// RegisterSwaggerService adds the WebService that provides the API documentation of all services
// conforming to the Swagger documentation specification. (https://github.com/wordnik/swagger-core/wiki).
func RegisterSwaggerService(config Config, wsContainer *restful.Container) {
sws := newSwaggerService(config)
ws := new(restful.WebService)
ws.Path(config.ApiPath)
ws.Produces(restful.MIME_JSON)
if config.DisableCORS {
ws.Filter(enableCORS)
}
ws.Route(ws.GET("/").To(sws.getListing))
ws.Route(ws.GET("/{a}").To(sws.getDeclarations))
ws.Route(ws.GET("/{a}/{b}").To(sws.getDeclarations))
ws.Route(ws.GET("/{a}/{b}/{c}").To(sws.getDeclarations))
ws.Route(ws.GET("/{a}/{b}/{c}/{d}").To(sws.getDeclarations))
ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}").To(sws.getDeclarations))
ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}").To(sws.getDeclarations))
ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}/{g}").To(sws.getDeclarations))
LogInfo("[restful/swagger] listing is available at %v%v", config.WebServicesUrl, config.ApiPath)
wsContainer.Add(ws)
// Check paths for UI serving
if config.StaticHandler == nil && config.SwaggerFilePath != "" && config.SwaggerPath != "" {
swaggerPathSlash := config.SwaggerPath
// path must end with slash /
if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)")
swaggerPathSlash += "/"
}
LogInfo("[restful/swagger] %v%v is mapped to folder %v", config.WebServicesUrl, swaggerPathSlash, config.SwaggerFilePath)
wsContainer.Handle(swaggerPathSlash, http.StripPrefix(swaggerPathSlash, http.FileServer(http.Dir(config.SwaggerFilePath))))
//if we define a custom static handler use it
} else if config.StaticHandler != nil && config.SwaggerPath != "" {
swaggerPathSlash := config.SwaggerPath
// path must end with slash /
if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)")
swaggerPathSlash += "/"
}
LogInfo("[restful/swagger] %v%v is mapped to custom Handler %T", config.WebServicesUrl, swaggerPathSlash, config.StaticHandler)
wsContainer.Handle(swaggerPathSlash, config.StaticHandler)
} else {
LogInfo("[restful/swagger] Swagger(File)Path is empty ; no UI is served")
}
}
func staticPathFromRoute(r restful.Route) string {
static := r.Path
bracket := strings.Index(static, "{")
if bracket <= 1 { // result cannot be empty
return static
}
if bracket != -1 {
static = r.Path[:bracket]
}
if strings.HasSuffix(static, "/") {
return static[:len(static)-1]
} else {
return static
}
}
func enableCORS(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
if origin := req.HeaderParameter(restful.HEADER_Origin); origin != "" {
// prevent duplicate header
if len(resp.Header().Get(restful.HEADER_AccessControlAllowOrigin)) == 0 {
resp.AddHeader(restful.HEADER_AccessControlAllowOrigin, origin)
}
}
chain.ProcessFilter(req, resp)
}
func (sws SwaggerService) getListing(req *restful.Request, resp *restful.Response) {
listing := sws.produceListing()
resp.WriteAsJson(listing)
}
func (sws SwaggerService) produceListing() ResourceListing {
listing := ResourceListing{SwaggerVersion: swaggerVersion, ApiVersion: sws.config.ApiVersion, Info: sws.config.Info}
sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
ref := Resource{Path: k}
if len(v.Apis) > 0 { // use description of first (could still be empty)
ref.Description = v.Apis[0].Description
}
listing.Apis = append(listing.Apis, ref)
})
return listing
}
func (sws SwaggerService) getDeclarations(req *restful.Request, resp *restful.Response) {
decl, ok := sws.produceDeclarations(composeRootPath(req))
if !ok {
resp.WriteErrorString(http.StatusNotFound, "ApiDeclaration not found")
return
}
// unless WebServicesUrl is given
if len(sws.config.WebServicesUrl) == 0 {
// update base path from the actual request
// TODO how to detect https? assume http for now
var host string
// X-Forwarded-Host or Host or Request.Host
hostvalues, ok := req.Request.Header["X-Forwarded-Host"] // apache specific?
if !ok || len(hostvalues) == 0 {
forwarded, ok := req.Request.Header["Host"] // without reverse-proxy
if !ok || len(forwarded) == 0 {
// fallback to Host field
host = req.Request.Host
} else {
host = forwarded[0]
}
} else {
host = hostvalues[0]
}
// inspect Referer for the scheme (http vs https)
scheme := "http"
if referer := req.Request.Header["Referer"]; len(referer) > 0 {
if strings.HasPrefix(referer[0], "https") {
scheme = "https"
}
}
decl.BasePath = fmt.Sprintf("%s://%s", scheme, host)
}
resp.WriteAsJson(decl)
}
func (sws SwaggerService) produceAllDeclarations() map[string]ApiDeclaration {
decls := map[string]ApiDeclaration{}
sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
decls[k] = v
})
return decls
}
func (sws SwaggerService) produceDeclarations(route string) (*ApiDeclaration, bool) {
decl, ok := sws.apiDeclarationMap.At(route)
if !ok {
return nil, false
}
decl.BasePath = sws.config.WebServicesUrl
return &decl, true
}
// composeDeclaration uses all routes and parameters to create an ApiDeclaration
func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix string) ApiDeclaration {
decl := ApiDeclaration{
SwaggerVersion: swaggerVersion,
BasePath: sws.config.WebServicesUrl,
ResourcePath: pathPrefix,
Models: ModelList{},
ApiVersion: ws.Version()}
// collect any path parameters
rootParams := []Parameter{}
for _, param := range ws.PathParameters() {
rootParams = append(rootParams, asSwaggerParameter(param.Data()))
}
// aggregate by path
pathToRoutes := newOrderedRouteMap()
for _, other := range ws.Routes() {
if strings.HasPrefix(other.Path, pathPrefix) {
if len(pathPrefix) > 1 && len(other.Path) > len(pathPrefix) && other.Path[len(pathPrefix)] != '/' {
continue
}
pathToRoutes.Add(other.Path, other)
}
}
pathToRoutes.Do(func(path string, routes []restful.Route) {
api := Api{Path: strings.TrimSuffix(withoutWildcard(path), "/"), Description: ws.Documentation()}
voidString := "void"
for _, route := range routes {
operation := Operation{
Method: route.Method,
Summary: route.Doc,
Notes: route.Notes,
// Type gets overwritten if there is a write sample
DataTypeFields: DataTypeFields{Type: &voidString},
Parameters: []Parameter{},
Nickname: route.Operation,
ResponseMessages: composeResponseMessages(route, &decl, &sws.config)}
operation.Consumes = route.Consumes
operation.Produces = route.Produces
// share root params if any
for _, swparam := range rootParams {
operation.Parameters = append(operation.Parameters, swparam)
}
// route specific params
for _, param := range route.ParameterDocs {
operation.Parameters = append(operation.Parameters, asSwaggerParameter(param.Data()))
}
sws.addModelsFromRouteTo(&operation, route, &decl)
api.Operations = append(api.Operations, operation)
}
decl.Apis = append(decl.Apis, api)
})
return decl
}
func withoutWildcard(path string) string {
if strings.HasSuffix(path, ":*}") {
return path[0:len(path)-3] + "}"
}
return path
}
// composeResponseMessages takes the ResponseErrors (if any) and creates ResponseMessages from them.
func composeResponseMessages(route restful.Route, decl *ApiDeclaration, config *Config) (messages []ResponseMessage) {
if route.ResponseErrors == nil {
return messages
}
// sort by code
codes := sort.IntSlice{}
for code := range route.ResponseErrors {
codes = append(codes, code)
}
codes.Sort()
for _, code := range codes {
each := route.ResponseErrors[code]
message := ResponseMessage{
Code: code,
Message: each.Message,
}
if each.Model != nil {
st := reflect.TypeOf(each.Model)
isCollection, st := detectCollectionType(st)
// collection cannot be in responsemodel
if !isCollection {
modelName := modelBuilder{}.keyFrom(st)
modelBuilder{Models: &decl.Models, Config: config}.addModel(st, "")
message.ResponseModel = modelName
}
}
messages = append(messages, message)
}
return
}
// addModelsFromRouteTo takes any read or write sample from the Route and creates a Swagger model from it.
func (sws SwaggerService) addModelsFromRouteTo(operation *Operation, route restful.Route, decl *ApiDeclaration) {
if route.ReadSample != nil {
sws.addModelFromSampleTo(operation, false, route.ReadSample, &decl.Models)
}
if route.WriteSample != nil {
sws.addModelFromSampleTo(operation, true, route.WriteSample, &decl.Models)
}
}
func detectCollectionType(st reflect.Type) (bool, reflect.Type) {
isCollection := false
if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
st = st.Elem()
isCollection = true
} else {
if st.Kind() == reflect.Ptr {
if st.Elem().Kind() == reflect.Slice || st.Elem().Kind() == reflect.Array {
st = st.Elem().Elem()
isCollection = true
}
}
}
return isCollection, st
}
// addModelFromSampleTo creates and adds (or overwrites) a Model from a sample resource
func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) {
mb := modelBuilder{Models: models, Config: &sws.config}
if isResponse {
sampleType, items := asDataType(sample, &sws.config)
operation.Type = sampleType
operation.Items = items
}
mb.addModelFrom(sample)
}
func asSwaggerParameter(param restful.ParameterData) Parameter {
return Parameter{
DataTypeFields: DataTypeFields{
Type: &param.DataType,
Format: asFormat(param.DataType, param.DataFormat),
DefaultValue: Special(param.DefaultValue),
},
Name: param.Name,
Description: param.Description,
ParamType: asParamType(param.Kind),
Required: param.Required}
}
// Between 1 and 7 path parameters are supported
func composeRootPath(req *restful.Request) string {
path := "/" + req.PathParameter("a")
b := req.PathParameter("b")
if b == "" {
return path
}
path = path + "/" + b
c := req.PathParameter("c")
if c == "" {
return path
}
path = path + "/" + c
d := req.PathParameter("d")
if d == "" {
return path
}
path = path + "/" + d
e := req.PathParameter("e")
if e == "" {
return path
}
path = path + "/" + e
f := req.PathParameter("f")
if f == "" {
return path
}
path = path + "/" + f
g := req.PathParameter("g")
if g == "" {
return path
}
return path + "/" + g
}
func asFormat(dataType string, dataFormat string) string {
if dataFormat != "" {
return dataFormat
}
return "" // TODO
}
func asParamType(kind int) string {
switch {
case kind == restful.PathParameterKind:
return "path"
case kind == restful.QueryParameterKind:
return "query"
case kind == restful.BodyParameterKind:
return "body"
case kind == restful.HeaderParameterKind:
return "header"
case kind == restful.FormParameterKind:
return "form"
}
return ""
}
func asDataType(any interface{}, config *Config) (*string, *Item) {
// If it's not a collection, return the suggested model name
st := reflect.TypeOf(any)
isCollection, st := detectCollectionType(st)
modelName := modelBuilder{}.keyFrom(st)
// if it's not a collection we are done
if !isCollection {
return &modelName, nil
}
// XXX: This is not very elegant
// We create an Item object referring to the given model
models := ModelList{}
mb := modelBuilder{Models: &models, Config: config}
mb.addModelFrom(any)
elemTypeName := mb.getElementTypeName(modelName, "", st)
item := new(Item)
if mb.isPrimitiveType(elemTypeName) {
mapped := mb.jsonSchemaType(elemTypeName)
item.Type = &mapped
} else {
item.Ref = &elemTypeName
}
tmp := "array"
return &tmp, item
}

View file

@@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 Fatih Arslan
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View file

@@ -1,58 +0,0 @@
# CamelCase [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/camelcase) [![Build Status](http://img.shields.io/travis/fatih/camelcase.svg?style=flat-square)](https://travis-ci.org/fatih/camelcase)
CamelCase is a Go package that splits the words of a camelCase string into a
slice of words. It can be used to convert a camelCase word (lower or upper
case) into any other style of word.
## Splitting rules:
1. If string is not valid UTF-8, return it without splitting as
single item array.
2. Assign all unicode characters into one of 4 sets: lower case
letters, upper case letters, numbers, and all other characters.
3. Iterate through characters of string, introducing splits
between adjacent characters that belong to different sets.
4. Iterate through array of split strings, and if a given string
is upper case:
* if subsequent string is lower case:
* move last character of upper case string to beginning of
lower case string
## Install
```bash
go get github.com/fatih/camelcase
```
## Usage and examples
```go
splitted := camelcase.Split("GolangPackage")
fmt.Println(splitted[0], splitted[1]) // prints: "Golang", "Package"
```
Both lower camel case and upper camel case are supported. For more info please
check: [http://en.wikipedia.org/wiki/CamelCase](http://en.wikipedia.org/wiki/CamelCase)
Below are some example cases:
```
"" => []
"lowercase" => ["lowercase"]
"Class" => ["Class"]
"MyClass" => ["My", "Class"]
"MyC" => ["My", "C"]
"HTML" => ["HTML"]
"PDFLoader" => ["PDF", "Loader"]
"AString" => ["A", "String"]
"SimpleXMLParser" => ["Simple", "XML", "Parser"]
"vimRPCPlugin" => ["vim", "RPC", "Plugin"]
"GL11Version" => ["GL", "11", "Version"]
"99Bottles" => ["99", "Bottles"]
"May5" => ["May", "5"]
"BFG9000" => ["BFG", "9000"]
"BöseÜberraschung" => ["Böse", "Überraschung"]
"Two spaces" => ["Two", " ", "spaces"]
"BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"]
```
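For a quick end-to-end check, here is a minimal runnable sketch (the inputs come from the cases above; the `main` package layout is just illustrative):

```go
package main

import (
	"fmt"

	"github.com/fatih/camelcase"
)

func main() {
	// A few of the documented cases, split and printed as Go string slices.
	for _, s := range []string{"MyClass", "PDFLoader", "GL11Version", "BöseÜberraschung"} {
		fmt.Printf("%q => %q\n", s, camelcase.Split(s))
	}
}
```

Each input prints next to its split form, e.g. `"MyClass" => ["My" "Class"]`.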

View file

@@ -1,90 +0,0 @@
// Package camelcase is a micro package to split the words of a camelcase type
// string into a slice of words.
package camelcase
import (
"unicode"
"unicode/utf8"
)
// Split splits the camelcase word and returns a list of words. It also
// supports digits. Both lower camel case and upper camel case are supported.
// For more info please check: http://en.wikipedia.org/wiki/CamelCase
//
// Examples
//
//   ""                => []
// "lowercase" => ["lowercase"]
// "Class" => ["Class"]
// "MyClass" => ["My", "Class"]
// "MyC" => ["My", "C"]
// "HTML" => ["HTML"]
// "PDFLoader" => ["PDF", "Loader"]
// "AString" => ["A", "String"]
// "SimpleXMLParser" => ["Simple", "XML", "Parser"]
// "vimRPCPlugin" => ["vim", "RPC", "Plugin"]
// "GL11Version" => ["GL", "11", "Version"]
// "99Bottles" => ["99", "Bottles"]
// "May5" => ["May", "5"]
// "BFG9000" => ["BFG", "9000"]
// "BöseÜberraschung" => ["Böse", "Überraschung"]
// "Two spaces" => ["Two", " ", "spaces"]
// "BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"]
//
// Splitting rules
//
// 1) If string is not valid UTF-8, return it without splitting as
// single item array.
// 2) Assign all unicode characters into one of 4 sets: lower case
// letters, upper case letters, numbers, and all other characters.
// 3) Iterate through characters of string, introducing splits
// between adjacent characters that belong to different sets.
// 4) Iterate through array of split strings, and if a given string
// is upper case:
// if subsequent string is lower case:
// move last character of upper case string to beginning of
// lower case string
func Split(src string) (entries []string) {
// don't split invalid utf8
if !utf8.ValidString(src) {
return []string{src}
}
entries = []string{}
var runes [][]rune
lastClass := 0
class := 0
// split into fields based on class of unicode character
for _, r := range src {
switch true {
case unicode.IsLower(r):
class = 1
case unicode.IsUpper(r):
class = 2
case unicode.IsDigit(r):
class = 3
default:
class = 4
}
if class == lastClass {
runes[len(runes)-1] = append(runes[len(runes)-1], r)
} else {
runes = append(runes, []rune{r})
}
lastClass = class
}
// handle upper case -> lower case sequences, e.g.
// "PDFL", "oader" -> "PDF", "Loader"
for i := 0; i < len(runes)-1; i++ {
if unicode.IsUpper(runes[i][0]) && unicode.IsLower(runes[i+1][0]) {
runes[i+1] = append([]rune{runes[i][len(runes[i])-1]}, runes[i+1]...)
runes[i] = runes[i][:len(runes[i])-1]
}
}
// construct []string from results
for _, s := range runes {
if len(s) > 0 {
entries = append(entries, string(s))
}
}
return
}

View file

@@ -1,22 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 Ulule
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@@ -1,129 +0,0 @@
# Deepcopier
[![Build Status](https://secure.travis-ci.org/ulule/deepcopier.png?branch=master)](http://travis-ci.org/ulule/deepcopier)
This package is meant to make copying structs to/from other structs a bit easier.
## Installation
```bash
go get -u github.com/ulule/deepcopier
```
## Usage
```golang
// Deep copy instance1 into instance2
Copy(instance1).To(instance2)
// Deep copy instance1 into instance2 and passes the following context (which
// is basically a map[string]interface{}) as first argument
// to methods of instance2 that defined the struct tag "context".
Copy(instance1).WithContext(map[string]interface{}{"foo": "bar"}).To(instance2)
// Deep copy instance2 into instance1
Copy(instance1).From(instance2)
// Deep copy instance2 into instance1 and passes the following context (which
// is basically a map[string]interface{}) as first argument
// to methods of instance1 that defined the struct tag "context".
Copy(instance1).WithContext(map[string]interface{}{"foo": "bar"}).From(instance2)
```
Available options for `deepcopier` struct tag:
| Option | Description |
| --------- | -------------------------------------------------------------------- |
| `field` | Field or method name in source instance |
| `skip` | Ignores the field |
| `context` | Takes a `map[string]interface{}` as first argument (for methods) |
| `force` | Set the value of a `sql.Null*` field (instead of copying the struct) |
**Options example:**
```golang
type Source struct {
Name string
SkipMe string
SQLNullStringToSQLNullString sql.NullString
SQLNullStringToString sql.NullString
}
func (Source) MethodThatTakesContext(c map[string]interface{}) string {
return "whatever"
}
type Destination struct {
FieldWithAnotherNameInSource string `deepcopier:"field:Name"`
SkipMe string `deepcopier:"skip"`
MethodThatTakesContext string `deepcopier:"context"`
SQLNullStringToSQLNullString sql.NullString
SQLNullStringToString string `deepcopier:"force"`
}
```
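To make the effect of these tags concrete, here is a hedged, self-contained sketch. It reuses a trimmed variant of the `Source`/`Destination` structs above with illustrative values; the `main` package wrapper and the field contents are assumptions, not part of the library docs (see also the fuller example below):

```golang
package main

import (
	"database/sql"
	"fmt"

	"github.com/ulule/deepcopier"
)

// Trimmed variants of the Source/Destination structs shown above.
type Source struct {
	Name                  string
	SkipMe                string
	SQLNullStringToString sql.NullString
}

func (Source) MethodThatTakesContext(c map[string]interface{}) string {
	return fmt.Sprintf("context was %v", c)
}

type Destination struct {
	FieldWithAnotherNameInSource string `deepcopier:"field:Name"`
	SkipMe                       string `deepcopier:"skip"`
	MethodThatTakesContext       string `deepcopier:"context"`
	SQLNullStringToString        string `deepcopier:"force"`
}

func main() {
	src := &Source{
		Name:                  "copied via field:Name",
		SkipMe:                "never copied",
		SQLNullStringToString: sql.NullString{Valid: true, String: "flattened by force"},
	}
	dst := &Destination{}

	// WithContext hands the map to methods tagged with "context".
	if err := deepcopier.Copy(src).WithContext(map[string]interface{}{"foo": "bar"}).To(dst); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", dst)
}
```

Running it should leave `SkipMe` empty, fill `FieldWithAnotherNameInSource` from `Name`, call `MethodThatTakesContext` with the provided map, and flatten the `sql.NullString` into the plain string field thanks to `force`.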
Example:
```golang
package main
import (
"database/sql"
"fmt"
"github.com/ulule/deepcopier"
)
// Model
type User struct {
// Basic string field
Name string
// Deepcopier supports https://golang.org/pkg/database/sql/driver/#Valuer
Email sql.NullString
}
func (u *User) MethodThatTakesContext(ctx map[string]interface{}) string {
// do whatever you want
return "hello from this method"
}
// Resource
type UserResource struct {
DisplayName string `deepcopier:"field:Name"`
SkipMe string `deepcopier:"skip"`
MethodThatTakesContext string `deepcopier:"context"`
Email string `deepcopier:"force"`
}
func main() {
user := &User{
Name: "gilles",
Email: sql.NullString{
Valid: true,
String: "gilles@example.com",
},
}
resource := &UserResource{}
deepcopier.Copy(user).To(resource)
fmt.Println(resource.DisplayName)
fmt.Println(resource.Email)
}
```
Looking for more information about the usage?
We wrote [an introduction article](https://github.com/ulule/deepcopier/blob/master/examples/rest-usage/README.rst).
Have a look and feel free to give us your feedback.
## Contributing
* Ping us on twitter [@oibafsellig](https://twitter.com/oibafsellig), [@thoas](https://twitter.com/thoas)
* Fork the [project](https://github.com/ulule/deepcopier)
* Help us improving and fixing [issues](https://github.com/ulule/deepcopier/issues)
Don't hesitate ;)

View file

@@ -1,350 +0,0 @@
package deepcopier
import (
"database/sql/driver"
"fmt"
"reflect"
"strings"
)
const (
// TagName is the deepcopier struct tag name.
TagName = "deepcopier"
// FieldOptionName is the from field option name for struct tag.
FieldOptionName = "field"
// ContextOptionName is the context option name for struct tag.
ContextOptionName = "context"
// SkipOptionName is the skip option name for struct tag.
SkipOptionName = "skip"
// ForceOptionName is the force option name for struct tag.
ForceOptionName = "force"
)
type (
// TagOptions is a map that contains extracted struct tag options.
TagOptions map[string]string
// Options are copier options.
Options struct {
// Context given to WithContext() method.
Context map[string]interface{}
// Reversed reverses struct tag checkings.
Reversed bool
}
)
// DeepCopier deep copies a struct to/from a struct.
type DeepCopier struct {
dst interface{}
src interface{}
ctx map[string]interface{}
}
// Copy sets source or destination.
func Copy(src interface{}) *DeepCopier {
return &DeepCopier{src: src}
}
// WithContext injects the given context into the builder instance.
func (dc *DeepCopier) WithContext(ctx map[string]interface{}) *DeepCopier {
dc.ctx = ctx
return dc
}
// To sets the destination.
func (dc *DeepCopier) To(dst interface{}) error {
dc.dst = dst
return process(dc.dst, dc.src, Options{Context: dc.ctx})
}
// From sets the given value as the source and the value previously passed to Copy as the destination.
func (dc *DeepCopier) From(src interface{}) error {
dc.dst = dc.src
dc.src = src
return process(dc.dst, dc.src, Options{Context: dc.ctx, Reversed: true})
}
// process handles copy.
func process(dst interface{}, src interface{}, args ...Options) error {
var (
options = Options{}
srcValue = reflect.Indirect(reflect.ValueOf(src))
dstValue = reflect.Indirect(reflect.ValueOf(dst))
srcFieldNames = getFieldNames(src)
srcMethodNames = getMethodNames(src)
)
if len(args) > 0 {
options = args[0]
}
if !dstValue.CanAddr() {
return fmt.Errorf("destination %+v is unaddressable", dstValue.Interface())
}
for _, f := range srcFieldNames {
var (
srcFieldValue = srcValue.FieldByName(f)
srcFieldType, srcFieldFound = srcValue.Type().FieldByName(f)
srcFieldName = srcFieldType.Name
dstFieldName = srcFieldName
tagOptions TagOptions
)
if !srcFieldFound {
continue
}
if options.Reversed {
tagOptions = getTagOptions(srcFieldType.Tag.Get(TagName))
if v, ok := tagOptions[FieldOptionName]; ok && v != "" {
dstFieldName = v
}
} else {
if name, opts := getRelatedField(dst, srcFieldName); name != "" {
dstFieldName, tagOptions = name, opts
}
}
if _, ok := tagOptions[SkipOptionName]; ok {
continue
}
var (
dstFieldType, dstFieldFound = dstValue.Type().FieldByName(dstFieldName)
dstFieldValue = dstValue.FieldByName(dstFieldName)
)
if !dstFieldFound {
continue
}
// Force option for empty interfaces and nullable types
_, force := tagOptions[ForceOptionName]
// Valuer -> ptr
if isNullableType(srcFieldType.Type) && dstFieldValue.Kind() == reflect.Ptr && force {
v, _ := srcFieldValue.Interface().(driver.Valuer).Value()
if v == nil {
continue
}
valueType := reflect.TypeOf(v)
ptr := reflect.New(valueType)
ptr.Elem().Set(reflect.ValueOf(v))
if valueType.AssignableTo(dstFieldType.Type.Elem()) {
dstFieldValue.Set(ptr)
}
continue
}
// Valuer -> value
if isNullableType(srcFieldType.Type) {
if force {
v, _ := srcFieldValue.Interface().(driver.Valuer).Value()
if v == nil {
continue
}
rv := reflect.ValueOf(v)
if rv.Type().AssignableTo(dstFieldType.Type) {
dstFieldValue.Set(rv)
}
}
continue
}
if dstFieldValue.Kind() == reflect.Interface {
if force {
dstFieldValue.Set(srcFieldValue)
}
continue
}
// Ptr -> Value
if srcFieldType.Type.Kind() == reflect.Ptr && !srcFieldValue.IsNil() && dstFieldType.Type.Kind() != reflect.Ptr {
indirect := reflect.Indirect(srcFieldValue)
if indirect.Type().AssignableTo(dstFieldType.Type) {
dstFieldValue.Set(indirect)
continue
}
}
// Other types
if srcFieldType.Type.AssignableTo(dstFieldType.Type) {
dstFieldValue.Set(srcFieldValue)
}
}
for _, m := range srcMethodNames {
name, opts := getRelatedField(dst, m)
if name == "" {
continue
}
if _, ok := opts[SkipOptionName]; ok {
continue
}
method := reflect.ValueOf(src).MethodByName(m)
if !method.IsValid() {
return fmt.Errorf("method %s is invalid", m)
}
var (
dstFieldType, _ = dstValue.Type().FieldByName(name)
dstFieldValue = dstValue.FieldByName(name)
_, withContext = opts[ContextOptionName]
_, force = opts[ForceOptionName]
)
args := []reflect.Value{}
if withContext {
args = []reflect.Value{reflect.ValueOf(options.Context)}
}
var (
result = method.Call(args)[0]
resultInterface = result.Interface()
resultValue = reflect.ValueOf(resultInterface)
resultType = resultValue.Type()
)
// Value -> Ptr
if dstFieldValue.Kind() == reflect.Ptr && force {
ptr := reflect.New(resultType)
ptr.Elem().Set(resultValue)
if ptr.Type().AssignableTo(dstFieldType.Type) {
dstFieldValue.Set(ptr)
}
continue
}
// Ptr -> value
if resultValue.Kind() == reflect.Ptr && force {
if resultValue.Elem().Type().AssignableTo(dstFieldType.Type) {
dstFieldValue.Set(resultValue.Elem())
}
continue
}
if resultType.AssignableTo(dstFieldType.Type) && result.IsValid() {
dstFieldValue.Set(result)
}
}
return nil
}
// getTagOptions parses deepcopier tag field and returns options.
func getTagOptions(value string) TagOptions {
options := TagOptions{}
for _, opt := range strings.Split(value, ";") {
o := strings.Split(opt, ":")
// deepcopier:"keyword; without; value;"
if len(o) == 1 {
options[o[0]] = ""
}
// deepcopier:"key:value; anotherkey:anothervalue"
if len(o) == 2 {
options[strings.TrimSpace(o[0])] = strings.TrimSpace(o[1])
}
}
return options
}
// getRelatedField returns the first matching field.
func getRelatedField(instance interface{}, name string) (string, TagOptions) {
var (
value = reflect.Indirect(reflect.ValueOf(instance))
fieldName string
tagOptions TagOptions
)
for i := 0; i < value.NumField(); i++ {
var (
vField = value.Field(i)
tField = value.Type().Field(i)
tagOptions = getTagOptions(tField.Tag.Get(TagName))
)
if tField.Type.Kind() == reflect.Struct && tField.Anonymous {
if n, o := getRelatedField(vField.Interface(), name); n != "" {
return n, o
}
}
if v, ok := tagOptions[FieldOptionName]; ok && v == name {
return tField.Name, tagOptions
}
if tField.Name == name {
return tField.Name, tagOptions
}
}
return fieldName, tagOptions
}
// getMethodNames returns instance's method names.
func getMethodNames(instance interface{}) []string {
var methods []string
t := reflect.TypeOf(instance)
for i := 0; i < t.NumMethod(); i++ {
methods = append(methods, t.Method(i).Name)
}
return methods
}
// getFieldNames returns instance's field names.
func getFieldNames(instance interface{}) []string {
var (
fields []string
v = reflect.Indirect(reflect.ValueOf(instance))
t = v.Type()
)
if t.Kind() != reflect.Struct {
return nil
}
for i := 0; i < v.NumField(); i++ {
var (
vField = v.Field(i)
tField = v.Type().Field(i)
)
// Is exportable?
if tField.PkgPath != "" {
continue
}
if tField.Type.Kind() == reflect.Struct && tField.Anonymous {
fields = append(fields, getFieldNames(vField.Interface())...)
continue
}
fields = append(fields, tField.Name)
}
return fields
}
// isNullableType returns true if the given type is a nullable one.
func isNullableType(t reflect.Type) bool {
return t.ConvertibleTo(reflect.TypeOf((*driver.Valuer)(nil)).Elem())
}

View file

@@ -1,892 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package terminal
import (
"bytes"
"io"
"sync"
"unicode/utf8"
)
// EscapeCodes contains escape sequences that can be written to the terminal in
// order to achieve different styles of text.
type EscapeCodes struct {
// Foreground colors
Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte
// Reset all attributes
Reset []byte
}
var vt100EscapeCodes = EscapeCodes{
Black: []byte{keyEscape, '[', '3', '0', 'm'},
Red: []byte{keyEscape, '[', '3', '1', 'm'},
Green: []byte{keyEscape, '[', '3', '2', 'm'},
Yellow: []byte{keyEscape, '[', '3', '3', 'm'},
Blue: []byte{keyEscape, '[', '3', '4', 'm'},
Magenta: []byte{keyEscape, '[', '3', '5', 'm'},
Cyan: []byte{keyEscape, '[', '3', '6', 'm'},
White: []byte{keyEscape, '[', '3', '7', 'm'},
Reset: []byte{keyEscape, '[', '0', 'm'},
}
// Terminal contains the state for running a VT100 terminal that is capable of
// reading lines of input.
type Terminal struct {
// AutoCompleteCallback, if non-null, is called for each keypress with
// the full input line and the current position of the cursor (in
// bytes, as an index into |line|). If it returns ok=false, the key
// press is processed normally. Otherwise it returns a replacement line
// and the new cursor position.
AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool)
// Escape contains a pointer to the escape codes for this terminal.
// It's always a valid pointer, although the escape codes themselves
// may be empty if the terminal doesn't support them.
Escape *EscapeCodes
// lock protects the terminal and the state in this object from
// concurrent processing of a key press and a Write() call.
lock sync.Mutex
c io.ReadWriter
prompt []rune
// line is the current line being entered.
line []rune
// pos is the logical position of the cursor in line
pos int
// echo is true if local echo is enabled
echo bool
// pasteActive is true iff there is a bracketed paste operation in
// progress.
pasteActive bool
// cursorX contains the current X value of the cursor where the left
// edge is 0. cursorY contains the row number where the first row of
// the current line is 0.
cursorX, cursorY int
// maxLine is the greatest value of cursorY so far.
maxLine int
termWidth, termHeight int
// outBuf contains the terminal data to be sent.
outBuf []byte
// remainder contains the remainder of any partial key sequences after
// a read. It aliases into inBuf.
remainder []byte
inBuf [256]byte
// history contains previously entered commands so that they can be
// accessed with the up and down keys.
history stRingBuffer
// historyIndex stores the currently accessed history entry, where zero
// means the immediately previous entry.
historyIndex int
// When navigating up and down the history it's possible to return to
// the incomplete, initial line. That value is stored in
// historyPending.
historyPending string
}
// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is
// a local terminal, that terminal must first have been put into raw mode.
// prompt is a string that is written at the start of each input line (i.e.
// "> ").
func NewTerminal(c io.ReadWriter, prompt string) *Terminal {
return &Terminal{
Escape: &vt100EscapeCodes,
c: c,
prompt: []rune(prompt),
termWidth: 80,
termHeight: 24,
echo: true,
historyIndex: -1,
}
}
const (
keyCtrlD = 4
keyCtrlU = 21
keyEnter = '\r'
keyEscape = 27
keyBackspace = 127
keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota
keyUp
keyDown
keyLeft
keyRight
keyAltLeft
keyAltRight
keyHome
keyEnd
keyDeleteWord
keyDeleteLine
keyClearScreen
keyPasteStart
keyPasteEnd
)
var pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'}
var pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'}
// bytesToKey tries to parse a key sequence from b. If successful, it returns
// the key and the remainder of the input. Otherwise it returns utf8.RuneError.
func bytesToKey(b []byte, pasteActive bool) (rune, []byte) {
if len(b) == 0 {
return utf8.RuneError, nil
}
if !pasteActive {
switch b[0] {
case 1: // ^A
return keyHome, b[1:]
case 5: // ^E
return keyEnd, b[1:]
case 8: // ^H
return keyBackspace, b[1:]
case 11: // ^K
return keyDeleteLine, b[1:]
case 12: // ^L
return keyClearScreen, b[1:]
case 23: // ^W
return keyDeleteWord, b[1:]
}
}
if b[0] != keyEscape {
if !utf8.FullRune(b) {
return utf8.RuneError, b
}
r, l := utf8.DecodeRune(b)
return r, b[l:]
}
if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' {
switch b[2] {
case 'A':
return keyUp, b[3:]
case 'B':
return keyDown, b[3:]
case 'C':
return keyRight, b[3:]
case 'D':
return keyLeft, b[3:]
case 'H':
return keyHome, b[3:]
case 'F':
return keyEnd, b[3:]
}
}
if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' {
switch b[5] {
case 'C':
return keyAltRight, b[6:]
case 'D':
return keyAltLeft, b[6:]
}
}
if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) {
return keyPasteStart, b[6:]
}
if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) {
return keyPasteEnd, b[6:]
}
// If we get here then we have a key that we don't recognise, or a
// partial sequence. It's not clear how one should find the end of a
// sequence without knowing them all, but it seems that [a-zA-Z~] only
// appears at the end of a sequence.
for i, c := range b[0:] {
if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' {
return keyUnknown, b[i+1:]
}
}
return utf8.RuneError, b
}
// queue appends data to the end of t.outBuf
func (t *Terminal) queue(data []rune) {
t.outBuf = append(t.outBuf, []byte(string(data))...)
}
var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'}
var space = []rune{' '}
func isPrintable(key rune) bool {
isInSurrogateArea := key >= 0xd800 && key <= 0xdbff
return key >= 32 && !isInSurrogateArea
}
// moveCursorToPos appends data to t.outBuf which will move the cursor to the
// given, logical position in the text.
func (t *Terminal) moveCursorToPos(pos int) {
if !t.echo {
return
}
x := visualLength(t.prompt) + pos
y := x / t.termWidth
x = x % t.termWidth
up := 0
if y < t.cursorY {
up = t.cursorY - y
}
down := 0
if y > t.cursorY {
down = y - t.cursorY
}
left := 0
if x < t.cursorX {
left = t.cursorX - x
}
right := 0
if x > t.cursorX {
right = x - t.cursorX
}
t.cursorX = x
t.cursorY = y
t.move(up, down, left, right)
}
func (t *Terminal) move(up, down, left, right int) {
movement := make([]rune, 3*(up+down+left+right))
m := movement
for i := 0; i < up; i++ {
m[0] = keyEscape
m[1] = '['
m[2] = 'A'
m = m[3:]
}
for i := 0; i < down; i++ {
m[0] = keyEscape
m[1] = '['
m[2] = 'B'
m = m[3:]
}
for i := 0; i < left; i++ {
m[0] = keyEscape
m[1] = '['
m[2] = 'D'
m = m[3:]
}
for i := 0; i < right; i++ {
m[0] = keyEscape
m[1] = '['
m[2] = 'C'
m = m[3:]
}
t.queue(movement)
}
func (t *Terminal) clearLineToRight() {
op := []rune{keyEscape, '[', 'K'}
t.queue(op)
}
const maxLineLength = 4096
func (t *Terminal) setLine(newLine []rune, newPos int) {
if t.echo {
t.moveCursorToPos(0)
t.writeLine(newLine)
for i := len(newLine); i < len(t.line); i++ {
t.writeLine(space)
}
t.moveCursorToPos(newPos)
}
t.line = newLine
t.pos = newPos
}
func (t *Terminal) advanceCursor(places int) {
t.cursorX += places
t.cursorY += t.cursorX / t.termWidth
if t.cursorY > t.maxLine {
t.maxLine = t.cursorY
}
t.cursorX = t.cursorX % t.termWidth
if places > 0 && t.cursorX == 0 {
// Normally terminals will advance the current position
// when writing a character. But that doesn't happen
// for the last character in a line. However, when
// writing a character (except a new line) that causes
// a line wrap, the position will be advanced two
// places.
//
// So, if we are stopping at the end of a line, we
// need to write a newline so that our cursor can be
// advanced to the next line.
t.outBuf = append(t.outBuf, '\n')
}
}
func (t *Terminal) eraseNPreviousChars(n int) {
if n == 0 {
return
}
if t.pos < n {
n = t.pos
}
t.pos -= n
t.moveCursorToPos(t.pos)
copy(t.line[t.pos:], t.line[n+t.pos:])
t.line = t.line[:len(t.line)-n]
if t.echo {
t.writeLine(t.line[t.pos:])
for i := 0; i < n; i++ {
t.queue(space)
}
t.advanceCursor(n)
t.moveCursorToPos(t.pos)
}
}
// countToLeftWord returns the number of characters from the cursor to the
// start of the previous word.
func (t *Terminal) countToLeftWord() int {
if t.pos == 0 {
return 0
}
pos := t.pos - 1
for pos > 0 {
if t.line[pos] != ' ' {
break
}
pos--
}
for pos > 0 {
if t.line[pos] == ' ' {
pos++
break
}
pos--
}
return t.pos - pos
}
// countToRightWord returns the number of characters from the cursor to the
// start of the next word.
func (t *Terminal) countToRightWord() int {
pos := t.pos
for pos < len(t.line) {
if t.line[pos] == ' ' {
break
}
pos++
}
for pos < len(t.line) {
if t.line[pos] != ' ' {
break
}
pos++
}
return pos - t.pos
}
// visualLength returns the number of visible glyphs in s.
func visualLength(runes []rune) int {
inEscapeSeq := false
length := 0
for _, r := range runes {
switch {
case inEscapeSeq:
if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') {
inEscapeSeq = false
}
case r == '\x1b':
inEscapeSeq = true
default:
length++
}
}
return length
}
// handleKey processes the given key and, optionally, returns a line of text
// that the user has entered.
func (t *Terminal) handleKey(key rune) (line string, ok bool) {
if t.pasteActive && key != keyEnter {
t.addKeyToLine(key)
return
}
switch key {
case keyBackspace:
if t.pos == 0 {
return
}
t.eraseNPreviousChars(1)
case keyAltLeft:
// move left by a word.
t.pos -= t.countToLeftWord()
t.moveCursorToPos(t.pos)
case keyAltRight:
// move right by a word.
t.pos += t.countToRightWord()
t.moveCursorToPos(t.pos)
case keyLeft:
if t.pos == 0 {
return
}
t.pos--
t.moveCursorToPos(t.pos)
case keyRight:
if t.pos == len(t.line) {
return
}
t.pos++
t.moveCursorToPos(t.pos)
case keyHome:
if t.pos == 0 {
return
}
t.pos = 0
t.moveCursorToPos(t.pos)
case keyEnd:
if t.pos == len(t.line) {
return
}
t.pos = len(t.line)
t.moveCursorToPos(t.pos)
case keyUp:
entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1)
if !ok {
return "", false
}
if t.historyIndex == -1 {
t.historyPending = string(t.line)
}
t.historyIndex++
runes := []rune(entry)
t.setLine(runes, len(runes))
case keyDown:
switch t.historyIndex {
case -1:
return
case 0:
runes := []rune(t.historyPending)
t.setLine(runes, len(runes))
t.historyIndex--
default:
entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1)
if ok {
t.historyIndex--
runes := []rune(entry)
t.setLine(runes, len(runes))
}
}
case keyEnter:
t.moveCursorToPos(len(t.line))
t.queue([]rune("\r\n"))
line = string(t.line)
ok = true
t.line = t.line[:0]
t.pos = 0
t.cursorX = 0
t.cursorY = 0
t.maxLine = 0
case keyDeleteWord:
// Delete zero or more spaces and then one or more characters.
t.eraseNPreviousChars(t.countToLeftWord())
case keyDeleteLine:
// Delete everything from the current cursor position to the
// end of line.
for i := t.pos; i < len(t.line); i++ {
t.queue(space)
t.advanceCursor(1)
}
t.line = t.line[:t.pos]
t.moveCursorToPos(t.pos)
case keyCtrlD:
// Erase the character under the current position.
// The EOF case when the line is empty is handled in
// readLine().
if t.pos < len(t.line) {
t.pos++
t.eraseNPreviousChars(1)
}
case keyCtrlU:
t.eraseNPreviousChars(t.pos)
case keyClearScreen:
// Erases the screen and moves the cursor to the home position.
t.queue([]rune("\x1b[2J\x1b[H"))
t.queue(t.prompt)
t.cursorX, t.cursorY = 0, 0
t.advanceCursor(visualLength(t.prompt))
t.setLine(t.line, t.pos)
default:
if t.AutoCompleteCallback != nil {
prefix := string(t.line[:t.pos])
suffix := string(t.line[t.pos:])
t.lock.Unlock()
newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key)
t.lock.Lock()
if completeOk {
t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos]))
return
}
}
if !isPrintable(key) {
return
}
if len(t.line) == maxLineLength {
return
}
t.addKeyToLine(key)
}
return
}
// addKeyToLine inserts the given key at the current position in the current
// line.
func (t *Terminal) addKeyToLine(key rune) {
if len(t.line) == cap(t.line) {
newLine := make([]rune, len(t.line), 2*(1+len(t.line)))
copy(newLine, t.line)
t.line = newLine
}
t.line = t.line[:len(t.line)+1]
copy(t.line[t.pos+1:], t.line[t.pos:])
t.line[t.pos] = key
if t.echo {
t.writeLine(t.line[t.pos:])
}
t.pos++
t.moveCursorToPos(t.pos)
}
func (t *Terminal) writeLine(line []rune) {
for len(line) != 0 {
remainingOnLine := t.termWidth - t.cursorX
todo := len(line)
if todo > remainingOnLine {
todo = remainingOnLine
}
t.queue(line[:todo])
t.advanceCursor(visualLength(line[:todo]))
line = line[todo:]
}
}
func (t *Terminal) Write(buf []byte) (n int, err error) {
t.lock.Lock()
defer t.lock.Unlock()
if t.cursorX == 0 && t.cursorY == 0 {
// This is the easy case: there's nothing on the screen that we
// have to move out of the way.
return t.c.Write(buf)
}
// We have a prompt and possibly user input on the screen. We
// have to clear it first.
t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */)
t.cursorX = 0
t.clearLineToRight()
for t.cursorY > 0 {
t.move(1 /* up */, 0, 0, 0)
t.cursorY--
t.clearLineToRight()
}
if _, err = t.c.Write(t.outBuf); err != nil {
return
}
t.outBuf = t.outBuf[:0]
if n, err = t.c.Write(buf); err != nil {
return
}
t.writeLine(t.prompt)
if t.echo {
t.writeLine(t.line)
}
t.moveCursorToPos(t.pos)
if _, err = t.c.Write(t.outBuf); err != nil {
return
}
t.outBuf = t.outBuf[:0]
return
}
// ReadPassword temporarily changes the prompt and reads a password, without
// echo, from the terminal.
func (t *Terminal) ReadPassword(prompt string) (line string, err error) {
t.lock.Lock()
defer t.lock.Unlock()
oldPrompt := t.prompt
t.prompt = []rune(prompt)
t.echo = false
line, err = t.readLine()
t.prompt = oldPrompt
t.echo = true
return
}
// ReadLine returns a line of input from the terminal.
func (t *Terminal) ReadLine() (line string, err error) {
t.lock.Lock()
defer t.lock.Unlock()
return t.readLine()
}
func (t *Terminal) readLine() (line string, err error) {
// t.lock must be held at this point
if t.cursorX == 0 && t.cursorY == 0 {
t.writeLine(t.prompt)
t.c.Write(t.outBuf)
t.outBuf = t.outBuf[:0]
}
lineIsPasted := t.pasteActive
for {
rest := t.remainder
lineOk := false
for !lineOk {
var key rune
key, rest = bytesToKey(rest, t.pasteActive)
if key == utf8.RuneError {
break
}
if !t.pasteActive {
if key == keyCtrlD {
if len(t.line) == 0 {
return "", io.EOF
}
}
if key == keyPasteStart {
t.pasteActive = true
if len(t.line) == 0 {
lineIsPasted = true
}
continue
}
} else if key == keyPasteEnd {
t.pasteActive = false
continue
}
if !t.pasteActive {
lineIsPasted = false
}
line, lineOk = t.handleKey(key)
}
if len(rest) > 0 {
n := copy(t.inBuf[:], rest)
t.remainder = t.inBuf[:n]
} else {
t.remainder = nil
}
t.c.Write(t.outBuf)
t.outBuf = t.outBuf[:0]
if lineOk {
if t.echo {
t.historyIndex = -1
t.history.Add(line)
}
if lineIsPasted {
err = ErrPasteIndicator
}
return
}
// t.remainder is a slice at the beginning of t.inBuf
// containing a partial key sequence
readBuf := t.inBuf[len(t.remainder):]
var n int
t.lock.Unlock()
n, err = t.c.Read(readBuf)
t.lock.Lock()
if err != nil {
return
}
t.remainder = t.inBuf[:n+len(t.remainder)]
}
panic("unreachable") // for Go 1.0.
}
// SetPrompt sets the prompt to be used when reading subsequent lines.
func (t *Terminal) SetPrompt(prompt string) {
t.lock.Lock()
defer t.lock.Unlock()
t.prompt = []rune(prompt)
}
func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) {
// Move cursor to column zero at the start of the line.
t.move(t.cursorY, 0, t.cursorX, 0)
t.cursorX, t.cursorY = 0, 0
t.clearLineToRight()
for t.cursorY < numPrevLines {
// Move down a line
t.move(0, 1, 0, 0)
t.cursorY++
t.clearLineToRight()
}
// Move back to beginning.
t.move(t.cursorY, 0, 0, 0)
t.cursorX, t.cursorY = 0, 0
t.queue(t.prompt)
t.advanceCursor(visualLength(t.prompt))
t.writeLine(t.line)
t.moveCursorToPos(t.pos)
}
func (t *Terminal) SetSize(width, height int) error {
t.lock.Lock()
defer t.lock.Unlock()
if width == 0 {
width = 1
}
oldWidth := t.termWidth
t.termWidth, t.termHeight = width, height
switch {
case width == oldWidth:
// If the width didn't change then nothing else needs to be
// done.
return nil
case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0:
// If there is nothing on current line and no prompt printed,
// just do nothing
return nil
case width < oldWidth:
// Some terminals (e.g. xterm) will truncate lines that were
// too long when shrinking. Others (e.g. gnome-terminal) will
// attempt to wrap them. For the former, repainting t.maxLine
// works great, but that behaviour goes badly wrong in the case
// of the latter because they have doubled every full line.
// We assume that we are working on a terminal that wraps lines
// and adjust the cursor position based on every previous line
// wrapping and turning into two. This causes the prompt on
// xterms to move upwards, which isn't great, but it avoids a
// huge mess with gnome-terminal.
if t.cursorX >= t.termWidth {
t.cursorX = t.termWidth - 1
}
t.cursorY *= 2
t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2)
case width > oldWidth:
// If the terminal expands then our position calculations will
// be wrong in the future because we think the cursor is
// |t.pos| chars into the string, but there will be a gap at
// the end of any wrapped line.
//
// But the position will actually be correct until we move, so
// we can move back to the beginning and repaint everything.
t.clearAndRepaintLinePlusNPrevious(t.maxLine)
}
_, err := t.c.Write(t.outBuf)
t.outBuf = t.outBuf[:0]
return err
}
type pasteIndicatorError struct{}
func (pasteIndicatorError) Error() string {
return "terminal: ErrPasteIndicator not correctly handled"
}
// ErrPasteIndicator may be returned from ReadLine as the error, in addition
// to valid line data. It indicates that bracketed paste mode is enabled and
// that the returned line consists only of pasted data. Programs may wish to
// interpret pasted data more literally than typed data.
var ErrPasteIndicator = pasteIndicatorError{}
// SetBracketedPasteMode requests that the terminal bracket paste operations
// with markers. Not all terminals support this but, if it is supported, then
// enabling this mode will stop any autocomplete callback from running due to
// pastes. Additionally, any lines that are completely pasted will be returned
// from ReadLine with the error set to ErrPasteIndicator.
func (t *Terminal) SetBracketedPasteMode(on bool) {
if on {
io.WriteString(t.c, "\x1b[?2004h")
} else {
io.WriteString(t.c, "\x1b[?2004l")
}
}
// stRingBuffer is a ring buffer of strings.
type stRingBuffer struct {
// entries contains max elements.
entries []string
max int
// head contains the index of the element most recently added to the ring.
head int
// size contains the number of elements in the ring.
size int
}
func (s *stRingBuffer) Add(a string) {
if s.entries == nil {
const defaultNumEntries = 100
s.entries = make([]string, defaultNumEntries)
s.max = defaultNumEntries
}
s.head = (s.head + 1) % s.max
s.entries[s.head] = a
if s.size < s.max {
s.size++
}
}
// NthPreviousEntry returns the value passed to the nth previous call to Add.
// If n is zero then the immediately prior value is returned, if one, then the
// next most recent, and so on. If such an element doesn't exist then ok is
// false.
func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) {
if n >= s.size {
return "", false
}
index := s.head - n
if index < 0 {
index += s.max
}
return s.entries[index], true
}

View file

@@ -1,128 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd
// Package terminal provides support functions for dealing with terminals, as
// commonly found on UNIX systems.
//
// Putting a terminal into raw mode is the most common requirement:
//
// oldState, err := terminal.MakeRaw(0)
// if err != nil {
// panic(err)
// }
// defer terminal.Restore(0, oldState)
package terminal // import "golang.org/x/crypto/ssh/terminal"
import (
"io"
"syscall"
"unsafe"
)
// State contains the state of a terminal.
type State struct {
termios syscall.Termios
}
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
var termios syscall.Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
}
// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd int) (*State, error) {
var oldState State
if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
return nil, err
}
newState := oldState.termios
newState.Iflag &^= syscall.ISTRIP | syscall.INLCR | syscall.ICRNL | syscall.IGNCR | syscall.IXON | syscall.IXOFF
newState.Lflag &^= syscall.ECHO | syscall.ICANON | syscall.ISIG
if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
return nil, err
}
return &oldState, nil
}
// GetState returns the current state of a terminal which may be useful to
// restore the terminal after a signal.
func GetState(fd int) (*State, error) {
var oldState State
if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
return nil, err
}
return &oldState, nil
}
// Restore restores the terminal connected to the given file descriptor to a
// previous state.
func Restore(fd int, state *State) error {
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0)
return err
}
// GetSize returns the dimensions of the given terminal.
func GetSize(fd int) (width, height int, err error) {
var dimensions [4]uint16
if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 {
return -1, -1, err
}
return int(dimensions[1]), int(dimensions[0]), nil
}
// ReadPassword reads a line of input from a terminal without local echo. This
// is commonly used for inputting passwords and other sensitive data. The slice
// returned does not include the \n.
func ReadPassword(fd int) ([]byte, error) {
var oldState syscall.Termios
if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 {
return nil, err
}
newState := oldState
newState.Lflag &^= syscall.ECHO
newState.Lflag |= syscall.ICANON | syscall.ISIG
newState.Iflag |= syscall.ICRNL
if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
return nil, err
}
defer func() {
syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0)
}()
var buf [16]byte
var ret []byte
for {
n, err := syscall.Read(fd, buf[:])
if err != nil {
return nil, err
}
if n == 0 {
if len(ret) == 0 {
return nil, io.EOF
}
break
}
if buf[n-1] == '\n' {
n--
}
ret = append(ret, buf[:n]...)
if n < len(buf) {
break
}
}
return ret, nil
}

View file

@@ -1,12 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd netbsd openbsd
package terminal
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
const ioctlWriteTermios = syscall.TIOCSETA

View file

@@ -1,11 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package terminal
// These constants are declared here, rather than importing
// them from the syscall package as some syscall packages, even
// on linux, for example gccgo, do not declare them.
const ioctlReadTermios = 0x5401 // syscall.TCGETS
const ioctlWriteTermios = 0x5402 // syscall.TCSETS

View file

@ -1,58 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package terminal provides support functions for dealing with terminals, as
// commonly found on UNIX systems.
//
// Putting a terminal into raw mode is the most common requirement:
//
// oldState, err := terminal.MakeRaw(0)
// if err != nil {
// panic(err)
// }
// defer terminal.Restore(0, oldState)
package terminal
import (
"fmt"
"runtime"
)
type State struct{}
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
return false
}
// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd int) (*State, error) {
return nil, fmt.Errorf("terminal: MakeRaw not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}
// GetState returns the current state of a terminal which may be useful to
// restore the terminal after a signal.
func GetState(fd int) (*State, error) {
return nil, fmt.Errorf("terminal: GetState not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}
// Restore restores the terminal connected to the given file descriptor to a
// previous state.
func Restore(fd int, state *State) error {
return fmt.Errorf("terminal: Restore not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}
// GetSize returns the dimensions of the given terminal.
func GetSize(fd int) (width, height int, err error) {
return 0, 0, fmt.Errorf("terminal: GetSize not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}
// ReadPassword reads a line of input from a terminal without local echo. This
// is commonly used for inputting passwords and other sensitive data. The slice
// returned does not include the \n.
func ReadPassword(fd int) ([]byte, error) {
return nil, fmt.Errorf("terminal: ReadPassword not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}

View file

@@ -1,174 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
// Package terminal provides support functions for dealing with terminals, as
// commonly found on UNIX systems.
//
// Putting a terminal into raw mode is the most common requirement:
//
// oldState, err := terminal.MakeRaw(0)
// if err != nil {
// panic(err)
// }
// defer terminal.Restore(0, oldState)
package terminal
import (
"io"
"syscall"
"unsafe"
)
const (
enableLineInput = 2
enableEchoInput = 4
enableProcessedInput = 1
enableWindowInput = 8
enableMouseInput = 16
enableInsertMode = 32
enableQuickEditMode = 64
enableExtendedFlags = 128
enableAutoPosition = 256
enableProcessedOutput = 1
enableWrapAtEolOutput = 2
)
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
var (
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
)
type (
short int16
word uint16
coord struct {
x short
y short
}
smallRect struct {
left short
top short
right short
bottom short
}
consoleScreenBufferInfo struct {
size coord
cursorPosition coord
attributes word
window smallRect
maximumWindowSize coord
}
)
type State struct {
mode uint32
}
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
}
// MakeRaw put the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd int) (*State, error) {
var st uint32
_, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
if e != 0 {
return nil, error(e)
}
raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput)
_, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0)
if e != 0 {
return nil, error(e)
}
return &State{st}, nil
}
// GetState returns the current state of a terminal which may be useful to
// restore the terminal after a signal.
func GetState(fd int) (*State, error) {
var st uint32
_, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
if e != 0 {
return nil, error(e)
}
return &State{st}, nil
}
// Restore restores the terminal connected to the given file descriptor to a
// previous state.
func Restore(fd int, state *State) error {
_, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0)
return err
}
// GetSize returns the dimensions of the given terminal.
func GetSize(fd int) (width, height int, err error) {
var info consoleScreenBufferInfo
_, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0)
if e != 0 {
return 0, 0, error(e)
}
return int(info.size.x), int(info.size.y), nil
}
// ReadPassword reads a line of input from a terminal without local echo. This
// is commonly used for inputting passwords and other sensitive data. The slice
// returned does not include the \n.
func ReadPassword(fd int) ([]byte, error) {
var st uint32
_, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
if e != 0 {
return nil, error(e)
}
old := st
st &^= (enableEchoInput)
st |= (enableProcessedInput | enableLineInput | enableProcessedOutput)
_, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)
if e != 0 {
return nil, error(e)
}
defer func() {
syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0)
}()
var buf [16]byte
var ret []byte
for {
n, err := syscall.Read(syscall.Handle(fd), buf[:])
if err != nil {
return nil, err
}
if n == 0 {
if len(ret) == 0 {
return nil, io.EOF
}
break
}
if buf[n-1] == '\n' {
n--
}
if n > 0 && buf[n-1] == '\r' {
n--
}
ret = append(ret, buf[:n]...)
if n < len(buf) {
break
}
}
return ret, nil
}

View file

@ -151,6 +151,10 @@ func (m *AdmissionHookClientConfig) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle)))
i += copy(dAtA[i:], m.CABundle)
}
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.URLPath)))
i += copy(dAtA[i:], m.URLPath)
return i, nil
}
@ -554,6 +558,8 @@ func (m *AdmissionHookClientConfig) Size() (n int) {
l = len(m.CABundle)
n += 1 + l + sovGenerated(uint64(l))
}
l = len(m.URLPath)
n += 1 + l + sovGenerated(uint64(l))
return n
}
@ -715,6 +721,7 @@ func (this *AdmissionHookClientConfig) String() string {
s := strings.Join([]string{`&AdmissionHookClientConfig{`,
`Service:` + strings.Replace(strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1), `&`, ``, 1) + `,`,
`CABundle:` + valueToStringGenerated(this.CABundle) + `,`,
`URLPath:` + fmt.Sprintf("%v", this.URLPath) + `,`,
`}`,
}, "")
return s
@ -919,6 +926,35 @@ func (m *AdmissionHookClientConfig) Unmarshal(dAtA []byte) error {
m.CABundle = []byte{}
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field URLPath", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.URLPath = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@ -2128,60 +2164,61 @@ func init() {
}
var fileDescriptorGenerated = []byte{
// 893 bytes of a gzipped FileDescriptorProto (regenerated; previously 871 bytes — raw gzipped bytes omitted)
}

View file

@ -38,6 +38,9 @@ message AdmissionHookClientConfig {
// Required
optional ServiceReference service = 1;
// URLPath is an optional field that specifies the URL path to use when posting the AdmissionReview object.
optional string urlPath = 3;
// CABundle is a PEM encoded CA bundle which will be used to validate webhook's server certificate.
// Required
optional bytes caBundle = 2;

View file

@ -108,7 +108,7 @@ type Rule struct {
type FailurePolicyType string
const (
// Ignore means the initilizer is removed from the initializers list of an // Ignore means the initializer is removed from the initializers list of an
// object if the initializer is timed out.
Ignore FailurePolicyType = "Ignore"
// For 1.7, only "Ignore" is allowed. "Fail" will be allowed when the
@ -203,6 +203,10 @@ type AdmissionHookClientConfig struct {
// ports open, port 443 will be used if it is open, otherwise it is an error.
// Required
Service ServiceReference `json:"service" protobuf:"bytes,1,opt,name=service"`
// URLPath is an optional field that specifies the URL path to use when posting the AdmissionReview object.
URLPath string `json:"urlPath" protobuf:"bytes,3,opt,name=urlPath"`
// CABundle is a PEM encoded CA bundle which will be used to validate webhook's server certificate.
// Required
CABundle []byte `json:"caBundle" protobuf:"bytes,2,opt,name=caBundle"`
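For orientation only: a minimal sketch of how a client config using the new urlPath field might be built. It assumes this type lives in the admissionregistration v1alpha1 package and that ServiceReference takes a namespace and a name; every value below is made up for illustration.

package main

import (
	"fmt"

	admissionv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" // assumed import path
)

func main() {
	// Hypothetical webhook client config: the service reference plus the new optional URLPath.
	cfg := admissionv1alpha1.AdmissionHookClientConfig{
		Service: admissionv1alpha1.ServiceReference{
			Namespace: "webhook-system",         // illustrative
			Name:      "example-admission-hook", // illustrative
		},
		URLPath:  "/admit",   // the field added by this bump
		CABundle: []byte(""), // a real config would carry a PEM-encoded CA bundle
	}
	fmt.Println(cfg.URLPath)
}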

View file

@ -30,6 +30,7 @@ package v1alpha1
var map_AdmissionHookClientConfig = map[string]string{
"": "AdmissionHookClientConfig contains the information to make a TLS connection with the webhook",
"service": "Service is a reference to the service for this webhook. If there is only one port open for the service, that port will be used. If there are multiple ports open, port 443 will be used if it is open, otherwise it is an error. Required",
"urlPath": "URLPath is an optional field that specifies the URL path to use when posting the AdmissionReview object.",
"caBundle": "CABundle is a PEM encoded CA bundle which will be used to validate webhook's server certificate. Required",
}

20
vendor/k8s.io/api/apps/v1/doc.go generated vendored Normal file
View file

@ -0,0 +1,20 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package,register
// +k8s:openapi-gen=true
package v1 // import "k8s.io/api/apps/v1"

1558
vendor/k8s.io/api/apps/v1/generated.pb.go generated vendored Normal file

File diff suppressed because it is too large

181
vendor/k8s.io/api/apps/v1/generated.proto generated vendored Normal file
View file

@ -0,0 +1,181 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = 'proto2';
package k8s.io.api.apps.v1;
import "k8s.io/api/core/v1/generated.proto";
import "k8s.io/api/policy/v1beta1/generated.proto";
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "v1";
// DaemonSet represents the configuration of a daemon set.
message DaemonSet {
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// The desired behavior of this daemon set.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
optional DaemonSetSpec spec = 2;
// The current status of this daemon set. This data may be
// out of date by some window of time.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
optional DaemonSetStatus status = 3;
}
// DaemonSetList is a collection of daemon sets.
message DaemonSetList {
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// A list of daemon sets.
repeated DaemonSet items = 2;
}
// DaemonSetSpec is the specification of a daemon set.
message DaemonSetSpec {
// A label query over pods that are managed by the daemon set.
// Must match in order to be controlled.
// If empty, defaulted to labels on Pod template.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1;
// An object that describes the pod that will be created.
// The DaemonSet will create exactly one copy of this pod on every node
// that matches the template's node selector (or on every node if no node
// selector is specified).
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
optional k8s.io.api.core.v1.PodTemplateSpec template = 2;
// An update strategy to replace existing DaemonSet pods with new pods.
// +optional
optional DaemonSetUpdateStrategy updateStrategy = 3;
// The minimum number of seconds for which a newly created DaemonSet pod should
// be ready without any of its container crashing, for it to be considered
// available. Defaults to 0 (pod will be considered available as soon as it
// is ready).
// +optional
optional int32 minReadySeconds = 4;
// The number of old history to retain to allow rollback.
// This is a pointer to distinguish between explicit zero and not specified.
// Defaults to 10.
// +optional
optional int32 revisionHistoryLimit = 6;
}
// DaemonSetStatus represents the current status of a daemon set.
message DaemonSetStatus {
// The number of nodes that are running at least 1
// daemon pod and are supposed to run the daemon pod.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
optional int32 currentNumberScheduled = 1;
// The number of nodes that are running the daemon pod, but are
// not supposed to run the daemon pod.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
optional int32 numberMisscheduled = 2;
// The total number of nodes that should be running the daemon
// pod (including nodes correctly running the daemon pod).
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
optional int32 desiredNumberScheduled = 3;
// The number of nodes that should be running the daemon pod and have one
// or more of the daemon pod running and ready.
optional int32 numberReady = 4;
// The most recent generation observed by the daemon set controller.
// +optional
optional int64 observedGeneration = 5;
// The total number of nodes that are running updated daemon pod
// +optional
optional int32 updatedNumberScheduled = 6;
// The number of nodes that should be running the
// daemon pod and have one or more of the daemon pod running and
// available (ready for at least spec.minReadySeconds)
// +optional
optional int32 numberAvailable = 7;
// The number of nodes that should be running the
// daemon pod and have none of the daemon pod running and available
// (ready for at least spec.minReadySeconds)
// +optional
optional int32 numberUnavailable = 8;
// Count of hash collisions for the DaemonSet. The DaemonSet controller
// uses this field as a collision avoidance mechanism when it needs to
// create the name for the newest ControllerRevision.
// +optional
optional int32 collisionCount = 9;
}
// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.
message DaemonSetUpdateStrategy {
// Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate.
// +optional
optional string type = 1;
// Rolling update config params. Present only if type = "RollingUpdate".
// ---
// TODO: Update this to follow our convention for oneOf, whatever we decide it
// to be. Same as Deployment `strategy.rollingUpdate`.
// See https://github.com/kubernetes/kubernetes/issues/35345
// +optional
optional RollingUpdateDaemonSet rollingUpdate = 2;
}
// Spec to control the desired behavior of daemon set rolling update.
message RollingUpdateDaemonSet {
// The maximum number of DaemonSet pods that can be unavailable during the
// update. Value can be an absolute number (ex: 5) or a percentage of total
// number of DaemonSet pods at the start of the update (ex: 10%). Absolute
// number is calculated from percentage by rounding up.
// This cannot be 0.
// Default value is 1.
// Example: when this is set to 30%, at most 30% of the total number of nodes
// that should be running the daemon pod (i.e. status.desiredNumberScheduled)
// can have their pods stopped for an update at any given
// time. The update starts by stopping at most 30% of those DaemonSet pods
// and then brings up new DaemonSet pods in their place. Once the new pods
// are available, it then proceeds onto other DaemonSet pods, thus ensuring
// that at least 70% of original number of DaemonSet pods are available at
// all times during the update.
// +optional
optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
}

52
vendor/k8s.io/api/apps/v1/register.go generated vendored Normal file
View file

@ -0,0 +1,52 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name use in this package
const GroupName = "apps"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
localSchemeBuilder = &SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&DaemonSet{},
&DaemonSetList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
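As a quick usage sketch (not part of the vendored file): registering these apps/v1 types into a fresh scheme via the AddToScheme helper defined above.

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme registers DaemonSet and DaemonSetList (plus the meta/v1 types) with the scheme.
	if err := appsv1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	gvk := appsv1.SchemeGroupVersion.WithKind("DaemonSet")
	fmt.Println(scheme.Recognizes(gvk)) // true once registration succeeded
}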

201
vendor/k8s.io/api/apps/v1/types.go generated vendored Normal file
View file

@ -0,0 +1,201 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
const (
ControllerRevisionHashLabelKey = "controller-revision-hash"
DeprecatedTemplateGeneration = "deprecated.daemonset.template.generation"
)
// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.
type DaemonSetUpdateStrategy struct {
// Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate.
// +optional
Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"`
// Rolling update config params. Present only if type = "RollingUpdate".
//---
// TODO: Update this to follow our convention for oneOf, whatever we decide it
// to be. Same as Deployment `strategy.rollingUpdate`.
// See https://github.com/kubernetes/kubernetes/issues/35345
// +optional
RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
}
type DaemonSetUpdateStrategyType string
const (
// Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other.
RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate"
// Replace the old daemons only when it's killed
OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete"
)
// Spec to control the desired behavior of daemon set rolling update.
type RollingUpdateDaemonSet struct {
// The maximum number of DaemonSet pods that can be unavailable during the
// update. Value can be an absolute number (ex: 5) or a percentage of total
// number of DaemonSet pods at the start of the update (ex: 10%). Absolute
// number is calculated from percentage by rounding up.
// This cannot be 0.
// Default value is 1.
// Example: when this is set to 30%, at most 30% of the total number of nodes
// that should be running the daemon pod (i.e. status.desiredNumberScheduled)
// can have their pods stopped for an update at any given
// time. The update starts by stopping at most 30% of those DaemonSet pods
// and then brings up new DaemonSet pods in their place. Once the new pods
// are available, it then proceeds onto other DaemonSet pods, thus ensuring
// that at least 70% of original number of DaemonSet pods are available at
// all times during the update.
// +optional
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
}
// DaemonSetSpec is the specification of a daemon set.
type DaemonSetSpec struct {
// A label query over pods that are managed by the daemon set.
// Must match in order to be controlled.
// If empty, defaulted to labels on Pod template.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"`
// An object that describes the pod that will be created.
// The DaemonSet will create exactly one copy of this pod on every node
// that matches the template's node selector (or on every node if no node
// selector is specified).
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"`
// An update strategy to replace existing DaemonSet pods with new pods.
// +optional
UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"`
// The minimum number of seconds for which a newly created DaemonSet pod should
// be ready without any of its container crashing, for it to be considered
// available. Defaults to 0 (pod will be considered available as soon as it
// is ready).
// +optional
MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
// The number of old history to retain to allow rollback.
// This is a pointer to distinguish between explicit zero and not specified.
// Defaults to 10.
// +optional
RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"`
}
// DaemonSetStatus represents the current status of a daemon set.
type DaemonSetStatus struct {
// The number of nodes that are running at least 1
// daemon pod and are supposed to run the daemon pod.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"`
// The number of nodes that are running the daemon pod, but are
// not supposed to run the daemon pod.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"`
// The total number of nodes that should be running the daemon
// pod (including nodes correctly running the daemon pod).
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"`
// The number of nodes that should be running the daemon pod and have one
// or more of the daemon pod running and ready.
NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"`
// The most recent generation observed by the daemon set controller.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"`
// The total number of nodes that are running updated daemon pod
// +optional
UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"`
// The number of nodes that should be running the
// daemon pod and have one or more of the daemon pod running and
// available (ready for at least spec.minReadySeconds)
// +optional
NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"`
// The number of nodes that should be running the
// daemon pod and have none of the daemon pod running and available
// (ready for at least spec.minReadySeconds)
// +optional
NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"`
// Count of hash collisions for the DaemonSet. The DaemonSet controller
// uses this field as a collision avoidance mechanism when it needs to
// create the name for the newest ControllerRevision.
// +optional
CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DaemonSet represents the configuration of a daemon set.
type DaemonSet struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// The desired behavior of this daemon set.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// The current status of this daemon set. This data may be
// out of date by some window of time.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
const (
// DefaultDaemonSetUniqueLabelKey is the default label key that is added
// to existing DaemonSet pods to distinguish between old and new
// DaemonSet pods during DaemonSet template updates.
DefaultDaemonSetUniqueLabelKey = ControllerRevisionHashLabelKey
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DaemonSetList is a collection of daemon sets.
type DaemonSetList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// A list of daemon sets.
Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"`
}
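To make the new types above concrete, here is a minimal sketch (illustrative, not part of the vendored code) of a DaemonSet using a RollingUpdate strategy with the 30% budget described in the comments; the names, labels, and image are made up.

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	maxUnavailable := intstr.FromString("30%")
	ds := appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{Name: "node-agent"},
		Spec: appsv1.DaemonSetSpec{
			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "node-agent"}},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "node-agent"}},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{Name: "agent", Image: "example.com/agent:1.0"}},
				},
			},
			UpdateStrategy: appsv1.DaemonSetUpdateStrategy{
				Type: appsv1.RollingUpdateDaemonSetStrategyType,
				// At most 30% of the desired DaemonSet pods may be unavailable during an update.
				RollingUpdate: &appsv1.RollingUpdateDaemonSet{MaxUnavailable: &maxUnavailable},
			},
		},
	}
	fmt.Println(ds.Spec.UpdateStrategy.Type)
}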

View file

@ -0,0 +1,100 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-generated-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE
var map_DaemonSet = map[string]string{
"": "DaemonSet represents the configuration of a daemon set.",
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
"spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
"status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
}
func (DaemonSet) SwaggerDoc() map[string]string {
return map_DaemonSet
}
var map_DaemonSetList = map[string]string{
"": "DaemonSetList is a collection of daemon sets.",
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
"items": "A list of daemon sets.",
}
func (DaemonSetList) SwaggerDoc() map[string]string {
return map_DaemonSetList
}
var map_DaemonSetSpec = map[string]string{
"": "DaemonSetSpec is the specification of a daemon set.",
"selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
"template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
"updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.",
"minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).",
"revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.",
}
func (DaemonSetSpec) SwaggerDoc() map[string]string {
return map_DaemonSetSpec
}
var map_DaemonSetStatus = map[string]string{
"": "DaemonSetStatus represents the current status of a daemon set.",
"currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
"numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
"desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
"numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.",
"observedGeneration": "The most recent generation observed by the daemon set controller.",
"updatedNumberScheduled": "The total number of nodes that are running updated daemon pod",
"numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)",
"numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)",
"collisionCount": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.",
}
func (DaemonSetStatus) SwaggerDoc() map[string]string {
return map_DaemonSetStatus
}
var map_DaemonSetUpdateStrategy = map[string]string{
"": "DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.",
"type": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate.",
"rollingUpdate": "Rolling update config params. Present only if type = \"RollingUpdate\".",
}
func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string {
return map_DaemonSetUpdateStrategy
}
var map_RollingUpdateDaemonSet = map[string]string{
"": "Spec to control the desired behavior of daemon set rolling update.",
"maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
}
func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {
return map_RollingUpdateDaemonSet
}
// AUTO-GENERATED FUNCTIONS END HERE

240
vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go generated vendored Normal file
View file

@ -0,0 +1,240 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
package v1
import (
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
reflect "reflect"
)
func init() {
SchemeBuilder.Register(RegisterDeepCopies)
}
// RegisterDeepCopies adds deep-copy functions to the given scheme. Public
// to allow building arbitrary schemes.
//
// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented.
func RegisterDeepCopies(scheme *runtime.Scheme) error {
return scheme.AddGeneratedDeepCopyFuncs(
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DaemonSet).DeepCopyInto(out.(*DaemonSet))
return nil
}, InType: reflect.TypeOf(&DaemonSet{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DaemonSetList).DeepCopyInto(out.(*DaemonSetList))
return nil
}, InType: reflect.TypeOf(&DaemonSetList{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DaemonSetSpec).DeepCopyInto(out.(*DaemonSetSpec))
return nil
}, InType: reflect.TypeOf(&DaemonSetSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DaemonSetStatus).DeepCopyInto(out.(*DaemonSetStatus))
return nil
}, InType: reflect.TypeOf(&DaemonSetStatus{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*DaemonSetUpdateStrategy).DeepCopyInto(out.(*DaemonSetUpdateStrategy))
return nil
}, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*RollingUpdateDaemonSet).DeepCopyInto(out.(*RollingUpdateDaemonSet))
return nil
}, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})},
)
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSet) DeepCopyInto(out *DaemonSet) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet.
func (in *DaemonSet) DeepCopy() *DaemonSet {
if in == nil {
return nil
}
out := new(DaemonSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DaemonSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]DaemonSet, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList.
func (in *DaemonSetList) DeepCopy() *DaemonSetList {
if in == nil {
return nil
}
out := new(DaemonSetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DaemonSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
if *in == nil {
*out = nil
} else {
*out = new(meta_v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
in.Template.DeepCopyInto(&out.Template)
in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
if in.RevisionHistoryLimit != nil {
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec.
func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec {
if in == nil {
return nil
}
out := new(DaemonSetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) {
*out = *in
if in.CollisionCount != nil {
in, out := &in.CollisionCount, &out.CollisionCount
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus.
func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus {
if in == nil {
return nil
}
out := new(DaemonSetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) {
*out = *in
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
if *in == nil {
*out = nil
} else {
*out = new(RollingUpdateDaemonSet)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy.
func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy {
if in == nil {
return nil
}
out := new(DaemonSetUpdateStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) {
*out = *in
if in.MaxUnavailable != nil {
in, out := &in.MaxUnavailable, &out.MaxUnavailable
if *in == nil {
*out = nil
} else {
*out = new(intstr.IntOrString)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet.
func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet {
if in == nil {
return nil
}
out := new(RollingUpdateDaemonSet)
in.DeepCopyInto(out)
return out
}
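A short sketch (illustrative only) of what these generated helpers buy you: mutating a DeepCopy leaves the original object untouched, including reference fields such as label maps.

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	orig := &appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{Name: "node-agent", Labels: map[string]string{"tier": "infra"}},
	}
	clone := orig.DeepCopy()
	clone.Labels["tier"] = "debug" // only the copy's label map is modified
	fmt.Println(orig.Labels["tier"], clone.Labels["tier"]) // prints: infra debug
}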

View file

@ -336,7 +336,7 @@ message DeploymentStrategy {
optional RollingUpdateDeployment rollingUpdate = 2;
}
// ReplicaSet represents the configuration of a ReplicaSet. // ReplicaSet ensures that a specified number of pod replicas are running at any given time.
message ReplicaSet {
// If the Labels of a ReplicaSet are empty, they are defaulted to
// be the same as the Pod(s) that the ReplicaSet manages.

View file

@ -629,12 +629,6 @@ type DaemonSet struct {
}
const (
// DEPRECATED: DefaultDaemonSetUniqueLabelKey is used instead.
// DaemonSetTemplateGenerationKey is the key of the labels that is added
// to daemon set pods to distinguish between old and new pod templates
// during DaemonSet template update.
DaemonSetTemplateGenerationKey string = "pod-template-generation"
// DefaultDaemonSetUniqueLabelKey is the default label key that is added
// to existing DaemonSet pods to distinguish between old and new
// DaemonSet pods during DaemonSet template updates.
@ -658,7 +652,7 @@ type DaemonSetList struct {
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ReplicaSet represents the configuration of a ReplicaSet. // ReplicaSet ensures that a specified number of pod replicas are running at any given time.
type ReplicaSet struct {
metav1.TypeMeta `json:",inline"`

View file

@ -187,7 +187,7 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string {
}
var map_ReplicaSet = map[string]string{
"": "ReplicaSet represents the configuration of a ReplicaSet.", "": "ReplicaSet ensures that a specified number of pod replicas are running at any given time.",
"metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
"spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
"status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",

View file

@ -121,7 +121,8 @@ message ResourceRule {
// +optional
repeated string apiGroups = 2;
// Resources is a list of resources this rule applies to. ResourceAll represents all resources. "*" means all. // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups.
// "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups.
// +optional // +optional
repeated string resources = 3; repeated string resources = 3;

View file

@ -241,7 +241,8 @@ type ResourceRule struct {
// the enumerated resources in any API group will be allowed. "*" means all. // the enumerated resources in any API group will be allowed. "*" means all.
// +optional // +optional
APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"`
// Resources is a list of resources this rule applies to. ResourceAll represents all resources. "*" means all. // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups.
// "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups.
// +optional // +optional
Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"`
// ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all.

View file

@ -76,7 +76,7 @@ var map_ResourceRule = map[string]string{
"": "ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", "": "ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.",
"verbs": "Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", "verbs": "Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. \"*\" means all.",
"apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"*\" means all.", "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"*\" means all.",
"resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources. \"*\" means all.", "resources": "Resources is a list of resources this rule applies to. \"*\" means all in the specified apiGroups.\n \"*/foo\" represents the subresource 'foo' for all resources in the specified apiGroups.",
"resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. \"*\" means all.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. \"*\" means all.",
} }

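The updated comment spells out the wildcard semantics for ResourceRule.Resources: "*" covers every resource in the rule's apiGroups, and "*/foo" covers the subresource foo of every resource. A simplified matching sketch of exactly those two rules (this helper is illustrative and not part of the vendored authorization code):

```go
package example

import "strings"

// resourceCovered reports whether a rule's Resources list covers a request
// for "resource" or "resource/subresource", using the semantics documented
// above: exact match, "*" for everything in the rule's apiGroups, and
// "*/foo" for the subresource "foo" of any resource.
func resourceCovered(ruleResources []string, requested string) bool {
	for _, r := range ruleResources {
		switch {
		case r == "*" || r == requested:
			return true
		case strings.HasPrefix(r, "*/"):
			if i := strings.Index(requested, "/"); i >= 0 && requested[i+1:] == r[2:] {
				return true
			}
		}
	}
	return false
}
```

For example, resourceCovered([]string{"*/status"}, "deployments/status") is true, while resourceCovered([]string{"*/status"}, "deployments") is false.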
View file

@ -121,7 +121,8 @@ message ResourceRule {
// +optional // +optional
repeated string apiGroups = 2; repeated string apiGroups = 2;
// Resources is a list of resources this rule applies to. ResourceAll represents all resources. "*" means all. // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups.
// "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups.
// +optional // +optional
repeated string resources = 3; repeated string resources = 3;

View file

@ -241,7 +241,8 @@ type ResourceRule struct {
// the enumerated resources in any API group will be allowed. "*" means all. // the enumerated resources in any API group will be allowed. "*" means all.
// +optional // +optional
APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"`
// Resources is a list of resources this rule applies to. ResourceAll represents all resources. "*" means all. // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups.
// "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups.
// +optional // +optional
Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"`
// ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all.

View file

@ -76,7 +76,7 @@ var map_ResourceRule = map[string]string{
"": "ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", "": "ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.",
"verbs": "Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", "verbs": "Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. \"*\" means all.",
"apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"*\" means all.", "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"*\" means all.",
"resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources. \"*\" means all.", "resources": "Resources is a list of resources this rule applies to. \"*\" means all in the specified apiGroups.\n \"*/foo\" represents the subresource 'foo' for all resources in the specified apiGroups.",
"resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. \"*\" means all.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. \"*\" means all.",
} }

View file

@ -264,7 +264,7 @@ var (
// ScalingActive indicates that the HPA controller is able to scale if necessary: // ScalingActive indicates that the HPA controller is able to scale if necessary:
// it's correctly configured, can fetch the desired metrics, and isn't disabled. // it's correctly configured, can fetch the desired metrics, and isn't disabled.
ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive"
// AbleToScale indicates a lack of transient issues which prevent scaling from occuring, // AbleToScale indicates a lack of transient issues which prevent scaling from occurring,
// such as being in a backoff window, or being unable to access/update the target scale. // such as being in a backoff window, or being unable to access/update the target scale.
AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale" AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale"
// ScalingLimited indicates that the calculated scale based on metrics would be above or // ScalingLimited indicates that the calculated scale based on metrics would be above or

View file

@ -180,7 +180,7 @@ var (
// ScalingActive indicates that the HPA controller is able to scale if necessary: // ScalingActive indicates that the HPA controller is able to scale if necessary:
// it's correctly configured, can fetch the desired metrics, and isn't disabled. // it's correctly configured, can fetch the desired metrics, and isn't disabled.
ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive"
// AbleToScale indicates a lack of transient issues which prevent scaling from occuring, // AbleToScale indicates a lack of transient issues which prevent scaling from occurring,
// such as being in a backoff window, or being unable to access/update the target scale. // such as being in a backoff window, or being unable to access/update the target scale.
AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale" AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale"
// ScalingLimited indicates that the calculated scale based on metrics would be above or // ScalingLimited indicates that the calculated scale based on metrics would be above or

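Both autoscaling packages fix the same typo in the AbleToScale description; the condition itself reports whether transient issues (such as a backoff window) are blocking scaling. A hedged sketch of how a caller might read it, assuming the autoscaling/v2beta1 types in this vendor drop (the helper name is illustrative):

```go
package example

import (
	autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
	corev1 "k8s.io/api/core/v1"
)

// ableToScale reports whether the HPA's AbleToScale condition is True, i.e.
// no transient issue is currently preventing scaling from occurring.
func ableToScale(hpa *autoscalingv2beta1.HorizontalPodAutoscaler) bool {
	for _, cond := range hpa.Status.Conditions {
		if cond.Type == autoscalingv2beta1.AbleToScale {
			return cond.Status == corev1.ConditionTrue
		}
	}
	return false
}
```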
View file

@ -128,7 +128,7 @@ message JobSpec {
// and other jobs to not function correctly. However, You may see // and other jobs to not function correctly. However, You may see
// `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1`
// API. // API.
// More info: https://git.k8s.io/community/contributors/design-proposals/selector-generation.md // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector
// +optional // +optional
optional bool manualSelector = 5; optional bool manualSelector = 5;

View file

@ -107,7 +107,7 @@ type JobSpec struct {
// and other jobs to not function correctly. However, You may see // and other jobs to not function correctly. However, You may see
// `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1`
// API. // API.
// More info: https://git.k8s.io/community/contributors/design-proposals/selector-generation.md // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector
// +optional // +optional
ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"`

View file

@ -69,7 +69,7 @@ var map_JobSpec = map[string]string{
"activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer", "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer",
"backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6",
"selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
"manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://git.k8s.io/community/contributors/design-proposals/selector-generation.md", "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector",
"template": "Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "template": "Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
} }

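The manualSelector reference now points at the user-facing docs; the field's behaviour is unchanged: when it is true, the caller owns both the selector and the pod labels. A minimal sketch of that case, assuming the batch/v1 types vendored here (names and label values are illustrative):

```go
package example

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// manualSelectorJob builds a Job with manualSelector=true, so the selector
// and the pod template labels below must be kept unique and consistent by
// the caller rather than being generated by the system.
func manualSelectorJob(name string) *batchv1.Job {
	manual := true
	labels := map[string]string{"job-name": name, "run-id": "run-1"}
	return &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: batchv1.JobSpec{
			ManualSelector: &manual,
			Selector:       &metav1.LabelSelector{MatchLabels: labels},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers:    []corev1.Container{{Name: "main", Image: "busybox", Command: []string{"true"}}},
				},
			},
		},
	}
}
```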
File diff suppressed because it is too large

View file

@ -121,7 +121,7 @@ message AzureDiskVolumeSource {
// +optional // +optional
optional bool readOnly = 5; optional bool readOnly = 5;
// Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared // Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared
optional string kind = 6; optional string kind = 6;
} }
@ -565,7 +565,7 @@ message Container {
// Security options the pod should run with. // Security options the pod should run with.
// More info: https://kubernetes.io/docs/concepts/policy/security-context/ // More info: https://kubernetes.io/docs/concepts/policy/security-context/
// More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
// +optional // +optional
optional SecurityContext securityContext = 15; optional SecurityContext securityContext = 15;
@ -1442,7 +1442,7 @@ message LimitRangeList {
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// Items is a list of LimitRange objects. // Items is a list of LimitRange objects.
// More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_limit_range.md // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
repeated LimitRange items = 2; repeated LimitRange items = 2;
} }
@ -1593,7 +1593,7 @@ message NamespaceList {
// NamespaceSpec describes the attributes on a Namespace. // NamespaceSpec describes the attributes on a Namespace.
message NamespaceSpec { message NamespaceSpec {
// Finalizers is an opaque list of values that must be empty to permanently remove object from storage. // Finalizers is an opaque list of values that must be empty to permanently remove object from storage.
// More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#finalizers // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
// +optional // +optional
repeated string finalizers = 1; repeated string finalizers = 1;
} }
@ -1601,7 +1601,7 @@ message NamespaceSpec {
// NamespaceStatus is information about the current status of a Namespace. // NamespaceStatus is information about the current status of a Namespace.
message NamespaceStatus { message NamespaceStatus {
// Phase is the current lifecycle phase of the namespace. // Phase is the current lifecycle phase of the namespace.
// More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#phases // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
// +optional // +optional
optional string phase = 1; optional string phase = 1;
} }
@ -2287,7 +2287,7 @@ message PersistentVolumeSource {
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
// +optional // +optional
optional RBDVolumeSource rbd = 6; optional RBDPersistentVolumeSource rbd = 6;
// ISCSI represents an ISCSI Disk resource that is attached to a // ISCSI represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod. Provisioned by an admin. // kubelet's host machine and then exposed to the pod. Provisioned by an admin.
@ -2342,7 +2342,7 @@ message PersistentVolumeSource {
// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
// +optional // +optional
optional ScaleIOVolumeSource scaleIO = 19; optional ScaleIOPersistentVolumeSource scaleIO = 19;
// Local represents directly-attached storage with node affinity // Local represents directly-attached storage with node affinity
// +optional // +optional
@ -2474,7 +2474,7 @@ message PodAffinity {
// relative to the given namespace(s)) that this pod should be // relative to the given namespace(s)) that this pod should be
// co-located (affinity) or not co-located (anti-affinity) with, // co-located (affinity) or not co-located (anti-affinity) with,
// where co-located is defined as running on a node whose value of // where co-located is defined as running on a node whose value of
// the label with key <topologyKey> tches that of any node on which // the label with key <topologyKey> matches that of any node on which
// a pod of the set of pods is running // a pod of the set of pods is running
message PodAffinityTerm { message PodAffinityTerm {
// A label query over a set of resources, in this case pods. // A label query over a set of resources, in this case pods.
@ -3157,6 +3157,57 @@ message QuobyteVolumeSource {
optional string group = 5; optional string group = 5;
} }
// Represents a Rados Block Device mount that lasts the lifetime of a pod.
// RBD volumes support ownership management and SELinux relabeling.
message RBDPersistentVolumeSource {
// A collection of Ceph monitors.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
repeated string monitors = 1;
// The rados image name.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
optional string image = 2;
// Filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
optional string fsType = 3;
// The rados pool name.
// Default is rbd.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
optional string pool = 4;
// The rados user name.
// Default is admin.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
optional string user = 5;
// Keyring is the path to key ring for RBDUser.
// Default is /etc/ceph/keyring.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
optional string keyring = 6;
// SecretRef is name of the authentication secret for RBDUser. If provided
// overrides keyring.
// Default is nil.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
optional SecretReference secretRef = 7;
// ReadOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
optional bool readOnly = 8;
}
// Represents a Rados Block Device mount that lasts the lifetime of a pod. // Represents a Rados Block Device mount that lasts the lifetime of a pod.
// RBD volumes support ownership management and SELinux relabeling. // RBD volumes support ownership management and SELinux relabeling.
message RBDVolumeSource { message RBDVolumeSource {
@ -3377,14 +3428,14 @@ message ResourceQuotaList {
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// Items is a list of ResourceQuota objects. // Items is a list of ResourceQuota objects.
// More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
repeated ResourceQuota items = 2; repeated ResourceQuota items = 2;
} }
// ResourceQuotaSpec defines the desired hard limits to enforce for Quota. // ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
message ResourceQuotaSpec { message ResourceQuotaSpec {
// Hard is the set of desired hard limits for each named resource. // Hard is the set of desired hard limits for each named resource.
// More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
// +optional // +optional
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1; map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1;
@ -3397,7 +3448,7 @@ message ResourceQuotaSpec {
// ResourceQuotaStatus defines the enforced hard limits and observed use. // ResourceQuotaStatus defines the enforced hard limits and observed use.
message ResourceQuotaStatus { message ResourceQuotaStatus {
// Hard is the set of enforced hard limits for each named resource. // Hard is the set of enforced hard limits for each named resource.
// More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
// +optional // +optional
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1; map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1;
@ -3440,6 +3491,50 @@ message SELinuxOptions {
optional string level = 4; optional string level = 4;
} }
// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume
message ScaleIOPersistentVolumeSource {
// The host address of the ScaleIO API Gateway.
optional string gateway = 1;
// The name of the storage system as configured in ScaleIO.
optional string system = 2;
// SecretRef references to the secret for ScaleIO user and other
// sensitive information. If this is not provided, Login operation will fail.
optional SecretReference secretRef = 3;
// Flag to enable/disable SSL communication with Gateway, default false
// +optional
optional bool sslEnabled = 4;
// The name of the ScaleIO Protection Domain for the configured storage.
// +optional
optional string protectionDomain = 5;
// The ScaleIO Storage Pool associated with the protection domain.
// +optional
optional string storagePool = 6;
// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
// +optional
optional string storageMode = 7;
// The name of a volume already created in the ScaleIO system
// that is associated with this volume source.
optional string volumeName = 8;
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
optional string fsType = 9;
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
optional bool readOnly = 10;
}
// ScaleIOVolumeSource represents a persistent ScaleIO volume // ScaleIOVolumeSource represents a persistent ScaleIO volume
message ScaleIOVolumeSource { message ScaleIOVolumeSource {
// The host address of the ScaleIO API Gateway. // The host address of the ScaleIO API Gateway.
@ -3456,15 +3551,15 @@ message ScaleIOVolumeSource {
// +optional // +optional
optional bool sslEnabled = 4; optional bool sslEnabled = 4;
// The name of the Protection Domain for the configured storage (defaults to "default"). // The name of the ScaleIO Protection Domain for the configured storage.
// +optional // +optional
optional string protectionDomain = 5; optional string protectionDomain = 5;
// The Storage Pool associated with the protection domain (defaults to "default"). // The ScaleIO Storage Pool associated with the protection domain.
// +optional // +optional
optional string storagePool = 6; optional string storagePool = 6;
// Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
// +optional // +optional
optional string storageMode = 7; optional string storageMode = 7;

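The new RBDPersistentVolumeSource and ScaleIOPersistentVolumeSource messages are the persistent-volume-level counterparts of the existing pod-level sources; the practical difference is that their secretRef is a namespaced SecretReference rather than a local object reference. A sketch of a PersistentVolume using the new RBD type, built from the Go fields shown further down in types.go (monitor address, image, and secret names are made up for illustration):

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// rbdPersistentVolume builds a PersistentVolume backed by the new
// RBDPersistentVolumeSource; SecretRef carries an explicit namespace.
func rbdPersistentVolume() *corev1.PersistentVolume {
	return &corev1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "rbd-pv"},
		Spec: corev1.PersistentVolumeSpec{
			Capacity: corev1.ResourceList{
				corev1.ResourceStorage: resource.MustParse("10Gi"),
			},
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			PersistentVolumeSource: corev1.PersistentVolumeSource{
				RBD: &corev1.RBDPersistentVolumeSource{
					CephMonitors: []string{"10.0.0.1:6789"},
					RBDImage:     "pv-image",
					RBDPool:      "rbd",
					RadosUser:    "admin",
					FSType:       "ext4",
					SecretRef: &corev1.SecretReference{
						Name:      "ceph-secret",
						Namespace: "kube-system",
					},
				},
			},
		},
	}
}
```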
109 vendor/k8s.io/api/core/v1/types.go generated vendored
View file

@ -398,7 +398,7 @@ type PersistentVolumeSource struct {
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
// +optional // +optional
RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"`
// ISCSI represents an ISCSI Disk resource that is attached to a // ISCSI represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod. Provisioned by an admin. // kubelet's host machine and then exposed to the pod. Provisioned by an admin.
// +optional // +optional
@ -440,7 +440,7 @@ type PersistentVolumeSource struct {
PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"` PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"`
// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
// +optional // +optional
ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"` ScaleIO *ScaleIOPersistentVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"`
// Local represents directly-attached storage with node affinity // Local represents directly-attached storage with node affinity
// +optional // +optional
Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"` Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"`
@ -838,6 +838,50 @@ type RBDVolumeSource struct {
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
} }
// Represents a Rados Block Device mount that lasts the lifetime of a pod.
// RBD volumes support ownership management and SELinux relabeling.
type RBDPersistentVolumeSource struct {
// A collection of Ceph monitors.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
// The rados image name.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"`
// Filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
// The rados pool name.
// Default is rbd.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
// The rados user name.
// Default is admin.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
// Keyring is the path to key ring for RBDUser.
// Default is /etc/ceph/keyring.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
// SecretRef is name of the authentication secret for RBDUser. If provided
// overrides keyring.
// Default is nil.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"`
// ReadOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
}
// Represents a cinder volume resource in Openstack. // Represents a cinder volume resource in Openstack.
// A Cinder volume must exist before mounting to a container. // A Cinder volume must exist before mounting to a container.
// The volume must also be in the same region as the kubelet. // The volume must also be in the same region as the kubelet.
@ -1322,7 +1366,7 @@ type AzureDiskVolumeSource struct {
// the ReadOnly setting in VolumeMounts. // the ReadOnly setting in VolumeMounts.
// +optional // +optional
ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"` ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"`
// Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared // Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared
Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"` Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"`
} }
@ -1352,13 +1396,48 @@ type ScaleIOVolumeSource struct {
// Flag to enable/disable SSL communication with Gateway, default false // Flag to enable/disable SSL communication with Gateway, default false
// +optional // +optional
SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"` SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
// The name of the Protection Domain for the configured storage (defaults to "default"). // The name of the ScaleIO Protection Domain for the configured storage.
// +optional // +optional
ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"` ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
// The Storage Pool associated with the protection domain (defaults to "default"). // The ScaleIO Storage Pool associated with the protection domain.
// +optional // +optional
StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"` StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
// Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
// +optional
StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
// The name of a volume already created in the ScaleIO system
// that is associated with this volume source.
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"`
}
// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume
type ScaleIOPersistentVolumeSource struct {
// The host address of the ScaleIO API Gateway.
Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"`
// The name of the storage system as configured in ScaleIO.
System string `json:"system" protobuf:"bytes,2,opt,name=system"`
// SecretRef references to the secret for ScaleIO user and other
// sensitive information. If this is not provided, Login operation will fail.
SecretRef *SecretReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
// Flag to enable/disable SSL communication with Gateway, default false
// +optional
SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
// The name of the ScaleIO Protection Domain for the configured storage.
// +optional
ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
// The ScaleIO Storage Pool associated with the protection domain.
// +optional
StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
// +optional // +optional
StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
// The name of a volume already created in the ScaleIO system // The name of a volume already created in the ScaleIO system
@ -1996,7 +2075,7 @@ type Container struct {
ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
// Security options the pod should run with. // Security options the pod should run with.
// More info: https://kubernetes.io/docs/concepts/policy/security-context/ // More info: https://kubernetes.io/docs/concepts/policy/security-context/
// More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
// +optional // +optional
SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
@ -2394,7 +2473,7 @@ type WeightedPodAffinityTerm struct {
// relative to the given namespace(s)) that this pod should be // relative to the given namespace(s)) that this pod should be
// co-located (affinity) or not co-located (anti-affinity) with, // co-located (affinity) or not co-located (anti-affinity) with,
// where co-located is defined as running on a node whose value of // where co-located is defined as running on a node whose value of
// the label with key <topologyKey> tches that of any node on which // the label with key <topologyKey> matches that of any node on which
// a pod of the set of pods is running // a pod of the set of pods is running
type PodAffinityTerm struct { type PodAffinityTerm struct {
// A label query over a set of resources, in this case pods. // A label query over a set of resources, in this case pods.
@ -2469,7 +2548,7 @@ type Taint struct {
// TimeAdded represents the time at which the taint was added. // TimeAdded represents the time at which the taint was added.
// It is only written for NoExecute taints. // It is only written for NoExecute taints.
// +optional // +optional
TimeAdded metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"` TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"`
} }
type TaintEffect string type TaintEffect string
@ -3825,7 +3904,7 @@ const (
// NamespaceSpec describes the attributes on a Namespace. // NamespaceSpec describes the attributes on a Namespace.
type NamespaceSpec struct { type NamespaceSpec struct {
// Finalizers is an opaque list of values that must be empty to permanently remove object from storage. // Finalizers is an opaque list of values that must be empty to permanently remove object from storage.
// More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#finalizers // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
// +optional // +optional
Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"` Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"`
} }
@ -3833,7 +3912,7 @@ type NamespaceSpec struct {
// NamespaceStatus is information about the current status of a Namespace. // NamespaceStatus is information about the current status of a Namespace.
type NamespaceStatus struct { type NamespaceStatus struct {
// Phase is the current lifecycle phase of the namespace. // Phase is the current lifecycle phase of the namespace.
// More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#phases // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
// +optional // +optional
Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"` Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"`
} }
@ -4376,7 +4455,7 @@ type LimitRangeList struct {
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is a list of LimitRange objects. // Items is a list of LimitRange objects.
// More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_limit_range.md // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"` Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"`
} }
@ -4433,7 +4512,7 @@ const (
// ResourceQuotaSpec defines the desired hard limits to enforce for Quota. // ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
type ResourceQuotaSpec struct { type ResourceQuotaSpec struct {
// Hard is the set of desired hard limits for each named resource. // Hard is the set of desired hard limits for each named resource.
// More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
// +optional // +optional
Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
// A collection of filters that must match each object tracked by a quota. // A collection of filters that must match each object tracked by a quota.
@ -4445,7 +4524,7 @@ type ResourceQuotaSpec struct {
// ResourceQuotaStatus defines the enforced hard limits and observed use. // ResourceQuotaStatus defines the enforced hard limits and observed use.
type ResourceQuotaStatus struct { type ResourceQuotaStatus struct {
// Hard is the set of enforced hard limits for each named resource. // Hard is the set of enforced hard limits for each named resource.
// More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
// +optional // +optional
Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
// Used is the current observed total usage of the resource in the namespace. // Used is the current observed total usage of the resource in the namespace.
@ -4486,7 +4565,7 @@ type ResourceQuotaList struct {
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is a list of ResourceQuota objects. // Items is a list of ResourceQuota objects.
// More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"`
} }

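Besides the new persistent volume sources, the types.go hunk changes Taint.TimeAdded from metav1.Time to *metav1.Time, so the timestamp is now optional and callers pass it by address. A small sketch of the updated call site (purely illustrative):

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// noExecuteTaint builds a NoExecute taint with TimeAdded set; with this bump
// the field is a pointer, so the metav1.Time value is taken by address.
func noExecuteTaint(key, value string) corev1.Taint {
	now := metav1.Now()
	return corev1.Taint{
		Key:       key,
		Value:     value,
		Effect:    corev1.TaintEffectNoExecute,
		TimeAdded: &now, // only written for NoExecute taints
	}
}
```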
View file

@ -76,7 +76,7 @@ var map_AzureDiskVolumeSource = map[string]string{
"cachingMode": "Host Caching mode: None, Read Only, Read Write.", "cachingMode": "Host Caching mode: None, Read Only, Read Write.",
"fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
"readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
"kind": "Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared", "kind": "Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared",
} }
func (AzureDiskVolumeSource) SwaggerDoc() map[string]string { func (AzureDiskVolumeSource) SwaggerDoc() map[string]string {
@ -284,7 +284,7 @@ var map_Container = map[string]string{
"terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.",
"terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.",
"imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images",
"securityContext": "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md", "securityContext": "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
"stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.",
"stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false",
"tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.",
@ -765,7 +765,7 @@ func (LimitRangeItem) SwaggerDoc() map[string]string {
var map_LimitRangeList = map[string]string{ var map_LimitRangeList = map[string]string{
"": "LimitRangeList is a list of LimitRange items.", "": "LimitRangeList is a list of LimitRange items.",
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
"items": "Items is a list of LimitRange objects. More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_limit_range.md", "items": "Items is a list of LimitRange objects. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
} }
func (LimitRangeList) SwaggerDoc() map[string]string { func (LimitRangeList) SwaggerDoc() map[string]string {
@ -866,7 +866,7 @@ func (NamespaceList) SwaggerDoc() map[string]string {
var map_NamespaceSpec = map[string]string{ var map_NamespaceSpec = map[string]string{
"": "NamespaceSpec describes the attributes on a Namespace.", "": "NamespaceSpec describes the attributes on a Namespace.",
"finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#finalizers", "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/",
} }
func (NamespaceSpec) SwaggerDoc() map[string]string { func (NamespaceSpec) SwaggerDoc() map[string]string {
@ -875,7 +875,7 @@ func (NamespaceSpec) SwaggerDoc() map[string]string {
var map_NamespaceStatus = map[string]string{ var map_NamespaceStatus = map[string]string{
"": "NamespaceStatus is information about the current status of a Namespace.", "": "NamespaceStatus is information about the current status of a Namespace.",
"phase": "Phase is the current lifecycle phase of the namespace. More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#phases", "phase": "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/",
} }
func (NamespaceStatus) SwaggerDoc() map[string]string { func (NamespaceStatus) SwaggerDoc() map[string]string {
@ -1275,7 +1275,7 @@ func (PodAffinity) SwaggerDoc() map[string]string {
} }
var map_PodAffinityTerm = map[string]string{ var map_PodAffinityTerm = map[string]string{
"": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> tches that of any node on which a pod of the set of pods is running", "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running",
"labelSelector": "A label query over a set of resources, in this case pods.", "labelSelector": "A label query over a set of resources, in this case pods.",
"namespaces": "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"", "namespaces": "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"",
"topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as \"all topologies\" (\"all topologies\" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.", "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as \"all topologies\" (\"all topologies\" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.",
@ -1571,6 +1571,22 @@ func (QuobyteVolumeSource) SwaggerDoc() map[string]string {
return map_QuobyteVolumeSource return map_QuobyteVolumeSource
} }
var map_RBDPersistentVolumeSource = map[string]string{
"": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.",
"monitors": "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
"image": "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
"fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd",
"pool": "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
"user": "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
"keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
"secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
"readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
}
func (RBDPersistentVolumeSource) SwaggerDoc() map[string]string {
return map_RBDPersistentVolumeSource
}
var map_RBDVolumeSource = map[string]string{ var map_RBDVolumeSource = map[string]string{
"": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.",
"monitors": "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", "monitors": "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it",
@ -1683,7 +1699,7 @@ func (ResourceQuota) SwaggerDoc() map[string]string {
var map_ResourceQuotaList = map[string]string{ var map_ResourceQuotaList = map[string]string{
"": "ResourceQuotaList is a list of ResourceQuota items.", "": "ResourceQuotaList is a list of ResourceQuota items.",
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
"items": "Items is a list of ResourceQuota objects. More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md", "items": "Items is a list of ResourceQuota objects. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/",
} }
func (ResourceQuotaList) SwaggerDoc() map[string]string { func (ResourceQuotaList) SwaggerDoc() map[string]string {
@ -1692,7 +1708,7 @@ func (ResourceQuotaList) SwaggerDoc() map[string]string {
var map_ResourceQuotaSpec = map[string]string{
	"": "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.",
	"hard": "Hard is the set of desired hard limits for each named resource. More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md",
	"hard": "Hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/",
	"scopes": "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.",
}
@ -1702,7 +1718,7 @@ func (ResourceQuotaSpec) SwaggerDoc() map[string]string {
var map_ResourceQuotaStatus = map[string]string{
	"": "ResourceQuotaStatus defines the enforced hard limits and observed use.",
	"hard": "Hard is the set of enforced hard limits for each named resource. More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md",
	"hard": "Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/",
	"used": "Used is the current observed total usage of the resource in the namespace.",
}
@ -1732,15 +1748,33 @@ func (SELinuxOptions) SwaggerDoc() map[string]string {
	return map_SELinuxOptions
}
var map_ScaleIOPersistentVolumeSource = map[string]string{
"": "ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume",
"gateway": "The host address of the ScaleIO API Gateway.",
"system": "The name of the storage system as configured in ScaleIO.",
"secretRef": "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.",
"sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false",
"protectionDomain": "The name of the ScaleIO Protection Domain for the configured storage.",
"storagePool": "The ScaleIO Storage Pool associated with the protection domain.",
"storageMode": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.",
"volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.",
"fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
"readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
}
func (ScaleIOPersistentVolumeSource) SwaggerDoc() map[string]string {
return map_ScaleIOPersistentVolumeSource
}
var map_ScaleIOVolumeSource = map[string]string{
	"": "ScaleIOVolumeSource represents a persistent ScaleIO volume",
	"gateway": "The host address of the ScaleIO API Gateway.",
	"system": "The name of the storage system as configured in ScaleIO.",
	"secretRef": "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.",
	"sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false",
	"protectionDomain": "The name of the Protection Domain for the configured storage (defaults to \"default\").",
	"protectionDomain": "The name of the ScaleIO Protection Domain for the configured storage.",
	"storagePool": "The Storage Pool associated with the protection domain (defaults to \"default\").",
	"storagePool": "The ScaleIO Storage Pool associated with the protection domain.",
	"storageMode": "Indicates whether the storage for a volume should be thick or thin (defaults to \"thin\").",
	"storageMode": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.",
	"volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.",
	"fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
	"readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",


@ -571,6 +571,10 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
			in.(*QuobyteVolumeSource).DeepCopyInto(out.(*QuobyteVolumeSource))
			return nil
		}, InType: reflect.TypeOf(&QuobyteVolumeSource{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*RBDPersistentVolumeSource).DeepCopyInto(out.(*RBDPersistentVolumeSource))
return nil
}, InType: reflect.TypeOf(&RBDPersistentVolumeSource{})},
		conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
			in.(*RBDVolumeSource).DeepCopyInto(out.(*RBDVolumeSource))
			return nil
@ -627,6 +631,10 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
			in.(*SELinuxOptions).DeepCopyInto(out.(*SELinuxOptions))
			return nil
		}, InType: reflect.TypeOf(&SELinuxOptions{})},
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*ScaleIOPersistentVolumeSource).DeepCopyInto(out.(*ScaleIOPersistentVolumeSource))
return nil
}, InType: reflect.TypeOf(&ScaleIOPersistentVolumeSource{})},
		conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
			in.(*ScaleIOVolumeSource).DeepCopyInto(out.(*ScaleIOVolumeSource))
			return nil
@ -3733,7 +3741,7 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) {
		if *in == nil {
			*out = nil
		} else {
			*out = new(RBDVolumeSource)
			*out = new(RBDPersistentVolumeSource)
			(*in).DeepCopyInto(*out)
		}
	}
@ -3850,7 +3858,7 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) {
		if *in == nil {
			*out = nil
		} else {
			*out = new(ScaleIOVolumeSource)
			*out = new(ScaleIOPersistentVolumeSource)
			(*in).DeepCopyInto(*out)
		}
	}
@ -4801,6 +4809,36 @@ func (in *QuobyteVolumeSource) DeepCopy() *QuobyteVolumeSource {
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RBDPersistentVolumeSource) DeepCopyInto(out *RBDPersistentVolumeSource) {
*out = *in
if in.CephMonitors != nil {
in, out := &in.CephMonitors, &out.CephMonitors
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
if *in == nil {
*out = nil
} else {
*out = new(SecretReference)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDPersistentVolumeSource.
func (in *RBDPersistentVolumeSource) DeepCopy() *RBDPersistentVolumeSource {
if in == nil {
return nil
}
out := new(RBDPersistentVolumeSource)
in.DeepCopyInto(out)
return out
}
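A brief usage sketch of the generated deepcopy semantics added above (illustrative only, written against the externally-versioned core/v1 type, which receives equivalent generated methods): the copy owns its own SecretRef, so mutating it leaves the original untouched.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

func main() {
	orig := &v1.RBDPersistentVolumeSource{
		CephMonitors: []string{"mon-a:6789"},
		SecretRef:    &v1.SecretReference{Name: "ceph-secret"},
	}
	copied := orig.DeepCopy()
	copied.SecretRef.Name = "rotated-secret" // only the copy changes
	fmt.Println(orig.SecretRef.Name, copied.SecretRef.Name)
}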
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RBDVolumeSource) DeepCopyInto(out *RBDVolumeSource) {
	*out = *in
@ -5191,6 +5229,31 @@ func (in *SELinuxOptions) DeepCopy() *SELinuxOptions {
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleIOPersistentVolumeSource) DeepCopyInto(out *ScaleIOPersistentVolumeSource) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
if *in == nil {
*out = nil
} else {
*out = new(SecretReference)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOPersistentVolumeSource.
func (in *ScaleIOPersistentVolumeSource) DeepCopy() *ScaleIOPersistentVolumeSource {
if in == nil {
return nil
}
out := new(ScaleIOPersistentVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleIOVolumeSource) DeepCopyInto(out *ScaleIOVolumeSource) {
	*out = *in
@ -5905,7 +5968,15 @@ func (in *TCPSocketAction) DeepCopy() *TCPSocketAction {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Taint) DeepCopyInto(out *Taint) {
	*out = *in
	in.TimeAdded.DeepCopyInto(&out.TimeAdded)
	if in.TimeAdded != nil {
in, out := &in.TimeAdded, &out.TimeAdded
if *in == nil {
*out = nil
} else {
*out = new(meta_v1.Time)
(*in).DeepCopyInto(*out)
}
}
	return
}
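The hunk above switches Taint.TimeAdded from a Time value to a *Time, which is why the deepcopy now has to be nil-aware. A small sketch of what that means for callers, assuming the externally-versioned core/v1 Taint mirrors the pointer field the way this generated code suggests:

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	t := v1.Taint{Key: "node.kubernetes.io/unreachable", Effect: v1.TaintEffectNoExecute}
	if t.TimeAdded == nil { // an unset taint time now stays nil instead of a zero Time
		now := metav1.Now()
		t.TimeAdded = &now
	}
	fmt.Println(t.TimeAdded.Time)
}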


@ -2175,16 +2175,18 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) {
		}
		i++
	}
if m.AllowPrivilegeEscalation != nil {
		dAtA[i] = 0x80
		i++
		dAtA[i] = 0x1
		i++
		if m.AllowPrivilegeEscalation {
		if *m.AllowPrivilegeEscalation {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
}
	if len(m.AllowedHostPaths) > 0 {
		for _, msg := range m.AllowedHostPaths {
			dAtA[i] = 0x8a
@ -3520,7 +3522,9 @@ func (m *PodSecurityPolicySpec) Size() (n int) {
	if m.DefaultAllowPrivilegeEscalation != nil {
		n += 2
	}
if m.AllowPrivilegeEscalation != nil {
		n += 3
}
	if len(m.AllowedHostPaths) > 0 {
		for _, e := range m.AllowedHostPaths {
			l = e.Size()
@ -4297,7 +4301,7 @@ func (this *PodSecurityPolicySpec) String() string {
		`FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`,
		`ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`,
		`DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`,
		`AllowPrivilegeEscalation:` + fmt.Sprintf("%v", this.AllowPrivilegeEscalation) + `,`,
		`AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`,
		`AllowedHostPaths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedHostPaths), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + `,`,
		`}`,
	}, "")
@ -10170,7 +10174,8 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error {
					break
				}
			}
			m.AllowPrivilegeEscalation = bool(v != 0)
			b := bool(v != 0)
m.AllowPrivilegeEscalation = &b
		case 17:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType)
@ -12654,233 +12659,232 @@ func init() {
}

var fileDescriptorGenerated = []byte{
	// 3634 bytes of a gzipped FileDescriptorProto
	// 3632 bytes of a gzipped FileDescriptorProto
	// (regenerated gzipped descriptor bytes omitted)
}

View file

@ -815,7 +815,7 @@ message PodSecurityPolicySpec {
optional bool defaultAllowPrivilegeEscalation = 15; optional bool defaultAllowPrivilegeEscalation = 15;
// AllowPrivilegeEscalation determines if a pod can request to allow // AllowPrivilegeEscalation determines if a pod can request to allow
// privilege escalation. // privilege escalation. If unspecified, defaults to true.
// +optional // +optional
optional bool allowPrivilegeEscalation = 16; optional bool allowPrivilegeEscalation = 16;
@ -826,7 +826,7 @@ message PodSecurityPolicySpec {
// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for
// more information. // more information.
// ReplicaSet represents the configuration of a ReplicaSet. // ReplicaSet ensures that a specified number of pod replicas are running at any given time.
message ReplicaSet { message ReplicaSet {
// If the Labels of a ReplicaSet are empty, they are defaulted to // If the Labels of a ReplicaSet are empty, they are defaulted to
// be the same as the Pod(s) that the ReplicaSet manages. // be the same as the Pod(s) that the ReplicaSet manages.
@ -1016,7 +1016,7 @@ message SELinuxStrategyOptions {
optional string rule = 1; optional string rule = 1;
// seLinuxOptions required to run as; required for MustRunAs // seLinuxOptions required to run as; required for MustRunAs
// More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
// +optional // +optional
optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2;
} }

View file

@ -778,7 +778,7 @@ type IngressBackend struct {
// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for
// more information. // more information.
// ReplicaSet represents the configuration of a ReplicaSet. // ReplicaSet ensures that a specified number of pod replicas are running at any given time.
type ReplicaSet struct { type ReplicaSet struct {
metav1.TypeMeta `json:",inline"` metav1.TypeMeta `json:",inline"`
@ -975,9 +975,9 @@ type PodSecurityPolicySpec struct {
// +optional // +optional
DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"` DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"`
// AllowPrivilegeEscalation determines if a pod can request to allow // AllowPrivilegeEscalation determines if a pod can request to allow
// privilege escalation. // privilege escalation. If unspecified, defaults to true.
// +optional // +optional
AllowPrivilegeEscalation bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"` AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"`
// is a white list of allowed host paths. Empty indicates that all host paths may be used. // is a white list of allowed host paths. Empty indicates that all host paths may be used.
// +optional // +optional
AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"` AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"`
@ -1038,7 +1038,7 @@ type SELinuxStrategyOptions struct {
// type is the strategy that will dictate the allowable labels that may be set. // type is the strategy that will dictate the allowable labels that may be set.
Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"`
// seLinuxOptions required to run as; required for MustRunAs // seLinuxOptions required to run as; required for MustRunAs
// More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
// +optional // +optional
SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"`
} }

View file

@ -458,7 +458,7 @@ var map_PodSecurityPolicySpec = map[string]string{
"fsGroup": "FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.", "fsGroup": "FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.",
"readOnlyRootFilesystem": "ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", "readOnlyRootFilesystem": "ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.",
"defaultAllowPrivilegeEscalation": "DefaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", "defaultAllowPrivilegeEscalation": "DefaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.",
"allowPrivilegeEscalation": "AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation.", "allowPrivilegeEscalation": "AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.",
"allowedHostPaths": "is a white list of allowed host paths. Empty indicates that all host paths may be used.", "allowedHostPaths": "is a white list of allowed host paths. Empty indicates that all host paths may be used.",
} }
@ -467,7 +467,7 @@ func (PodSecurityPolicySpec) SwaggerDoc() map[string]string {
} }
var map_ReplicaSet = map[string]string{ var map_ReplicaSet = map[string]string{
"": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet represents the configuration of a ReplicaSet.", "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.",
"metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
"spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
"status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
@ -575,7 +575,7 @@ func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string {
var map_SELinuxStrategyOptions = map[string]string{ var map_SELinuxStrategyOptions = map[string]string{
"": "SELinux Strategy Options defines the strategy type and any options used to create the strategy.", "": "SELinux Strategy Options defines the strategy type and any options used to create the strategy.",
"rule": "type is the strategy that will dictate the allowable labels that may be set.", "rule": "type is the strategy that will dictate the allowable labels that may be set.",
"seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md", "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
} }
func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { func (SELinuxStrategyOptions) SwaggerDoc() map[string]string {

View file

@ -1431,6 +1431,15 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) {
**out = **in **out = **in
} }
} }
if in.AllowPrivilegeEscalation != nil {
in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation
if *in == nil {
*out = nil
} else {
*out = new(bool)
**out = **in
}
}
if in.AllowedHostPaths != nil { if in.AllowedHostPaths != nil {
in, out := &in.AllowedHostPaths, &out.AllowedHostPaths in, out := &in.AllowedHostPaths, &out.AllowedHostPaths
*out = make([]AllowedHostPath, len(*in)) *out = make([]AllowedHostPath, len(*in))

View file

@ -2504,52 +2504,52 @@ func init() {
} }
var fileDescriptorGenerated = []byte{ var fileDescriptorGenerated = []byte{
// 751 bytes of a gzipped FileDescriptorProto // 743 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x94, 0xcd, 0x6e, 0xd3, 0x4a, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x94, 0x4f, 0x6b, 0x13, 0x4f,
0x14, 0xc7, 0xe3, 0x7c, 0x28, 0xf1, 0xe4, 0x46, 0xb9, 0xf5, 0x95, 0xee, 0xb5, 0x2a, 0x5d, 0x27, 0x18, 0xc7, 0x33, 0xf9, 0x43, 0xb3, 0x93, 0x5f, 0xc8, 0xaf, 0x2b, 0xc8, 0x52, 0x61, 0x13, 0x72,
0x0a, 0x2c, 0x2a, 0x95, 0xda, 0xb4, 0x20, 0x60, 0x83, 0x04, 0x66, 0x01, 0x55, 0x4b, 0xa8, 0x06, 0x90, 0x80, 0xba, 0x6b, 0xaa, 0xa8, 0x20, 0x3d, 0xb8, 0x15, 0xa5, 0xb4, 0xd6, 0x32, 0xa2, 0x07,
0xc1, 0x02, 0xb1, 0x60, 0xe2, 0x4c, 0xd3, 0x21, 0xf1, 0x87, 0x66, 0xc6, 0x91, 0x2a, 0x36, 0x3c, 0xf1, 0xe0, 0x66, 0x33, 0x4d, 0xc7, 0x64, 0xff, 0x30, 0x33, 0x1b, 0x28, 0x5e, 0xc4, 0x9b, 0x37,
0x00, 0x0b, 0x24, 0x5e, 0x83, 0x15, 0x3b, 0x78, 0x82, 0x2c, 0xbb, 0xec, 0x2a, 0xa2, 0xe6, 0x41, 0xdf, 0x85, 0x17, 0xbd, 0xe9, 0x2b, 0xf0, 0xd2, 0x63, 0x8f, 0x3d, 0x05, 0xbb, 0xbe, 0x10, 0x65,
0x40, 0x33, 0xb6, 0xe3, 0xa4, 0x69, 0xda, 0xac, 0x22, 0x21, 0xb1, 0x4a, 0xe6, 0x9c, 0xdf, 0xf9, 0x66, 0x77, 0xb3, 0x49, 0x93, 0xd8, 0x9e, 0x02, 0xe2, 0x29, 0x99, 0xe7, 0xf9, 0x7c, 0x9f, 0xf9,
0x9f, 0x0f, 0xcf, 0x1c, 0xf0, 0xa0, 0x7f, 0x8f, 0x99, 0xc4, 0xb7, 0xfa, 0x61, 0x07, 0x53, 0x0f, 0xce, 0xb3, 0x33, 0x0f, 0xbc, 0xdf, 0xbf, 0xc7, 0x0c, 0xe2, 0x9b, 0xfd, 0xb0, 0x83, 0xa9, 0x87,
0x73, 0xcc, 0xac, 0x21, 0xf6, 0xba, 0x3e, 0xb5, 0x12, 0x07, 0x0a, 0x88, 0x45, 0x3b, 0xc8, 0xb1, 0x39, 0x66, 0xe6, 0x10, 0x7b, 0x5d, 0x9f, 0x9a, 0x49, 0xc2, 0x0e, 0x88, 0x49, 0x3b, 0xb6, 0x63,
0x86, 0xdb, 0x1d, 0xcc, 0xd1, 0xb6, 0xd5, 0xc3, 0x1e, 0xa6, 0x88, 0xe3, 0xae, 0x19, 0x50, 0x9f, 0x0e, 0xdb, 0x66, 0x0f, 0x7b, 0x98, 0xda, 0x1c, 0x77, 0x8d, 0x80, 0xfa, 0xdc, 0x57, 0xd5, 0x98,
0xfb, 0xda, 0x7f, 0x31, 0x68, 0xa2, 0x80, 0x98, 0x02, 0x34, 0x13, 0x70, 0x7d, 0xab, 0x47, 0xf8, 0x31, 0xec, 0x80, 0x18, 0x82, 0x31, 0x86, 0xed, 0xb5, 0x1b, 0x3d, 0xc2, 0x0f, 0xc2, 0x8e, 0xe1,
0x51, 0xd8, 0x31, 0x1d, 0xdf, 0xb5, 0x7a, 0x7e, 0xcf, 0xb7, 0x24, 0xdf, 0x09, 0x0f, 0xe5, 0x49, 0xf8, 0xae, 0xd9, 0xf3, 0x7b, 0xbe, 0x29, 0xd1, 0x4e, 0xb8, 0x2f, 0x57, 0x72, 0x21, 0xff, 0xc5,
0x1e, 0xe4, 0xbf, 0x58, 0x67, 0xfd, 0x76, 0x96, 0xd0, 0x45, 0xce, 0x11, 0xf1, 0x30, 0x3d, 0xb6, 0x25, 0xd6, 0x6e, 0x67, 0xdb, 0xb8, 0xb6, 0x73, 0x40, 0x3c, 0x4c, 0x0f, 0xcd, 0xa0, 0xdf, 0x13,
0x82, 0x7e, 0x4f, 0x18, 0x98, 0xe5, 0x62, 0x8e, 0xac, 0xe1, 0x5c, 0xf6, 0x75, 0x6b, 0x51, 0x14, 0x01, 0x66, 0xba, 0x98, 0xdb, 0x73, 0x36, 0x5e, 0x33, 0x17, 0xa9, 0x68, 0xe8, 0x71, 0xe2, 0xe2,
0x0d, 0x3d, 0x4e, 0x5c, 0x3c, 0x17, 0x70, 0xe7, 0xaa, 0x00, 0xe6, 0x1c, 0x61, 0x17, 0xcd, 0xc5, 0x19, 0xc1, 0x9d, 0xf3, 0x04, 0xcc, 0x39, 0xc0, 0xae, 0x3d, 0xa3, 0xbb, 0xb5, 0x48, 0x17, 0x72,
0xdd, 0x5a, 0x14, 0x17, 0x72, 0x32, 0xb0, 0x88, 0xc7, 0x19, 0xa7, 0xe7, 0x83, 0x5a, 0x5f, 0x15, 0x32, 0x30, 0x89, 0xc7, 0x19, 0xa7, 0x67, 0x45, 0xcd, 0xaf, 0x00, 0x56, 0x36, 0x07, 0x21, 0xe3,
0x50, 0x7d, 0x34, 0x08, 0x19, 0xc7, 0x14, 0xfa, 0x03, 0xac, 0xbd, 0x01, 0x15, 0xd1, 0x48, 0x17, 0x98, 0x22, 0x7f, 0x80, 0xd5, 0xd7, 0xb0, 0x2c, 0x0e, 0xd2, 0xb5, 0xb9, 0xad, 0x81, 0x06, 0x68,
0x71, 0xa4, 0x2b, 0x4d, 0x65, 0xa3, 0xba, 0x73, 0xd3, 0xcc, 0xc6, 0x37, 0xd1, 0x35, 0x83, 0x7e, 0x55, 0xd6, 0x6f, 0x1a, 0x59, 0xe7, 0xc6, 0x75, 0x8d, 0xa0, 0xdf, 0x13, 0x01, 0x66, 0x08, 0xda,
0x4f, 0x18, 0x98, 0x29, 0x68, 0x73, 0xb8, 0x6d, 0x3e, 0xeb, 0xbc, 0xc5, 0x0e, 0x7f, 0x8a, 0x39, 0x18, 0xb6, 0x8d, 0xa7, 0x9d, 0x37, 0xd8, 0xe1, 0x4f, 0x30, 0xb7, 0x2d, 0xf5, 0x68, 0x54, 0xcf,
0xb2, 0xb5, 0xd1, 0xb8, 0x91, 0x8b, 0xc6, 0x0d, 0x90, 0xd9, 0xe0, 0x44, 0x55, 0x7b, 0x02, 0x4a, 0x45, 0xa3, 0x3a, 0xcc, 0x62, 0x68, 0x5c, 0x55, 0xdd, 0x84, 0x25, 0x1a, 0x0e, 0x30, 0xd3, 0xf2,
0x34, 0x1c, 0x60, 0xa6, 0xe7, 0x9b, 0x85, 0x8d, 0xea, 0xce, 0x35, 0x73, 0xc1, 0xd7, 0x31, 0x0f, 0x8d, 0x42, 0xab, 0xb2, 0xae, 0x1b, 0xb3, 0x1f, 0xc6, 0xd8, 0xf3, 0x07, 0xc4, 0x39, 0x44, 0xe1,
0xfc, 0x01, 0x71, 0x8e, 0x61, 0x38, 0xc0, 0x76, 0x2d, 0x51, 0x2c, 0x89, 0x13, 0x83, 0xb1, 0x40, 0x00, 0x5b, 0xd5, 0xa4, 0x58, 0x49, 0xac, 0x18, 0x8a, 0xb5, 0xcd, 0x0f, 0x79, 0xa8, 0x4e, 0xd8,
0xeb, 0x53, 0x1e, 0x68, 0x53, 0xb5, 0xdb, 0xc4, 0xeb, 0x12, 0xaf, 0xb7, 0x82, 0x16, 0xda, 0xa0, 0xb6, 0x88, 0xd7, 0x25, 0x5e, 0x6f, 0x09, 0xee, 0xb7, 0x60, 0x99, 0x85, 0x32, 0x91, 0x1e, 0xe0,
0xc2, 0x42, 0xe9, 0x48, 0xbb, 0x68, 0x2e, 0xec, 0xe2, 0x79, 0x0c, 0xda, 0x7f, 0x27, 0x8a, 0x95, 0xca, 0xbc, 0x03, 0x3c, 0x8b, 0x19, 0xeb, 0xff, 0xa4, 0x58, 0x39, 0x09, 0x30, 0x34, 0x96, 0xab,
0xc4, 0xc0, 0xe0, 0x44, 0x43, 0xdb, 0x03, 0x65, 0xea, 0x0f, 0x30, 0xc4, 0x87, 0x7a, 0x41, 0x16, 0x8f, 0xe0, 0x0a, 0xf5, 0x07, 0x18, 0xe1, 0x7d, 0xad, 0x20, 0xbd, 0xce, 0xad, 0x84, 0x62, 0xc4,
0xbc, 0x58, 0x0e, 0xc6, 0x9c, 0x5d, 0x4f, 0xe4, 0xca, 0x89, 0x01, 0xa6, 0x0a, 0xad, 0x91, 0x02, 0xaa, 0x25, 0x95, 0x56, 0x92, 0x00, 0x4a, 0xc5, 0xcd, 0xef, 0x00, 0x5e, 0x9e, 0xed, 0xc5, 0x0e,
0xfe, 0x9d, 0x9f, 0xca, 0x3e, 0x61, 0x5c, 0x7b, 0x3d, 0x37, 0x19, 0x73, 0xb9, 0xc9, 0x88, 0x68, 0x61, 0x5c, 0x7d, 0x35, 0xd3, 0x0f, 0xe3, 0x62, 0xfd, 0x10, 0x6a, 0xd9, 0x8d, 0xf1, 0x01, 0xd2,
0x39, 0x97, 0x49, 0x17, 0xa9, 0x65, 0x6a, 0x2a, 0x07, 0xa0, 0x44, 0x38, 0x76, 0xd3, 0x91, 0x6c, 0xc8, 0x44, 0x2f, 0xb6, 0x61, 0x89, 0x70, 0xec, 0xa6, 0x8d, 0xb8, 0x3a, 0xcf, 0xfe, 0xac, 0xb1,
0x2e, 0xec, 0x61, 0xbe, 0xba, 0xec, 0x03, 0xef, 0x0a, 0x05, 0x18, 0x0b, 0xb5, 0xbe, 0x29, 0xa0, 0xec, 0x8b, 0x6e, 0x09, 0x31, 0x8a, 0x6b, 0x34, 0xbf, 0x01, 0x58, 0x9b, 0x80, 0x97, 0x60, 0xff,
0x3e, 0x05, 0xaf, 0xa0, 0x87, 0xdd, 0xd9, 0x1e, 0xae, 0x2f, 0xd5, 0xc3, 0xc5, 0xc5, 0xff, 0x54, 0xe1, 0xb4, 0xfd, 0xfa, 0x79, 0xf6, 0xe7, 0xfb, 0xfe, 0x05, 0x20, 0xcc, 0xae, 0xab, 0x5a, 0x87,
0x00, 0xc8, 0xae, 0xb0, 0xd6, 0x00, 0xa5, 0x21, 0xa6, 0x1d, 0xa6, 0x2b, 0xcd, 0xc2, 0x86, 0x6a, 0xa5, 0x21, 0xa6, 0x1d, 0xa6, 0x81, 0x46, 0xa1, 0xa5, 0x58, 0x8a, 0xe0, 0x5f, 0x88, 0x00, 0x8a,
0xab, 0x82, 0x7f, 0x29, 0x0c, 0x30, 0xb6, 0x6b, 0x9b, 0x40, 0x45, 0x01, 0x79, 0x4c, 0xfd, 0x30, 0xe3, 0xea, 0x35, 0xa8, 0xd8, 0x01, 0x79, 0x4c, 0xfd, 0x30, 0x88, 0x77, 0x56, 0xac, 0x6a, 0x34,
0x88, 0xd3, 0xab, 0x76, 0x2d, 0x1a, 0x37, 0xd4, 0x87, 0x07, 0xbb, 0xb1, 0x11, 0x66, 0x7e, 0x01, 0xaa, 0x2b, 0x0f, 0xf6, 0xb6, 0xe2, 0x20, 0xca, 0xf2, 0x02, 0xa6, 0x98, 0xf9, 0x21, 0x75, 0x30,
0x53, 0xcc, 0xfc, 0x90, 0x3a, 0x98, 0xe9, 0x85, 0x0c, 0x86, 0xa9, 0x11, 0x66, 0x7e, 0xed, 0x2e, 0xd3, 0x0a, 0x19, 0x8c, 0xd2, 0x20, 0xca, 0xf2, 0xea, 0x5d, 0x58, 0x4d, 0x17, 0xbb, 0xb6, 0x8b,
0xa8, 0xa5, 0x87, 0x36, 0x72, 0x31, 0xd3, 0x8b, 0x32, 0x60, 0x2d, 0x1a, 0x37, 0x6a, 0x70, 0xda, 0x99, 0x56, 0x94, 0x82, 0xd5, 0x68, 0x54, 0xaf, 0xa2, 0xc9, 0x04, 0x9a, 0xe6, 0xd4, 0x0d, 0x58,
0x01, 0x67, 0x39, 0xed, 0x3e, 0xa8, 0x7b, 0xbe, 0x97, 0x22, 0x2f, 0xe0, 0x3e, 0xd3, 0x4b, 0x32, 0xf3, 0x7c, 0x2f, 0x45, 0x9e, 0xa3, 0x1d, 0xa6, 0x95, 0xa4, 0xf4, 0x52, 0x34, 0xaa, 0xd7, 0x76,
0xf4, 0x9f, 0x68, 0xdc, 0xa8, 0xb7, 0x67, 0x5d, 0xf0, 0x3c, 0xdb, 0xfa, 0xa2, 0x80, 0xe2, 0x6f, 0xa7, 0x53, 0xe8, 0x2c, 0xdb, 0xfc, 0x02, 0x60, 0xf1, 0x6f, 0x9a, 0x1d, 0xef, 0xf3, 0xb0, 0xf2,
0xb7, 0x54, 0x3e, 0xe4, 0x41, 0xf5, 0xcf, 0x36, 0x99, 0x6c, 0x13, 0xf1, 0x04, 0x57, 0xbb, 0x46, 0xcf, 0x0f, 0x0d, 0xf1, 0xdc, 0x96, 0x3b, 0x2d, 0x2e, 0xf2, 0xdc, 0xce, 0x1f, 0x13, 0x9f, 0x00,
0x96, 0x7e, 0x82, 0x57, 0xef, 0x8f, 0xcf, 0x0a, 0xa8, 0xac, 0x68, 0x71, 0xd8, 0xb3, 0x55, 0xff, 0x2c, 0x2f, 0x69, 0x3e, 0x6c, 0x4c, 0x1b, 0xd6, 0x16, 0x1a, 0x9e, 0xef, 0xf4, 0x2d, 0x4c, 0xbb,
0x7f, 0x79, 0xd5, 0x17, 0x97, 0xfb, 0x0e, 0xa4, 0xf3, 0xd7, 0x6e, 0x80, 0x4a, 0xfa, 0xd8, 0x65, 0xae, 0x5e, 0x87, 0xe5, 0xf4, 0x4d, 0x4b, 0x9f, 0x4a, 0xb6, 0x6f, 0xfa, 0xec, 0xd1, 0x98, 0x50,
0xb1, 0x6a, 0x96, 0x3c, 0xdd, 0x07, 0x70, 0x42, 0x68, 0x4d, 0x50, 0xec, 0x13, 0xaf, 0xab, 0xe7, 0x1b, 0xb0, 0xd8, 0x27, 0x5e, 0x57, 0xcb, 0x4b, 0xf2, 0xbf, 0x84, 0x2c, 0x6e, 0x13, 0xaf, 0x8b,
0x25, 0xf9, 0x57, 0x42, 0x16, 0xf7, 0x88, 0xd7, 0x85, 0xd2, 0x23, 0x08, 0x0f, 0xb9, 0x58, 0x5e, 0x64, 0x46, 0x10, 0x9e, 0xed, 0x62, 0x79, 0x03, 0x26, 0x08, 0xf1, 0x9a, 0x91, 0xcc, 0x34, 0x3f,
0x88, 0x29, 0x42, 0x3c, 0x73, 0x28, 0x3d, 0x62, 0x56, 0xe5, 0xe4, 0x32, 0x4d, 0xf4, 0x94, 0x85, 0x03, 0xb8, 0x92, 0xdc, 0x9e, 0x71, 0x3d, 0xb0, 0xb0, 0xde, 0xa4, 0xbf, 0xfc, 0x45, 0xfc, 0xfd,
0x7a, 0xd3, 0xf5, 0xe5, 0x97, 0xa9, 0xef, 0xf2, 0xec, 0x9a, 0x05, 0x54, 0xf1, 0xcb, 0x02, 0xe4, 0x79, 0x77, 0xd5, 0x84, 0x8a, 0xf8, 0x65, 0x81, 0xed, 0x60, 0xad, 0x28, 0xb1, 0xd5, 0x04, 0x53,
0x60, 0xbd, 0x28, 0xb1, 0xb5, 0x04, 0x53, 0xdb, 0xa9, 0x03, 0x66, 0x8c, 0xbd, 0x35, 0x3a, 0x33, 0x76, 0xd3, 0x04, 0xca, 0x18, 0xab, 0x75, 0x74, 0xaa, 0xe7, 0x8e, 0x4f, 0xf5, 0xdc, 0xc9, 0xa9,
0x72, 0x27, 0x67, 0x46, 0xee, 0xf4, 0xcc, 0xc8, 0xbd, 0x8f, 0x0c, 0x65, 0x14, 0x19, 0xca, 0x49, 0x9e, 0x7b, 0x17, 0xe9, 0xe0, 0x28, 0xd2, 0xc1, 0x71, 0xa4, 0x83, 0x93, 0x48, 0x07, 0x3f, 0x22,
0x64, 0x28, 0xa7, 0x91, 0xa1, 0x7c, 0x8f, 0x0c, 0xe5, 0xe3, 0x0f, 0x23, 0xf7, 0xaa, 0x9c, 0x4c, 0x1d, 0x7c, 0xfc, 0xa9, 0xe7, 0x5e, 0xe6, 0x87, 0xed, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x66,
0xfd, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x74, 0x24, 0x6a, 0xfa, 0x45, 0x0a, 0x00, 0x00, 0x92, 0x08, 0x1d, 0x04, 0x0a, 0x00, 0x00,
} }

View file

@ -85,7 +85,8 @@ message PolicyRule {
// +optional // +optional
repeated string apiGroups = 2; repeated string apiGroups = 2;
// Resources is a list of resources this rule applies to. ResourceAll represents all resources. // Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups.
// '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.
// +optional // +optional
repeated string resources = 3; repeated string resources = 3;

View file

@ -54,7 +54,8 @@ type PolicyRule struct {
// the enumerated resources in any API group will be allowed. // the enumerated resources in any API group will be allowed.
// +optional // +optional
APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"`
// Resources is a list of resources this rule applies to. ResourceAll represents all resources. // Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups.
// '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.
// +optional // +optional
Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"`
// ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
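
The reworded comment above introduces the '*/foo' form for subresource wildcards. A hedged example of a rule using it, assuming the k8s.io/api/rbac/v1 Go types; this vendored file may belong to a different rbac version, so treat the import as illustrative.

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
)

func main() {
	// Grants get/list on every resource in the apps group, plus the
	// "status" subresource of every resource in that group ("*/status").
	rule := rbacv1.PolicyRule{
		Verbs:     []string{"get", "list"},
		APIGroups: []string{"apps"},
		Resources: []string{"*", "*/status"},
	}
	fmt.Printf("%+v\n", rule)
}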

View file

@ -72,7 +72,7 @@ var map_PolicyRule = map[string]string{
"": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.",
"verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.",
"apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.",
"resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", "resources": "Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.",
"resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.",
"nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.",
} }

View file

@ -1,6 +1,6 @@
# apiextensions-apiserver # apiextensions-apiserver
Implements https://github.com/kubernetes/community/blob/master/contributors/design-proposals/thirdpartyresources.md Implements: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/thirdpartyresources.md
It provides an API for registering `CustomResourceDefinitions`. It provides an API for registering `CustomResourceDefinitions`.
@ -12,7 +12,7 @@ delegate server inside of `kube-apiserver`.
## Compatibility ## Compatibility
HEAD of this repo will match HEAD of k8s.io/apiserver, k8s.io/apimachinvery, and k8s.io/client-go. HEAD of this repo will match HEAD of k8s.io/apiserver, k8s.io/apimachinery, and k8s.io/client-go.
## Where does it come from? ## Where does it come from?

View file

@ -87,6 +87,7 @@ option go_package = "resource";
// +protobuf.embed=string // +protobuf.embed=string
// +protobuf.options.marshal=false // +protobuf.options.marshal=false
// +protobuf.options.(gogoproto.goproto_stringer)=false // +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen=true
// +k8s:openapi-gen=true // +k8s:openapi-gen=true
message Quantity { message Quantity {
optional string string = 1; optional string string = 1;

View file

@ -93,6 +93,7 @@ import (
// +protobuf.embed=string // +protobuf.embed=string
// +protobuf.options.marshal=false // +protobuf.options.marshal=false
// +protobuf.options.(gogoproto.goproto_stringer)=false // +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen=true
// +k8s:openapi-gen=true // +k8s:openapi-gen=true
type Quantity struct { type Quantity struct {
// i is the quantity in int64 scaled form, if d.Dec == nil // i is the quantity in int64 scaled form, if d.Dec == nil
@ -415,7 +416,7 @@ func (_ Quantity) OpenAPIDefinition() openapi.OpenAPIDefinition {
// Note about BinarySI: // Note about BinarySI:
// * If q.Format is set to BinarySI and q.Amount represents a non-zero value between // * If q.Format is set to BinarySI and q.Amount represents a non-zero value between
// -1 and +1, it will be emitted as if q.Format were DecimalSI. // -1 and +1, it will be emitted as if q.Format were DecimalSI.
// * Otherwise, if q.Format is set to BinarySI, frational parts of q.Amount will be // * Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be
// rounded up. (1.1i becomes 2i.) // rounded up. (1.1i becomes 2i.)
func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) { func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) {
if q.IsZero() { if q.IsZero() {

View file

@ -0,0 +1,44 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
package resource
import (
conversion "k8s.io/apimachinery/pkg/conversion"
reflect "reflect"
)
// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them.
//
// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented.
func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc {
return []conversion.GeneratedDeepCopyFunc{
{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
in.(*Quantity).DeepCopyInto(out.(*Quantity))
return nil
}, InType: reflect.TypeOf(&Quantity{})},
}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Quantity) DeepCopyInto(out *Quantity) {
*out = in.DeepCopy()
return
}

View file

@ -38,9 +38,6 @@ var (
AddToScheme = localSchemeBuilder.AddToScheme AddToScheme = localSchemeBuilder.AddToScheme
) )
// Copier exposes copying on this scheme.
var Copier runtime.ObjectCopier = scheme
// Codecs provides access to encoding and decoding for the scheme. // Codecs provides access to encoding and decoding for the scheme.
var Codecs = serializer.NewCodecFactory(scheme) var Codecs = serializer.NewCodecFactory(scheme)

View file

@ -252,7 +252,6 @@ func Convert_map_to_unversioned_LabelSelector(in *map[string]string, out *LabelS
if in == nil { if in == nil {
return nil return nil
} }
out = new(LabelSelector)
for labelKey, labelValue := range *in { for labelKey, labelValue := range *in {
AddLabelToSelector(out, labelKey, labelValue) AddLabelToSelector(out, labelKey, labelValue)
} }
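
The deleted out = new(LabelSelector) line is a classic pointer-parameter pitfall: reassigning out inside the function only changes the local copy of the pointer, so the labels were being added to a throwaway object the caller never saw, which is presumably why the allocation was dropped. A standalone illustration with stand-in types:

package main

import "fmt"

type labelSelector struct{ MatchLabels map[string]string }

// fillBad mimics the removed line: it reassigns the out pointer, so the
// labels land on a throwaway object the caller never sees.
func fillBad(in map[string]string, out *labelSelector) {
	out = &labelSelector{} // local reassignment only
	out.MatchLabels = in
}

// fillGood writes through the pointer it was given, as the fixed code does.
func fillGood(in map[string]string, out *labelSelector) {
	out.MatchLabels = in
}

func main() {
	a, b := &labelSelector{}, &labelSelector{}
	fillBad(map[string]string{"app": "web"}, a)
	fillGood(map[string]string{"app": "web"}, b)
	fmt.Println(a.MatchLabels) // map[] (the caller's selector stays empty)
	fmt.Println(b.MatchLabels) // map[app:web]
}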

View file

@ -175,10 +175,10 @@ func (t *MicroTime) Fuzz(c fuzz.Continue) {
if t == nil { if t == nil {
return return
} }
// Allow for about 1000 years of randomness. Leave off nanoseconds // Allow for about 1000 years of randomness. Accurate to a tenth of
// because JSON doesn't represent them so they can't round-trip // micro second. Leave off nanoseconds because JSON doesn't
// properly. // represent them so they can't round-trip properly.
t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60*1000*1000), 0) t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000))
} }
var _ fuzz.Interface = &MicroTime{} var _ fuzz.Interface = &MicroTime{}

View file

@ -80,8 +80,14 @@ func (t *Time) Before(u *Time) bool {
// Equal reports whether the time instant t is equal to u. // Equal reports whether the time instant t is equal to u.
func (t *Time) Equal(u *Time) bool { func (t *Time) Equal(u *Time) bool {
if t == nil && u == nil {
return true
}
if t != nil && u != nil {
return t.Time.Equal(u.Time) return t.Time.Equal(u.Time)
} }
return false
}
// Unix returns the local time corresponding to the given Unix time // Unix returns the local time corresponding to the given Unix time
// by wrapping time.Unix. // by wrapping time.Unix.
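
The added nil checks make Equal safe when either operand is a nil pointer: two nils compare equal, nil against non-nil compares unequal, and the old unconditional t.Time.Equal(u.Time) dereference can no longer panic. A small stand-alone sketch of the same pattern; the wrapper type is a stand-in, not the vendored Time.

package main

import (
	"fmt"
	"time"
)

type wrappedTime struct{ time.Time }

// equal follows the patched logic: nil == nil, nil != non-nil,
// otherwise defer to time.Time.Equal.
func (t *wrappedTime) equal(u *wrappedTime) bool {
	if t == nil && u == nil {
		return true
	}
	if t != nil && u != nil {
		return t.Time.Equal(u.Time)
	}
	return false
}

func main() {
	var a, b *wrappedTime
	now := wrappedTime{time.Now()}
	fmt.Println(a.equal(b))    // true: both nil
	fmt.Println(a.equal(&now)) // false: only one side is set
}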

View file

@ -145,33 +145,13 @@ func (u *Unstructured) UnmarshalJSON(b []byte) error {
return err return err
} }
func deepCopyJSON(x interface{}) interface{} {
switch x := x.(type) {
case map[string]interface{}:
clone := make(map[string]interface{}, len(x))
for k, v := range x {
clone[k] = deepCopyJSON(v)
}
return clone
case []interface{}:
clone := make([]interface{}, len(x))
for i := range x {
clone[i] = deepCopyJSON(x[i])
}
return clone
default:
// only non-pointer values (float64, int64, bool, string) are left. These can be copied by-value.
return x
}
}
func (in *Unstructured) DeepCopy() *Unstructured { func (in *Unstructured) DeepCopy() *Unstructured {
if in == nil { if in == nil {
return nil return nil
} }
out := new(Unstructured) out := new(Unstructured)
*out = *in *out = *in
out.Object = deepCopyJSON(in.Object).(map[string]interface{}) out.Object = unstructured.DeepCopyJSON(in.Object)
return out return out
} }
@ -181,7 +161,7 @@ func (in *UnstructuredList) DeepCopy() *UnstructuredList {
} }
out := new(UnstructuredList) out := new(UnstructuredList)
*out = *in *out = *in
out.Object = deepCopyJSON(in.Object).(map[string]interface{}) out.Object = unstructured.DeepCopyJSON(in.Object)
out.Items = make([]Unstructured, len(in.Items)) out.Items = make([]Unstructured, len(in.Items))
for i := range in.Items { for i := range in.Items {
in.Items[i].DeepCopyInto(&out.Items[i]) in.Items[i].DeepCopyInto(&out.Items[i])

View file

@ -98,7 +98,7 @@ type TableRowCondition struct {
type RowConditionType string type RowConditionType string
// These are valid conditions of a row. This list is not exhaustive and new conditions may be // These are valid conditions of a row. This list is not exhaustive and new conditions may be
// inculded by other resources. // included by other resources.
const ( const (
// RowCompleted means the underlying resource has reached completion and may be given less // RowCompleted means the underlying resource has reached completion and may be given less
// visual priority than other resources. // visual priority than other resources.

View file

@ -89,9 +89,16 @@ func customMarshalValue(value reflect.Value) (reflect.Value, bool) {
} }
marshaler, ok := value.Interface().(Marshaler) marshaler, ok := value.Interface().(Marshaler)
if !ok {
if !isPointerKind(value.Kind()) && value.CanAddr() {
marshaler, ok = value.Addr().Interface().(Marshaler)
if !ok { if !ok {
return reflect.Value{}, false return reflect.Value{}, false
} }
} else {
return reflect.Value{}, false
}
}
// Don't invoke functions on nil pointers // Don't invoke functions on nil pointers
// If the type implements MarshalQueryParameter, AND the tag is not omitempty, AND the value is a nil pointer, "" seems like a reasonable response // If the type implements MarshalQueryParameter, AND the tag is not omitempty, AND the value is a nil pointer, "" seems like a reasonable response
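
The retry through value.Addr() above covers Marshaler implementations with a pointer receiver: the value type alone does not satisfy the interface, but the address of an addressable value does. A self-contained sketch of that reflection detail; the interface and types are invented for the demo.

package main

import (
	"fmt"
	"reflect"
)

type marshaler interface{ MarshalQueryParameter() (string, error) }

type duration struct{ s string }

// Pointer receiver: only *duration implements marshaler, duration does not.
func (d *duration) MarshalQueryParameter() (string, error) { return d.s, nil }

func main() {
	v := reflect.ValueOf(&struct{ D duration }{D: duration{s: "5s"}}).Elem().Field(0)

	_, ok := v.Interface().(marshaler)
	fmt.Println(ok) // false: the value type lacks the pointer-receiver method

	if !ok && v.Kind() != reflect.Ptr && v.CanAddr() {
		m, ok2 := v.Addr().Interface().(marshaler)
		fmt.Println(ok2) // true: the addressable field's pointer implements it
		s, _ := m.MarshalQueryParameter()
		fmt.Println(s) // 5s
	}
}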

View file

@ -29,6 +29,7 @@ import (
"sync/atomic" "sync/atomic"
apiequality "k8s.io/apimachinery/pkg/api/equality" apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/diff" "k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/json"
utilruntime "k8s.io/apimachinery/pkg/util/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@ -106,6 +107,8 @@ func NewConverter(mismatchDetection bool) Converter {
} }
} }
// FromUnstructured converts an object from map[string]interface{} representation into a concrete type.
// It uses encoding/json/Unmarshaler if object implements it or reflection if not.
func (c *converterImpl) FromUnstructured(u map[string]interface{}, obj interface{}) error { func (c *converterImpl) FromUnstructured(u map[string]interface{}, obj interface{}) error {
t := reflect.TypeOf(obj) t := reflect.TypeOf(obj)
value := reflect.ValueOf(obj) value := reflect.ValueOf(obj)
@ -388,19 +391,27 @@ func interfaceFromUnstructured(sv, dv reflect.Value) error {
return nil return nil
} }
// ToUnstructured converts an object into map[string]interface{} representation.
// It uses encoding/json/Marshaler if object implements it or reflection if not.
func (c *converterImpl) ToUnstructured(obj interface{}) (map[string]interface{}, error) { func (c *converterImpl) ToUnstructured(obj interface{}) (map[string]interface{}, error) {
var u map[string]interface{}
var err error
if unstr, ok := obj.(runtime.Unstructured); ok {
u = DeepCopyJSON(unstr.UnstructuredContent())
} else {
t := reflect.TypeOf(obj) t := reflect.TypeOf(obj)
value := reflect.ValueOf(obj) value := reflect.ValueOf(obj)
if t.Kind() != reflect.Ptr || value.IsNil() { if t.Kind() != reflect.Ptr || value.IsNil() {
return nil, fmt.Errorf("ToUnstructured requires a non-nil pointer to an object, got %v", t) return nil, fmt.Errorf("ToUnstructured requires a non-nil pointer to an object, got %v", t)
} }
u := &map[string]interface{}{} u = map[string]interface{}{}
err := toUnstructured(value.Elem(), reflect.ValueOf(u).Elem()) err = toUnstructured(value.Elem(), reflect.ValueOf(&u).Elem())
}
if c.mismatchDetection { if c.mismatchDetection {
newUnstr := &map[string]interface{}{} newUnstr := map[string]interface{}{}
newErr := toUnstructuredViaJSON(obj, newUnstr) newErr := toUnstructuredViaJSON(obj, &newUnstr)
if (err != nil) != (newErr != nil) { if (err != nil) != (newErr != nil) {
glog.Fatalf("ToUnstructured unexpected error for %v: error: %v", obj, err) glog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr)
} }
if err == nil && !apiequality.Semantic.DeepEqual(u, newUnstr) { if err == nil && !apiequality.Semantic.DeepEqual(u, newUnstr) {
glog.Fatalf("ToUnstructured mismatch for %#v, diff: %v", u, diff.ObjectReflectDiff(u, newUnstr)) glog.Fatalf("ToUnstructured mismatch for %#v, diff: %v", u, diff.ObjectReflectDiff(u, newUnstr))
@ -409,7 +420,34 @@ func (c *converterImpl) ToUnstructured(obj interface{}) (map[string]interface{},
if err != nil { if err != nil {
return nil, err return nil, err
} }
return *u, nil return u, nil
}
// DeepCopyJSON deep copies the passed value, assuming it is a valid JSON representation i.e. only contains
// types produced by json.Unmarshal().
func DeepCopyJSON(x map[string]interface{}) map[string]interface{} {
return deepCopyJSON(x).(map[string]interface{})
}
func deepCopyJSON(x interface{}) interface{} {
switch x := x.(type) {
case map[string]interface{}:
clone := make(map[string]interface{}, len(x))
for k, v := range x {
clone[k] = deepCopyJSON(v)
}
return clone
case []interface{}:
clone := make([]interface{}, len(x))
for i, v := range x {
clone[i] = deepCopyJSON(v)
}
return clone
case string, int64, bool, float64, nil, encodingjson.Number:
return x
default:
panic(fmt.Errorf("cannot deep copy %T", x))
}
} }
func toUnstructuredViaJSON(obj interface{}, u *map[string]interface{}) error { func toUnstructuredViaJSON(obj interface{}, u *map[string]interface{}) error {
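
The new exported DeepCopyJSON only accepts values shaped like json.Unmarshal output (nested map[string]interface{} and []interface{} over string, int64, bool, float64, nil and json.Number) and panics on anything else, such as a plain int. A hedged usage sketch, assuming the converter package in this vendor drop still imports as k8s.io/apimachinery/pkg/conversion/unstructured:

package main

import (
	"fmt"

	// Import path assumed from the surrounding vendor layout.
	"k8s.io/apimachinery/pkg/conversion/unstructured"
)

func main() {
	obj := map[string]interface{}{
		"metadata": map[string]interface{}{"name": "demo"},
		"spec":     map[string]interface{}{"replicas": int64(3)},
	}

	clone := unstructured.DeepCopyJSON(obj)
	clone["spec"].(map[string]interface{})["replicas"] = int64(5)

	// The copy is fully independent of the original.
	fmt.Println(obj["spec"])   // map[replicas:3]
	fmt.Println(clone["spec"]) // map[replicas:5]

	// Values outside the JSON type set would panic, e.g. a plain int:
	// unstructured.DeepCopyJSON(map[string]interface{}{"n": 3})
}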

View file

@ -550,7 +550,7 @@ func (p *Parser) lookahead(context ParserContext) (Token, string) {
return tok, lit return tok, lit
} }
// consume returns current token and string. Increments the the position // consume returns current token and string. Increments the position
func (p *Parser) consume(context ParserContext) (Token, string) { func (p *Parser) consume(context ParserContext) (Token, string) {
p.position++ p.position++
tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal

View file

@ -203,13 +203,6 @@ type ObjectCreater interface {
New(kind schema.GroupVersionKind) (out Object, err error) New(kind schema.GroupVersionKind) (out Object, err error)
} }
// ObjectCopier duplicates an object.
type ObjectCopier interface {
// Copy returns an exact copy of the provided Object, or an error if the
// copy could not be completed.
Copy(Object) (Object, error)
}
// ResourceVersioner provides methods for setting and retrieving // ResourceVersioner provides methods for setting and retrieving
// the resource version from an API object. // the resource version from an API object.
type ResourceVersioner interface { type ResourceVersioner interface {

View file

@ -420,20 +420,6 @@ func (s *Scheme) Default(src Object) {
} }
} }
// Copy does a deep copy of an API object.
func (s *Scheme) Copy(src Object) (Object, error) {
dst, err := s.DeepCopy(src)
if err != nil {
return nil, err
}
return dst.(Object), nil
}
// Performs a deep copy of the given object.
func (s *Scheme) DeepCopy(src interface{}) (interface{}, error) {
return s.cloner.DeepCopy(src)
}
// Convert will attempt to convert in into out. Both must be pointers. For easy // Convert will attempt to convert in into out. Both must be pointers. For easy
// testing of conversion functions. Returns an error if the conversion isn't // testing of conversion functions. Returns an error if the conversion isn't
// possible. You can call this with types that haven't been registered (for example, // possible. You can call this with types that haven't been registered (for example,
@ -501,9 +487,9 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) (
// TODO: when we move to server API versions, we should completely remove the unversioned concept // TODO: when we move to server API versions, we should completely remove the unversioned concept
if unversionedKind, ok := s.unversionedTypes[t]; ok { if unversionedKind, ok := s.unversionedTypes[t]; ok {
if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok { if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok {
return copyAndSetTargetKind(copy, s, in, gvk) return copyAndSetTargetKind(copy, in, gvk)
} }
return copyAndSetTargetKind(copy, s, in, unversionedKind) return copyAndSetTargetKind(copy, in, unversionedKind)
} }
return nil, NewNotRegisteredErrForTarget(t, target) return nil, NewNotRegisteredErrForTarget(t, target)
@ -512,16 +498,16 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) (
// target wants to use the existing type, set kind and return (no conversion necessary) // target wants to use the existing type, set kind and return (no conversion necessary)
for _, kind := range kinds { for _, kind := range kinds {
if gvk == kind { if gvk == kind {
return copyAndSetTargetKind(copy, s, in, gvk) return copyAndSetTargetKind(copy, in, gvk)
} }
} }
// type is unversioned, no conversion necessary // type is unversioned, no conversion necessary
if unversionedKind, ok := s.unversionedTypes[t]; ok { if unversionedKind, ok := s.unversionedTypes[t]; ok {
if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok { if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok {
return copyAndSetTargetKind(copy, s, in, gvk) return copyAndSetTargetKind(copy, in, gvk)
} }
return copyAndSetTargetKind(copy, s, in, unversionedKind) return copyAndSetTargetKind(copy, in, unversionedKind)
} }
out, err := s.New(gvk) out, err := s.New(gvk)
@ -549,7 +535,7 @@ func (s *Scheme) generateConvertMeta(in interface{}) (conversion.FieldMatchingFl
} }
// copyAndSetTargetKind performs a conditional copy before returning the object, or an error if copy was not successful. // copyAndSetTargetKind performs a conditional copy before returning the object, or an error if copy was not successful.
func copyAndSetTargetKind(copy bool, copier ObjectCopier, obj Object, kind schema.GroupVersionKind) (Object, error) { func copyAndSetTargetKind(copy bool, obj Object, kind schema.GroupVersionKind) (Object, error) {
if copy { if copy {
obj = obj.DeepCopyObject() obj = obj.DeepCopyObject()
} }
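
This hunk reflects the main theme of the bump: the reflection-based copier plumbing is removed and call sites rely on the statically generated DeepCopyObject method on runtime.Object. A hedged sketch of the call-site migration, with the removed API shown only in comments:

package demo

import "k8s.io/apimachinery/pkg/runtime"

// copyForMutation shows the call-site change implied by this hunk.
func copyForMutation(obj runtime.Object) runtime.Object {
	// Before the bump (API removed in this commit):
	//   copied, err := scheme.Copy(obj)
	//   if err != nil { /* fall back or handle */ }
	//
	// After the bump: every runtime.Object carries a generated deep copy.
	return obj.DeepCopyObject()
}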

View file

@ -21,7 +21,6 @@ import (
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
) )
// NewCodecForScheme is a convenience method for callers that are using a scheme. // NewCodecForScheme is a convenience method for callers that are using a scheme.
@ -33,7 +32,7 @@ func NewCodecForScheme(
encodeVersion runtime.GroupVersioner, encodeVersion runtime.GroupVersioner,
decodeVersion runtime.GroupVersioner, decodeVersion runtime.GroupVersioner,
) runtime.Codec { ) runtime.Codec {
return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, nil, encodeVersion, decodeVersion) return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, nil, encodeVersion, decodeVersion)
} }
// NewDefaultingCodecForScheme is a convenience method for callers that are using a scheme. // NewDefaultingCodecForScheme is a convenience method for callers that are using a scheme.
@ -45,7 +44,7 @@ func NewDefaultingCodecForScheme(
encodeVersion runtime.GroupVersioner, encodeVersion runtime.GroupVersioner,
decodeVersion runtime.GroupVersioner, decodeVersion runtime.GroupVersioner,
) runtime.Codec { ) runtime.Codec {
return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, scheme, encodeVersion, decodeVersion) return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, encodeVersion, decodeVersion)
} }
// NewCodec takes objects in their internal versions and converts them to external versions before // NewCodec takes objects in their internal versions and converts them to external versions before
@ -56,7 +55,6 @@ func NewCodec(
decoder runtime.Decoder, decoder runtime.Decoder,
convertor runtime.ObjectConvertor, convertor runtime.ObjectConvertor,
creater runtime.ObjectCreater, creater runtime.ObjectCreater,
copier runtime.ObjectCopier,
typer runtime.ObjectTyper, typer runtime.ObjectTyper,
defaulter runtime.ObjectDefaulter, defaulter runtime.ObjectDefaulter,
encodeVersion runtime.GroupVersioner, encodeVersion runtime.GroupVersioner,
@ -67,7 +65,6 @@ func NewCodec(
decoder: decoder, decoder: decoder,
convertor: convertor, convertor: convertor,
creater: creater, creater: creater,
copier: copier,
typer: typer, typer: typer,
defaulter: defaulter, defaulter: defaulter,
@ -82,7 +79,6 @@ type codec struct {
decoder runtime.Decoder decoder runtime.Decoder
convertor runtime.ObjectConvertor convertor runtime.ObjectConvertor
creater runtime.ObjectCreater creater runtime.ObjectCreater
copier runtime.ObjectCopier
typer runtime.ObjectTyper typer runtime.ObjectTyper
defaulter runtime.ObjectDefaulter defaulter runtime.ObjectDefaulter
@ -123,12 +119,7 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru
if c.defaulter != nil { if c.defaulter != nil {
// create a copy to ensure defaulting is not applied to the original versioned objects // create a copy to ensure defaulting is not applied to the original versioned objects
if isVersioned { if isVersioned {
copied, err := c.copier.Copy(obj) versioned.Objects = []runtime.Object{obj.DeepCopyObject()}
if err != nil {
utilruntime.HandleError(err)
copied = obj
}
versioned.Objects = []runtime.Object{copied}
} }
c.defaulter.Default(obj) c.defaulter.Default(obj)
} else { } else {
@ -151,12 +142,7 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru
// Convert if needed. // Convert if needed.
if isVersioned { if isVersioned {
// create a copy, because ConvertToVersion does not guarantee non-mutation of objects // create a copy, because ConvertToVersion does not guarantee non-mutation of objects
copied, err := c.copier.Copy(obj) versioned.Objects = []runtime.Object{obj.DeepCopyObject()}
if err != nil {
utilruntime.HandleError(err)
copied = obj
}
versioned.Objects = []runtime.Object{copied}
} }
// perform defaulting if requested // perform defaulting if requested

View file

@ -142,10 +142,6 @@ func objectReflectDiff(path *field.Path, a, b reflect.Value) []diff {
} }
if sub := objectReflectDiff(path.Child(a.Type().Field(i).Name), a.Field(i), b.Field(i)); len(sub) > 0 { if sub := objectReflectDiff(path.Child(a.Type().Field(i).Name), a.Field(i), b.Field(i)); len(sub) > 0 {
changes = append(changes, sub...) changes = append(changes, sub...)
} else {
if !reflect.DeepEqual(a.Field(i).Interface(), b.Field(i).Interface()) {
changes = append(changes, diff{path: path, a: a.Field(i).Interface(), b: b.Field(i).Interface()})
}
} }
} }
return changes return changes
@ -178,21 +174,18 @@ func objectReflectDiff(path *field.Path, a, b reflect.Value) []diff {
} }
return nil return nil
} }
var diffs []diff
for i := 0; i < l; i++ { for i := 0; i < l; i++ {
if !reflect.DeepEqual(a.Index(i), b.Index(i)) { if !reflect.DeepEqual(a.Index(i), b.Index(i)) {
return objectReflectDiff(path.Index(i), a.Index(i), b.Index(i)) diffs = append(diffs, objectReflectDiff(path.Index(i), a.Index(i), b.Index(i))...)
} }
} }
var diffs []diff
for i := l; i < lA; i++ { for i := l; i < lA; i++ {
diffs = append(diffs, diff{path: path.Index(i), a: a.Index(i), b: nil}) diffs = append(diffs, diff{path: path.Index(i), a: a.Index(i), b: nil})
} }
for i := l; i < lB; i++ { for i := l; i < lB; i++ {
diffs = append(diffs, diff{path: path.Index(i), a: nil, b: b.Index(i)}) diffs = append(diffs, diff{path: path.Index(i), a: nil, b: b.Index(i)})
} }
if len(diffs) == 0 {
diffs = append(diffs, diff{path: path, a: a, b: b})
}
return diffs return diffs
case reflect.Map: case reflect.Map:
if reflect.DeepEqual(a.Interface(), b.Interface()) { if reflect.DeepEqual(a.Interface(), b.Interface()) {

View file

@ -110,7 +110,7 @@ func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) {
func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) { func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
proxier := s.proxier proxier := s.proxier
if proxier == nil { if proxier == nil {
proxier = http.ProxyFromEnvironment proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
} }
proxyURL, err := proxier(req) proxyURL, err := proxier(req)
if err != nil { if err != nil {

View file

@ -26,6 +26,7 @@ import (
"net/http" "net/http"
"net/url" "net/url"
"os" "os"
"path"
"strconv" "strconv"
"strings" "strings"
@ -33,6 +34,26 @@ import (
"golang.org/x/net/http2" "golang.org/x/net/http2"
) )
// JoinPreservingTrailingSlash does a path.Join of the specified elements,
// preserving any trailing slash on the last non-empty segment
func JoinPreservingTrailingSlash(elem ...string) string {
// do the basic path join
result := path.Join(elem...)
// find the last non-empty segment
for i := len(elem) - 1; i >= 0; i-- {
if len(elem[i]) > 0 {
// if the last segment ended in a slash, ensure our result does as well
if strings.HasSuffix(elem[i], "/") && !strings.HasSuffix(result, "/") {
result += "/"
}
break
}
}
return result
}
// IsProbableEOF returns true if the given error resembles a connection termination // IsProbableEOF returns true if the given error resembles a connection termination
// scenario that would justify assuming that the watch is empty. // scenario that would justify assuming that the watch is empty.
// These errors are what the Go http stack returns back to us which are general // These errors are what the Go http stack returns back to us which are general
@ -235,8 +256,11 @@ func isDefault(transportProxier func(*http.Request) (*url.URL, error)) bool {
// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if // NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if
// no matching CIDRs are found // no matching CIDRs are found
func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) {
// we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it // we wrap the default method, so we only need to perform our check if the NO_PROXY (or no_proxy) envvar has a CIDR in it
noProxyEnv := os.Getenv("NO_PROXY") noProxyEnv := os.Getenv("NO_PROXY")
if noProxyEnv == "" {
noProxyEnv = os.Getenv("no_proxy")
}
noProxyRules := strings.Split(noProxyEnv, ",") noProxyRules := strings.Split(noProxyEnv, ",")
cidrs := []*net.IPNet{} cidrs := []*net.IPNet{}
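
JoinPreservingTrailingSlash exists because path.Join always cleans away a trailing slash, which can matter when the joined path is forwarded to a server that treats "x" and "x/" differently. A short comparison, assuming the helper imports as k8s.io/apimachinery/pkg/util/net as elsewhere in this vendor tree:

package main

import (
	"fmt"
	"path"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	// path.Join normalizes away the trailing slash...
	fmt.Println(path.Join("/apis", "apps/v1/")) // /apis/apps/v1

	// ...while the new helper keeps it when the last non-empty segment had one.
	fmt.Println(utilnet.JoinPreservingTrailingSlash("/apis", "apps/v1/")) // /apis/apps/v1/
	fmt.Println(utilnet.JoinPreservingTrailingSlash("/apis", "apps/v1"))  // /apis/apps/v1
}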

View file

@ -1,95 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package rand provides utilities related to randomization.
package rand
import (
"math/rand"
"sync"
"time"
)
var rng = struct {
sync.Mutex
rand *rand.Rand
}{
rand: rand.New(rand.NewSource(time.Now().UTC().UnixNano())),
}
// Intn generates an integer in range [0,max).
// By design this should panic if input is invalid, <= 0.
func Intn(max int) int {
rng.Lock()
defer rng.Unlock()
return rng.rand.Intn(max)
}
// IntnRange generates an integer in range [min,max).
// By design this should panic if input is invalid, <= 0.
func IntnRange(min, max int) int {
rng.Lock()
defer rng.Unlock()
return rng.rand.Intn(max-min) + min
}
// IntnRange generates an int64 integer in range [min,max).
// By design this should panic if input is invalid, <= 0.
func Int63nRange(min, max int64) int64 {
rng.Lock()
defer rng.Unlock()
return rng.rand.Int63n(max-min) + min
}
// Seed seeds the rng with the provided seed.
func Seed(seed int64) {
rng.Lock()
defer rng.Unlock()
rng.rand = rand.New(rand.NewSource(seed))
}
// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n)
// from the default Source.
func Perm(n int) []int {
rng.Lock()
defer rng.Unlock()
return rng.rand.Perm(n)
}
// We omit vowels from the set of available characters to reduce the chances
// of "bad words" being formed.
var alphanums = []rune("bcdfghjklmnpqrstvwxz2456789")
// String generates a random alphanumeric string, without vowels, which is n
// characters long. This will panic if n is less than zero.
func String(length int) string {
b := make([]rune, length)
for i := range b {
b[i] = alphanums[Intn(len(alphanums))]
}
return string(b)
}
// SafeEncodeString encodes s using the same characters as rand.String. This reduces the chances of bad words and
// ensures that strings generated from hash functions appear consistent throughout the API.
func SafeEncodeString(s string) string {
r := make([]rune, len(s))
for i, b := range []rune(s) {
r[i] = alphanums[(int(b) % len(alphanums))]
}
return string(r)
}

View file

@ -128,7 +128,9 @@ func (r *rudimentaryErrorBackoff) OnError(error) {
r.lastErrorTimeLock.Lock() r.lastErrorTimeLock.Lock()
defer r.lastErrorTimeLock.Unlock() defer r.lastErrorTimeLock.Unlock()
d := time.Since(r.lastErrorTime) d := time.Since(r.lastErrorTime)
if d < r.minPeriod { if d < r.minPeriod && d >= 0 {
// If the time moves backwards for any reason, do nothing
// TODO: remove check "d >= 0" after go 1.8 is no longer supported
time.Sleep(r.minPeriod - d) time.Sleep(r.minPeriod - d)
} }
r.lastErrorTime = time.Now() r.lastErrorTime = time.Now()

View file

@@ -515,6 +515,9 @@ func normalizeSliceOrder(toSort, order []interface{}, mergeKey string, kind refl
 			return nil, err
 		}
 		toSort, toDelete, err = extractToDeleteItems(toSort)
+		if err != nil {
+			return nil, err
+		}
 	}
 	sort.SliceStable(toSort, func(i, j int) bool {
@@ -554,7 +557,13 @@ func diffLists(original, modified []interface{}, t reflect.Type, mergeKey string
 	switch kind {
 	case reflect.Map:
 		patchList, deleteList, err = diffListsOfMaps(original, modified, t, mergeKey, diffOptions)
+		if err != nil {
+			return nil, nil, nil, err
+		}
 		patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind)
+		if err != nil {
+			return nil, nil, nil, err
+		}
 		orderSame, err := isOrderSame(original, modified, mergeKey)
 		if err != nil {
 			return nil, nil, nil, err
@@ -580,6 +589,9 @@ func diffLists(original, modified []interface{}, t reflect.Type, mergeKey string
 		return nil, nil, nil, mergepatch.ErrNoListOfLists
 	default:
 		patchList, deleteList, err = diffListsOfScalars(original, modified, diffOptions)
+		if err != nil {
+			return nil, nil, nil, err
+		}
 		patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind)
 	// generate the setElementOrder list when there are content changes or order changes
 	if diffOptions.SetElementOrder && ((!diffOptions.IgnoreDeletions && len(deleteList) > 0) ||
@@ -1054,7 +1066,7 @@ func applyRetainKeysDirective(original, patch map[string]interface{}, options Me
 // Then, sort them by the relative order in setElementOrder, patch list and live list.
 // The precedence is $setElementOrder > order in patch list > order in live list.
 // This function will delete the item after merging it to prevent process it again in the future.
-// Ref: https://git.k8s.io/community/contributors/design-proposals/preserve-order-in-strategic-merge-patch.md
+// Ref: https://git.k8s.io/community/contributors/design-proposals/cli/preserve-order-in-strategic-merge-patch.md
 func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Type, mergeOptions MergeOptions) error {
 	for key, patchV := range patch {
 		// Do nothing if there is no ordering directive
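The added error checks above fix a masking bug: the err returned by a call such as diffListsOfMaps was previously overwritten by the next assignment before anyone inspected it. The following standalone sketch shows the difference in control flow; stepOne and stepTwo are hypothetical stand-ins, not the vendored helpers.

package main

import (
	"errors"
	"fmt"
)

// stepOne and stepTwo are hypothetical stand-ins for fallible helpers such as
// diffListsOfMaps and normalizeSliceOrder; only the control flow matters here.
func stepOne() ([]int, error)         { return nil, errors.New("step one failed") }
func stepTwo(in []int) ([]int, error) { return in, nil }

// withoutCheck mirrors the old flow: the error from stepOne is overwritten by
// the stepTwo assignment before it is ever inspected, so the failure is lost.
func withoutCheck() ([]int, error) {
	out, err := stepOne()
	out, err = stepTwo(out)
	return out, err
}

// withCheck mirrors the hunks above: every fallible call is followed by an
// immediate error check, so the first failure is reported to the caller.
func withCheck() ([]int, error) {
	out, err := stepOne()
	if err != nil {
		return nil, err
	}
	out, err = stepTwo(out)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	_, err := withoutCheck()
	fmt.Println("old flow reports:", err) // old flow reports: <nil>
	_, err = withCheck()
	fmt.Println("new flow reports:", err) // new flow reports: step one failed
}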


@@ -19,6 +19,7 @@ package field
 import (
 	"fmt"
 	"reflect"
+	"strconv"
 	"strings"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -175,7 +176,11 @@ func Invalid(field *Path, value interface{}, detail string) *Error {
 func NotSupported(field *Path, value interface{}, validValues []string) *Error {
 	detail := ""
 	if validValues != nil && len(validValues) > 0 {
-		detail = "supported values: " + strings.Join(validValues, ", ")
+		quotedValues := make([]string, len(validValues))
+		for i, v := range validValues {
+			quotedValues[i] = strconv.Quote(v)
+		}
+		detail = "supported values: " + strings.Join(quotedValues, ", ")
 	}
 	return &Error{ErrorTypeNotSupported, field.String(), value, detail}
 }
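For reference, this is what the strconv.Quote change above does to the error detail: empty strings and values containing spaces or commas stay unambiguous once each supported value is quoted. The quoteValues helper and the sample values below are illustrative, not part of the vendored package.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// quoteValues mirrors the NotSupported change above: each supported value is
// quoted before joining, so empty strings and values containing spaces or
// commas remain unambiguous in the resulting detail message.
func quoteValues(validValues []string) string {
	quoted := make([]string, len(validValues))
	for i, v := range validValues {
		quoted[i] = strconv.Quote(v)
	}
	return "supported values: " + strings.Join(quoted, ", ")
}

func main() {
	fmt.Println(quoteValues([]string{"Always", "IfNotPresent", ""}))
	// prints: supported values: "Always", "IfNotPresent", ""
}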


@@ -58,6 +58,7 @@ const (
 	// owner: @smarterclayton
 	// alpha: v1.8
+	// beta: v1.9
 	//
 	// Allow API clients to retrieve resource lists in chunks rather than
 	// all at once.
@@ -76,5 +77,5 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
 	AdvancedAuditing: {Default: true, PreRelease: utilfeature.Beta},
 	APIResponseCompression: {Default: false, PreRelease: utilfeature.Alpha},
 	Initializers: {Default: false, PreRelease: utilfeature.Alpha},
-	APIListChunking: {Default: false, PreRelease: utilfeature.Alpha},
+	APIListChunking: {Default: true, PreRelease: utilfeature.Beta},
 }


@@ -22,6 +22,7 @@ import (
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"github.com/golang/glog"
 	"github.com/spf13/pflag"
@@ -46,7 +47,7 @@ var (
 	}
 	// Special handling for a few gates.
-	specialFeatures = map[Feature]func(f *featureGate, val bool){
+	specialFeatures = map[Feature]func(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool){
 		allAlphaGate: setUnsetAlphaGates,
 	}
@@ -76,6 +77,8 @@ type FeatureGate interface {
 	// Set parses and stores flag gates for known features
 	// from a string like feature1=true,feature2=false,...
 	Set(value string) error
+	// SetFromMap stores flag gates for known features from a map[string]bool or returns an error
+	SetFromMap(m map[string]bool) error
 	// Enabled returns true if the key is enabled.
 	Enabled(key Feature) bool
 	// Add adds features to the featureGate.
@@ -86,21 +89,23 @@ type FeatureGate interface {
 // featureGate implements FeatureGate as well as pflag.Value for flag parsing.
 type featureGate struct {
-	lock sync.RWMutex
-	known map[Feature]FeatureSpec
-	special map[Feature]func(*featureGate, bool)
-	enabled map[Feature]bool
-	// is set to true when AddFlag is called. Note: initialization is not go-routine safe, lookup is
+	special map[Feature]func(map[Feature]FeatureSpec, map[Feature]bool, bool)
+	// lock guards writes to known, enabled, and reads/writes of closed
+	lock sync.Mutex
+	// known holds a map[Feature]FeatureSpec
+	known *atomic.Value
+	// enabled holds a map[Feature]bool
+	enabled *atomic.Value
+	// closed is set to true when AddFlag is called, and prevents subsequent calls to Add
 	closed bool
 }

-func setUnsetAlphaGates(f *featureGate, val bool) {
-	for k, v := range f.known {
+func setUnsetAlphaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool) {
+	for k, v := range known {
 		if v.PreRelease == Alpha {
-			if _, found := f.enabled[k]; !found {
-				f.enabled[k] = val
+			if _, found := enabled[k]; !found {
+				enabled[k] = val
 			}
 		}
 	}
@@ -110,30 +115,49 @@ func setUnsetAlphaGates(f *featureGate, val bool) {
 var _ pflag.Value = &featureGate{}

 func NewFeatureGate() *featureGate {
-	f := &featureGate{
-		known: map[Feature]FeatureSpec{},
-		special: specialFeatures,
-		enabled: map[Feature]bool{},
-	}
+	known := map[Feature]FeatureSpec{}
 	for k, v := range defaultFeatures {
-		f.known[k] = v
+		known[k] = v
+	}
+	knownValue := &atomic.Value{}
+	knownValue.Store(known)
+	enabled := map[Feature]bool{}
+	enabledValue := &atomic.Value{}
+	enabledValue.Store(enabled)
+	f := &featureGate{
+		known: knownValue,
+		special: specialFeatures,
+		enabled: enabledValue,
 	}
 	return f
 }

-// Set Parses a string of the form "key1=value1,key2=value2,..." into a
+// Set parses a string of the form "key1=value1,key2=value2,..." into a
 // map[string]bool of known keys or returns an error.
 func (f *featureGate) Set(value string) error {
 	f.lock.Lock()
 	defer f.lock.Unlock()
+	// Copy existing state
+	known := map[Feature]FeatureSpec{}
+	for k, v := range f.known.Load().(map[Feature]FeatureSpec) {
+		known[k] = v
+	}
+	enabled := map[Feature]bool{}
+	for k, v := range f.enabled.Load().(map[Feature]bool) {
+		enabled[k] = v
+	}
 	for _, s := range strings.Split(value, ",") {
 		if len(s) == 0 {
 			continue
 		}
 		arr := strings.SplitN(s, "=", 2)
 		k := Feature(strings.TrimSpace(arr[0]))
-		_, ok := f.known[Feature(k)]
+		_, ok := known[k]
 		if !ok {
 			return fmt.Errorf("unrecognized key: %s", k)
 		}
@@ -145,25 +169,62 @@ func (f *featureGate) Set(value string) error {
 		if err != nil {
 			return fmt.Errorf("invalid value of %s: %s, err: %v", k, v, err)
 		}
-		f.enabled[k] = boolValue
+		enabled[k] = boolValue
 		// Handle "special" features like "all alpha gates"
 		if fn, found := f.special[k]; found {
-			fn(f, boolValue)
+			fn(known, enabled, boolValue)
 		}
 	}
+	// Persist changes
+	f.known.Store(known)
+	f.enabled.Store(enabled)
+	glog.Infof("feature gates: %v", enabled)
+	return nil
+}
+
+// SetFromMap stores flag gates for known features from a map[string]bool or returns an error
+func (f *featureGate) SetFromMap(m map[string]bool) error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	// Copy existing state
+	known := map[Feature]FeatureSpec{}
+	for k, v := range f.known.Load().(map[Feature]FeatureSpec) {
+		known[k] = v
+	}
+	enabled := map[Feature]bool{}
+	for k, v := range f.enabled.Load().(map[Feature]bool) {
+		enabled[k] = v
+	}
+	for k, v := range m {
+		k := Feature(k)
+		_, ok := known[k]
+		if !ok {
+			return fmt.Errorf("unrecognized key: %s", k)
+		}
+		enabled[k] = v
+		// Handle "special" features like "all alpha gates"
+		if fn, found := f.special[k]; found {
+			fn(known, enabled, v)
+		}
+	}
+	// Persist changes
+	f.known.Store(known)
+	f.enabled.Store(enabled)
 	glog.Infof("feature gates: %v", f.enabled)
 	return nil
 }

 // String returns a string containing all enabled feature gates, formatted as "key1=value1,key2=value2,...".
 func (f *featureGate) String() string {
-	f.lock.RLock()
-	defer f.lock.RUnlock()
 	pairs := []string{}
-	for k, v := range f.enabled {
+	for k, v := range f.enabled.Load().(map[Feature]bool) {
 		pairs = append(pairs, fmt.Sprintf("%s=%t", k, v))
 	}
 	sort.Strings(pairs)
@@ -183,31 +244,35 @@ func (f *featureGate) Add(features map[Feature]FeatureSpec) error {
 		return fmt.Errorf("cannot add a feature gate after adding it to the flag set")
 	}
+	// Copy existing state
+	known := map[Feature]FeatureSpec{}
+	for k, v := range f.known.Load().(map[Feature]FeatureSpec) {
+		known[k] = v
+	}
 	for name, spec := range features {
-		if existingSpec, found := f.known[name]; found {
+		if existingSpec, found := known[name]; found {
 			if existingSpec == spec {
 				continue
 			}
 			return fmt.Errorf("feature gate %q with different spec already exists: %v", name, existingSpec)
 		}
-		f.known[name] = spec
+		known[name] = spec
 	}
+	// Persist updated state
+	f.known.Store(known)
 	return nil
 }

 // Enabled returns true if the key is enabled.
 func (f *featureGate) Enabled(key Feature) bool {
-	f.lock.RLock()
-	defer f.lock.RUnlock()
-	defaultValue := f.known[key].Default
-	if f.enabled != nil {
-		if v, ok := f.enabled[key]; ok {
-			return v
-		}
-	}
-	return defaultValue
+	if v, ok := f.enabled.Load().(map[Feature]bool)[key]; ok {
+		return v
+	}
+	return f.known.Load().(map[Feature]FeatureSpec)[key].Default
 }

 // AddFlag adds a flag for setting global feature gates to the specified FlagSet.
@@ -224,10 +289,8 @@ func (f *featureGate) AddFlag(fs *pflag.FlagSet) {
 // KnownFeatures returns a slice of strings describing the FeatureGate's known features.
 func (f *featureGate) KnownFeatures() []string {
-	f.lock.RLock()
-	defer f.lock.RUnlock()
 	var known []string
-	for k, v := range f.known {
+	for k, v := range f.known.Load().(map[Feature]FeatureSpec) {
 		pre := ""
 		if v.PreRelease != GA {
 			pre = fmt.Sprintf("%s - ", v.PreRelease)
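The feature-gate rewrite above replaces a single RWMutex-guarded map with a copy-on-write scheme: writers serialize on a mutex, copy the current maps, mutate the copies, and publish them through atomic.Value, while readers (Enabled, String, KnownFeatures) call Load without taking any lock. Here is a minimal, self-contained sketch of that pattern; the gates type and its methods are illustrative, not the vendored API.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type gates struct {
	mu      sync.Mutex   // serializes writers only; readers never take it
	enabled atomic.Value // always holds an immutable map[string]bool
}

func newGates() *gates {
	g := &gates{}
	g.enabled.Store(map[string]bool{})
	return g
}

// Set copies the current map, mutates the copy, and publishes it atomically,
// so a concurrent Enabled never observes a partially written map.
func (g *gates) Set(name string, on bool) {
	g.mu.Lock()
	defer g.mu.Unlock()
	old := g.enabled.Load().(map[string]bool)
	next := make(map[string]bool, len(old)+1)
	for k, v := range old {
		next[k] = v
	}
	next[name] = on
	g.enabled.Store(next)
}

// Enabled is lock-free: it reads whatever map was last published.
func (g *gates) Enabled(name string) bool {
	return g.enabled.Load().(map[string]bool)[name]
}

func main() {
	g := newGates()
	g.Set("APIListChunking", true)
	fmt.Println(g.Enabled("APIListChunking")) // true
	fmt.Println(g.Enabled("Initializers"))    // false: unset names default to false in this sketch
}

The trade-off is that every write copies the whole map, which is acceptable here because gates are set once at startup and then read on hot paths.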

Some files were not shown because too many files have changed in this diff.