Merge pull request #646 from crosbymichael/container-metrics

Add container level metrics
This commit is contained in:
Michael Crosby 2017-03-22 13:42:37 -07:00 committed by GitHub
commit beda443bcf
50 changed files with 4479 additions and 68 deletions
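The change registers a cgroups-based metrics collector as a new ContainerMonitorPlugin and threads a ContainerMonitor through the daemon and the Linux runtime. Purely as an illustration of the interfaces this commit adds (the package, type, and registration names below are made up, not part of the diff), a third-party monitor plugin would look roughly like this:

```go
package example // hypothetical package, not part of this commit

import (
	"github.com/docker/containerd"
	"github.com/docker/containerd/plugin"
)

func init() {
	// Register under the new ContainerMonitorPlugin type added by this PR.
	plugin.Register("example-monitor", &plugin.Registration{
		Type: plugin.ContainerMonitorPlugin,
		Init: New,
	})
}

func New(ic *plugin.InitContext) (interface{}, error) {
	return &exampleMonitor{}, nil
}

type exampleMonitor struct{}

// Monitor is invoked by the runtime after a container is created.
func (m *exampleMonitor) Monitor(c containerd.Container) error { return nil }

// Stop is invoked by the runtime before a container is deleted.
func (m *exampleMonitor) Stop(c containerd.Container) error { return nil }
```

At startup, loadMonitor collects all registered monitors into a multi-monitor (or a no-op monitor when none are present), and the Linux runtime calls Monitor after Create and Stop during Delete, as the hunks below show.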

View file

@ -3,6 +3,7 @@ package main
// register containerd builtins here
import (
_ "github.com/docker/containerd/linux"
_ "github.com/docker/containerd/metrics/cgroups"
_ "github.com/docker/containerd/services/content"
_ "github.com/docker/containerd/services/execution"
_ "github.com/docker/containerd/services/healthcheck"

View file

@ -104,7 +104,11 @@ func main() {
if err := serveDebugAPI(); err != nil {
return err
}
runtimes, err := loadRuntimes()
monitor, err := loadMonitor()
if err != nil {
return err
}
runtimes, err := loadRuntimes(monitor)
if err != nil {
return err
}
@ -237,7 +241,7 @@ func resolveContentStore() (*content.Store, error) {
return content.NewStore(cp)
}
func loadRuntimes() (map[string]containerd.Runtime, error) {
func loadRuntimes(monitor plugin.ContainerMonitor) (map[string]containerd.Runtime, error) {
o := make(map[string]containerd.Runtime)
for name, rr := range plugin.Registrations() {
if rr.Type != plugin.RuntimePlugin {
@ -248,6 +252,7 @@ func loadRuntimes() (map[string]containerd.Runtime, error) {
Root: conf.Root,
State: conf.State,
Context: log.WithModule(global, fmt.Sprintf("runtime-%s", name)),
Monitor: monitor,
}
if rr.Config != nil {
if err := conf.decodePlugin(name, rr.Config); err != nil {
@ -264,6 +269,30 @@ func loadRuntimes() (map[string]containerd.Runtime, error) {
return o, nil
}
func loadMonitor() (plugin.ContainerMonitor, error) {
var monitors []plugin.ContainerMonitor
for name, m := range plugin.Registrations() {
if m.Type != plugin.ContainerMonitorPlugin {
continue
}
log.G(global).Infof("loading monitor plugin %q...", name)
ic := &plugin.InitContext{
Root: conf.Root,
State: conf.State,
Context: log.WithModule(global, fmt.Sprintf("monitor-%s", name)),
}
mm, err := m.Init(ic)
if err != nil {
return nil, err
}
monitors = append(monitors, mm.(plugin.ContainerMonitor))
}
if len(monitors) == 0 {
return plugin.NewNoopMonitor(), nil
}
return plugin.NewMultiContainerMonitor(monitors...), nil
}
func loadSnapshotter(store *content.Store) (snapshot.Snapshotter, error) {
for name, sr := range plugin.Registrations() {
if sr.Type != plugin.SnapshotPlugin {

View file

@ -113,7 +113,7 @@ func spec(id string, args []string, tty bool) *specs.Spec {
Devices: []specs.LinuxDeviceCgroup{
{
Allow: false,
Access: &rwm,
Access: rwm,
},
},
},

View file

@ -16,6 +16,13 @@ type Container interface {
State(context.Context) (State, error)
}
type LinuxContainer interface {
Container
Pause(context.Context) error
Resume(context.Context) error
}
type ContainerStatus int
const (
@ -32,7 +39,3 @@ type State interface {
// Pid is the main process id for the container
Pid() uint32
}
type ContainerMonitor interface {
Monitor(context.Context, Container) error
}

View file

@ -60,3 +60,13 @@ func (c *Container) State(ctx context.Context) (containerd.State, error) {
status: status,
}, nil
}
func (c *Container) Pause(ctx context.Context) error {
_, err := c.shim.Pause(ctx, &shim.PauseRequest{})
return err
}
func (c *Container) Resume(ctx context.Context) error {
_, err := c.shim.Resume(ctx, &shim.ResumeRequest{})
return err
}

View file

@ -54,6 +54,7 @@ func New(ic *plugin.InitContext) (interface{}, error) {
events: make(chan *containerd.Event, 2048),
eventsContext: c,
eventsCancel: cancel,
monitor: ic.Monitor,
}, nil
}
@ -64,6 +65,7 @@ type Runtime struct {
events chan *containerd.Event
eventsContext context.Context
eventsCancel func()
monitor plugin.ContainerMonitor
}
func (r *Runtime) Create(ctx context.Context, id string, opts containerd.CreateOpts) (containerd.Container, error) {
@ -100,10 +102,15 @@ func (r *Runtime) Create(ctx context.Context, id string, opts containerd.CreateO
os.RemoveAll(path)
return nil, err
}
return &Container{
c := &Container{
id: id,
shim: s,
}, nil
}
// after the container is created, add it to the monitor
if err := r.monitor.Monitor(c); err != nil {
return nil, err
}
return c, nil
}
func (r *Runtime) Delete(ctx context.Context, c containerd.Container) (uint32, error) {
@ -111,6 +118,11 @@ func (r *Runtime) Delete(ctx context.Context, c containerd.Container) (uint32, e
if !ok {
return 0, fmt.Errorf("container cannot be cast as *linux.Container")
}
// remove the container from the monitor
if err := r.monitor.Stop(lc); err != nil {
// TODO: log error here
return 0, err
}
rsp, err := lc.shim.Delete(ctx, &shim.DeleteRequest{})
if err != nil {
return 0, err

View file

@ -0,0 +1,70 @@
package cgroups
import (
"github.com/crosbymichael/cgroups"
"github.com/crosbymichael/cgroups/prometheus"
"github.com/docker/containerd"
"github.com/docker/containerd/plugin"
metrics "github.com/docker/go-metrics"
"golang.org/x/net/context"
)
const name = "cgroups"
func init() {
plugin.Register(name, &plugin.Registration{
Type: plugin.ContainerMonitorPlugin,
Init: New,
})
}
func New(ic *plugin.InitContext) (interface{}, error) {
var (
ns = metrics.NewNamespace("container", "", nil)
collector = prometheus.New(ns)
)
oom, err := prometheus.NewOOMCollector(ns)
if err != nil {
return nil, err
}
metrics.Register(ns)
return &cgroupsMonitor{
collector: collector,
oom: oom,
context: ic.Context,
}, nil
}
type cgroupsMonitor struct {
collector *prometheus.Collector
oom *prometheus.OOMCollector
context context.Context
}
func (m *cgroupsMonitor) Monitor(c containerd.Container) error {
// skip non-linux containers
if _, ok := c.(containerd.LinuxContainer); !ok {
return nil
}
id := c.Info().ID
state, err := c.State(m.context)
if err != nil {
return err
}
cg, err := cgroups.Load(cgroups.V1, cgroups.PidPath(int(state.Pid())))
if err != nil {
return err
}
if err := m.collector.Add(id, cg); err != nil {
return err
}
return m.oom.Add(id, cg)
}
func (m *cgroupsMonitor) Stop(c containerd.Container) error {
if _, ok := c.(containerd.LinuxContainer); !ok {
return nil
}
m.collector.Remove(c.Info().ID)
return nil
}
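The plugin above registers the "container" metrics namespace with docker/go-metrics and adds each new Linux container's cgroup (plus an OOM collector) to the Prometheus collectors. How the daemon exposes those series over HTTP is outside this diff; a minimal sketch, assuming the standard go-metrics Prometheus handler and an arbitrary listen address, would be:

```go
// Sketch only: serve every namespace registered with docker/go-metrics,
// including the "container" namespace created by the cgroups plugin.
// The listen address is an assumption, not taken from this commit.
package main

import (
	"log"
	"net/http"

	metrics "github.com/docker/go-metrics"
)

func main() {
	http.Handle("/metrics", metrics.Handler())
	log.Fatal(http.ListenAndServe("127.0.0.1:9090", nil))
}
```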

54
plugin/monitor.go Normal file
View file

@ -0,0 +1,54 @@
package plugin
import "github.com/docker/containerd"
// ContainerMonitor provides an interface for monitoring of containers within containerd
type ContainerMonitor interface {
// Monitor adds the provided container to the monitor
Monitor(containerd.Container) error
// Stop stops and removes the provided container from the monitor
Stop(containerd.Container) error
}
func NewMultiContainerMonitor(monitors ...ContainerMonitor) ContainerMonitor {
return &multiContainerMonitor{
monitors: monitors,
}
}
func NewNoopMonitor() ContainerMonitor {
return &noopContainerMonitor{}
}
type noopContainerMonitor struct {
}
func (mm *noopContainerMonitor) Monitor(c containerd.Container) error {
return nil
}
func (mm *noopContainerMonitor) Stop(c containerd.Container) error {
return nil
}
type multiContainerMonitor struct {
monitors []ContainerMonitor
}
func (mm *multiContainerMonitor) Monitor(c containerd.Container) error {
for _, m := range mm.monitors {
if err := m.Monitor(c); err != nil {
return err
}
}
return nil
}
func (mm *multiContainerMonitor) Stop(c containerd.Container) error {
for _, m := range mm.monitors {
if err := m.Stop(c); err != nil {
return err
}
}
return nil
}

View file

@ -18,6 +18,7 @@ const (
RuntimePlugin PluginType = iota + 1
GRPCPlugin
SnapshotPlugin
ContainerMonitorPlugin
)
type Registration struct {
@ -26,6 +27,7 @@ type Registration struct {
Init func(*InitContext) (interface{}, error)
}
// TODO(@crosbymichael): how do we keep this struct from growing but support dependency injection for loaded plugins?
type InitContext struct {
Root string
State string
@ -34,6 +36,7 @@ type InitContext struct {
Snapshotter snapshot.Snapshotter
Config interface{}
Context context.Context
Monitor ContainerMonitor
}
type Service interface {

View file

@ -1,6 +1,7 @@
github.com/crosbymichael/go-runc bd9aef7cf4402a3a8728e3ef83dcca6a5a1be899
github.com/crosbymichael/console 4bf9d88357031b516b3794a2594b6d060a29c59c
github.com/docker/go-metrics 0f35294225552d968a13f9c5bc71a3fa44b2eb87
github.com/crosbymichael/cgroups 74ce513f69d6dbdb355668b4b4ca7ac3d0886664
github.com/docker/go-metrics 8fd5772bf1584597834c6f7961a530f06cbfbb87
github.com/prometheus/client_golang v0.8.0
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common 195bde7883f7c39ea62b0d92ab7359b5327065cb
@ -11,7 +12,7 @@ github.com/docker/go-units v0.3.1
github.com/gogo/protobuf d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8
github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
github.com/opencontainers/runc ce450bcc6c135cae93ee2a99d41a308c179ff6dc
github.com/opencontainers/runtime-spec v1.0.0-rc3
github.com/opencontainers/runtime-spec 035da1dca3dfbb00d752eb58b0b158d6129f3776
github.com/Sirupsen/logrus v0.11.0
github.com/stevvooe/go-btrfs 8539a1d04898663b8eda14982e24b74e7a12388e
github.com/stretchr/testify v1.1.4

24
vendor/github.com/crosbymichael/cgroups/LICENSE generated vendored Normal file
View file

@ -0,0 +1,24 @@
Copyright (c) 2016 Michael Crosby. crosbymichael@gmail.com
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

135
vendor/github.com/crosbymichael/cgroups/README.md generated vendored Normal file
View file

@ -0,0 +1,135 @@
# cgroups
[![Build Status](https://travis-ci.org/crosbymichael/cgroups.svg?branch=master)](https://travis-ci.org/crosbymichael/cgroups)
[![codecov](https://codecov.io/gh/crosbymichael/cgroups/branch/master/graph/badge.svg)](https://codecov.io/gh/crosbymichael/cgroups)
Go package for creating, managing, inspecting, and destroying cgroups.
The resources format for settings on the cgroup uses the OCI runtime-spec found
[here](https://github.com/opencontainers/runtime-spec).
## Examples
### Create a new cgroup
This creates a new cgroup using a static path for all subsystems under `/test`.
* /sys/fs/cgroup/cpu/test
* /sys/fs/cgroup/memory/test
* etc....
It uses the v1 implementation of cgroups with a single hierarchy and specifies cpu shares
as a resource constraint.
```go
shares := uint64(100)
control, err := cgroups.New(cgroups.V1, cgroups.StaticPath("/test"), &specs.LinuxResources{
CPU: &specs.CPU{
Shares: &shares,
},
})
defer control.Delete()
```
### Create with systemd slice support
```go
control, err := cgroups.New(cgroups.Systemd, cgroups.Slice("system.slice", "runc-test"), &specs.LinuxResources{
CPU: &specs.CPU{
Shares: &shares,
},
})
```
### Load an existing cgroup
```go
control, err = cgroups.Load(cgroups.V1, cgroups.StaticPath("/test"))
```
### Add a process to the cgroup
```go
if err := control.Add(cgroups.Process{Pid:1234}); err != nil {
}
```
### Update the cgroup
To update the resources applied in the cgroup
```go
shares = uint64(200)
if err := control.Update(&specs.LinuxResources{
CPU: &specs.CPU{
Shares: &shares,
},
}); err != nil {
}
```
### Freeze and Thaw the cgroup
```go
if err := control.Freeze(); err != nil {
}
if err := control.Thaw(); err != nil {
}
```
### List all processes in the cgroup or recursively
```go
processes, err := control.Processes(cgroups.Devices, recursive)
```
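A possible continuation (assuming the call above succeeded): each returned Process records the subsystem it was found under, its pid, and the full cgroup path.
```go
for _, p := range processes {
	fmt.Printf("%s\t%d\t%s\n", p.Subsystem, p.Pid, p.Path)
}
```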
### Get Stats on the cgroup
```go
stats, err := control.Stat()
```
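A possible follow-up, assuming a loaded `control` as in the earlier examples: the returned Stats holds per-subsystem structs that may be nil when a subsystem was not collected, so guard before reading; IgnoreNotExist skips subsystems whose stat files are missing.
```go
stats, err := control.Stat(cgroups.IgnoreNotExist)
if err != nil {
	return err
}
if stats.Memory != nil {
	fmt.Println("memory usage (bytes):", stats.Memory.Usage.Usage)
}
if stats.Cpu != nil {
	fmt.Println("cpu usage (ns):", stats.Cpu.Usage.Total)
}
```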
### Move process across cgroups
This allows you to take processes from one cgroup and move them to another.
```go
err := control.MoveTo(destination)
```
### Create subcgroup
```go
subCgroup, err := control.New("child", resources)
```
## LICENSE
Copyright (c) 2016-2017 Michael Crosby. crosbymichael@gmail.com
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

323
vendor/github.com/crosbymichael/cgroups/blkio.go generated vendored Normal file
View file

@ -0,0 +1,323 @@
package cgroups
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"syscall"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func NewBlkio(root string) *blkioController {
return &blkioController{
root: filepath.Join(root, string(Blkio)),
}
}
type blkioController struct {
root string
}
func (b *blkioController) Name() Name {
return Blkio
}
func (b *blkioController) Path(path string) string {
return filepath.Join(b.root, path)
}
func (b *blkioController) Create(path string, resources *specs.LinuxResources) error {
if err := os.MkdirAll(b.Path(path), defaultDirPerm); err != nil {
return err
}
if resources.BlockIO == nil {
return nil
}
for _, t := range createBlkioSettings(resources.BlockIO) {
if t.value != nil {
if err := ioutil.WriteFile(
filepath.Join(b.Path(path), fmt.Sprintf("blkio.%s", t.name)),
t.format(t.value),
defaultFilePerm,
); err != nil {
return err
}
}
}
return nil
}
func (b *blkioController) Update(path string, resources *specs.LinuxResources) error {
return b.Create(path, resources)
}
func (b *blkioController) Stat(path string, stats *Stats) error {
stats.Blkio = &BlkioStat{}
settings := []blkioStatSettings{
{
name: "throttle.io_serviced",
entry: &stats.Blkio.IoServicedRecursive,
},
{
name: "throttle.io_service_bytes",
entry: &stats.Blkio.IoServiceBytesRecursive,
},
}
// Try to read CFQ stats available on all CFQ enabled kernels first
if _, err := os.Lstat(filepath.Join(b.Path(path), fmt.Sprintf("blkio.io_serviced_recursive"))); err == nil {
settings = append(settings,
blkioStatSettings{
name: "sectors_recursive",
entry: &stats.Blkio.SectorsRecursive,
},
blkioStatSettings{
name: "io_service_bytes_recursive",
entry: &stats.Blkio.IoServiceBytesRecursive,
},
blkioStatSettings{
name: "io_serviced_recursive",
entry: &stats.Blkio.IoServicedRecursive,
},
blkioStatSettings{
name: "io_queued_recursive",
entry: &stats.Blkio.IoQueuedRecursive,
},
blkioStatSettings{
name: "io_service_time_recursive",
entry: &stats.Blkio.IoServiceTimeRecursive,
},
blkioStatSettings{
name: "io_wait_time_recursive",
entry: &stats.Blkio.IoWaitTimeRecursive,
},
blkioStatSettings{
name: "io_merged_recursive",
entry: &stats.Blkio.IoMergedRecursive,
},
blkioStatSettings{
name: "time_recursive",
entry: &stats.Blkio.IoTimeRecursive,
},
)
}
devices, err := getDevices("/dev")
if err != nil {
return err
}
for _, t := range settings {
if err := b.readEntry(devices, path, t.name, t.entry); err != nil {
return err
}
}
return nil
}
func (b *blkioController) readEntry(devices map[deviceKey]string, path, name string, entry *[]BlkioEntry) error {
f, err := os.Open(filepath.Join(b.Path(path), fmt.Sprintf("blkio.%s", name)))
if err != nil {
return err
}
defer f.Close()
sc := bufio.NewScanner(f)
for sc.Scan() {
if err := sc.Err(); err != nil {
return err
}
// format: dev type amount
fields := strings.FieldsFunc(sc.Text(), splitBlkioStatLine)
if len(fields) < 3 {
if len(fields) == 2 && fields[0] == "Total" {
// skip total line
continue
} else {
return fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text())
}
}
major, err := strconv.ParseUint(fields[0], 10, 64)
if err != nil {
return err
}
minor, err := strconv.ParseUint(fields[1], 10, 64)
if err != nil {
return err
}
op := ""
valueField := 2
if len(fields) == 4 {
op = fields[2]
valueField = 3
}
v, err := strconv.ParseUint(fields[valueField], 10, 64)
if err != nil {
return err
}
*entry = append(*entry, BlkioEntry{
Device: devices[deviceKey{major, minor}],
Major: major,
Minor: minor,
Op: op,
Value: v,
})
}
return nil
}
func createBlkioSettings(blkio *specs.LinuxBlockIO) []blkioSettings {
settings := []blkioSettings{
{
name: "weight",
value: blkio.Weight,
format: uintf,
},
{
name: "leaf_weight",
value: blkio.LeafWeight,
format: uintf,
},
}
for _, wd := range blkio.WeightDevice {
settings = append(settings,
blkioSettings{
name: "weight_device",
value: wd,
format: weightdev,
},
blkioSettings{
name: "leaf_weight_device",
value: wd,
format: weightleafdev,
})
}
for _, t := range []struct {
name string
list []specs.LinuxThrottleDevice
}{
{
name: "throttle.read_bps_device",
list: blkio.ThrottleReadBpsDevice,
},
{
name: "throttle.read_iops_device",
list: blkio.ThrottleReadIOPSDevice,
},
{
name: "throttle.write_bps_device",
list: blkio.ThrottleWriteBpsDevice,
},
{
name: "throttle.write_iops_device",
list: blkio.ThrottleWriteIOPSDevice,
},
} {
for _, td := range t.list {
settings = append(settings, blkioSettings{
name: t.name,
value: td,
format: throttleddev,
})
}
}
return settings
}
type blkioSettings struct {
name string
value interface{}
format func(v interface{}) []byte
}
type blkioStatSettings struct {
name string
entry *[]BlkioEntry
}
func uintf(v interface{}) []byte {
return []byte(strconv.FormatUint(uint64(*v.(*uint16)), 10))
}
func weightdev(v interface{}) []byte {
wd := v.(specs.LinuxWeightDevice)
return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.Weight))
}
func weightleafdev(v interface{}) []byte {
wd := v.(specs.LinuxWeightDevice)
return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.LeafWeight))
}
func throttleddev(v interface{}) []byte {
td := v.(specs.LinuxThrottleDevice)
return []byte(fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate))
}
func splitBlkioStatLine(r rune) bool {
return r == ' ' || r == ':'
}
type deviceKey struct {
major, minor uint64
}
// getDevices makes a best effort attempt to read all the devices into a map
// keyed by major and minor number. Since devices may be mapped multiple times,
// we err on taking the first occurrence.
func getDevices(path string) (map[deviceKey]string, error) {
// TODO(stevvooe): We are ignoring lots of errors. It might be kind of
// challenging to debug this if we aren't mapping devices correctly.
// Consider logging these errors.
devices := map[deviceKey]string{}
if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
switch {
case fi.IsDir():
switch fi.Name() {
case "pts", "shm", "fd", "mqueue", ".lxc", ".lxd-mounts":
return filepath.SkipDir
default:
return nil
}
case fi.Name() == "console":
return nil
default:
if fi.Mode()&os.ModeDevice == 0 {
// skip non-devices
return nil
}
st, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return fmt.Errorf("%s: unable to convert to system stat", p)
}
key := deviceKey{major(st.Rdev), minor(st.Rdev)}
if _, ok := devices[key]; ok {
return nil // skip it if we have already populated the path.
}
devices[key] = p
}
return nil
}); err != nil {
return nil, err
}
return devices, nil
}
func major(devNumber uint64) uint64 {
return (devNumber >> 8) & 0xfff
}
func minor(devNumber uint64) uint64 {
return (devNumber & 0xff) | ((devNumber >> 12) & 0xfff00)
}

386
vendor/github.com/crosbymichael/cgroups/cgroup.go generated vendored Normal file
View file

@ -0,0 +1,386 @@
package cgroups
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
// New returns a new control via the cgroup cgroups interface
func New(hierarchy Hierarchy, path Path, resources *specs.LinuxResources) (Cgroup, error) {
subsystems, err := hierarchy()
if err != nil {
return nil, err
}
for _, s := range subsystems {
if err := initializeSubsystem(s, path, resources); err != nil {
return nil, err
}
}
return &cgroup{
path: path,
subsystems: subsystems,
}, nil
}
// Load will load an existing cgroup and allow it to be controlled
func Load(hierarchy Hierarchy, path Path) (Cgroup, error) {
subsystems, err := hierarchy()
if err != nil {
return nil, err
}
// check that the subsystems still exist
for _, s := range pathers(subsystems) {
p, err := path(s.Name())
if err != nil {
return nil, err
}
if _, err := os.Lstat(s.Path(p)); err != nil {
if os.IsNotExist(err) {
return nil, ErrCgroupDeleted
}
return nil, err
}
}
return &cgroup{
path: path,
subsystems: subsystems,
}, nil
}
type cgroup struct {
path Path
subsystems []Subsystem
mu sync.Mutex
err error
}
// New returns a new sub cgroup
func (c *cgroup) New(name string, resources *specs.LinuxResources) (Cgroup, error) {
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return nil, c.err
}
path := subPath(c.path, name)
for _, s := range c.subsystems {
if err := initializeSubsystem(s, path, resources); err != nil {
return nil, err
}
}
return &cgroup{
path: path,
subsystems: c.subsystems,
}, nil
}
// Subsystems returns all the subsystems that are currently being
// consumed by the group
func (c *cgroup) Subsystems() []Subsystem {
return c.subsystems
}
// Add moves the provided process into the new cgroup
func (c *cgroup) Add(process Process) error {
if process.Pid <= 0 {
return ErrInvalidPid
}
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return c.err
}
return c.add(process)
}
func (c *cgroup) add(process Process) error {
for _, s := range pathers(c.subsystems) {
p, err := c.path(s.Name())
if err != nil {
return err
}
if err := ioutil.WriteFile(
filepath.Join(s.Path(p), cgroupProcs),
[]byte(strconv.Itoa(process.Pid)),
defaultFilePerm,
); err != nil {
return err
}
}
return nil
}
// Delete will remove the control group from each of the subsystems registered
func (c *cgroup) Delete() error {
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return c.err
}
var errors []string
for _, s := range c.subsystems {
if d, ok := s.(deleter); ok {
sp, err := c.path(s.Name())
if err != nil {
return err
}
if err := d.Delete(sp); err != nil {
errors = append(errors, string(s.Name()))
}
continue
}
if p, ok := s.(pather); ok {
sp, err := c.path(s.Name())
if err != nil {
return err
}
path := p.Path(sp)
if err := remove(path); err != nil {
errors = append(errors, path)
}
}
}
if len(errors) > 0 {
return fmt.Errorf("cgroups: unable to remove paths %s", strings.Join(errors, ", "))
}
c.err = ErrCgroupDeleted
return nil
}
// Stat returns the current stats for the cgroup
func (c *cgroup) Stat(handlers ...ErrorHandler) (*Stats, error) {
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return nil, c.err
}
if len(handlers) == 0 {
handlers = append(handlers, errPassthrough)
}
var (
stats = &Stats{}
wg = &sync.WaitGroup{}
errs = make(chan error, len(c.subsystems))
)
for _, s := range c.subsystems {
if ss, ok := s.(stater); ok {
sp, err := c.path(s.Name())
if err != nil {
return nil, err
}
wg.Add(1)
go func() {
defer wg.Done()
if err := ss.Stat(sp, stats); err != nil {
for _, eh := range handlers {
if herr := eh(err); herr != nil {
errs <- herr
}
}
}
}()
}
}
wg.Wait()
close(errs)
for err := range errs {
return nil, err
}
return stats, nil
}
// Update updates the cgroup with the new resource values provided
//
// Be prepared to handle EBUSY when trying to update a cgroup with
// live processes and other operations like Stats being performed at the
// same time
func (c *cgroup) Update(resources *specs.LinuxResources) error {
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return c.err
}
for _, s := range c.subsystems {
if u, ok := s.(updater); ok {
sp, err := c.path(s.Name())
if err != nil {
return err
}
if err := u.Update(sp, resources); err != nil {
return err
}
}
}
return nil
}
// Processes returns the processes running inside the cgroup along
// with the subsystem used, pid, and path
func (c *cgroup) Processes(subsystem Name, recursive bool) ([]Process, error) {
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return nil, c.err
}
return c.processes(subsystem, recursive)
}
func (c *cgroup) processes(subsystem Name, recursive bool) ([]Process, error) {
s := c.getSubsystem(subsystem)
sp, err := c.path(subsystem)
if err != nil {
return nil, err
}
path := s.(pather).Path(sp)
var processes []Process
err = filepath.Walk(path, func(p string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !recursive && info.IsDir() {
return filepath.SkipDir
}
dir, name := filepath.Split(p)
if name != cgroupProcs {
return nil
}
procs, err := readPids(dir, subsystem)
if err != nil {
return err
}
processes = append(processes, procs...)
return nil
})
return processes, err
}
// Freeze freezes the entire cgroup and all the processes inside it
func (c *cgroup) Freeze() error {
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return c.err
}
s := c.getSubsystem(Freezer)
if s == nil {
return ErrFreezerNotSupported
}
sp, err := c.path(Freezer)
if err != nil {
return err
}
return s.(*freezerController).Freeze(sp)
}
// Thaw thaws out the cgroup and all the processes inside it
func (c *cgroup) Thaw() error {
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return c.err
}
s := c.getSubsystem(Freezer)
if s == nil {
return ErrFreezerNotSupported
}
sp, err := c.path(Freezer)
if err != nil {
return err
}
return s.(*freezerController).Thaw(sp)
}
// OOMEventFD returns the memory cgroup's out of memory event fd that triggers
// when processes inside the cgroup receive an oom event
func (c *cgroup) OOMEventFD() (uintptr, error) {
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return 0, c.err
}
s := c.getSubsystem(Memory)
if s == nil {
return 0, ErrMemoryNotSupported
}
sp, err := c.path(Memory)
if err != nil {
return 0, err
}
return s.(*memoryController).OOMEventFD(sp)
}
// State returns the state of the cgroup and its processes
func (c *cgroup) State() State {
c.mu.Lock()
defer c.mu.Unlock()
c.checkExists()
if c.err != nil && c.err == ErrCgroupDeleted {
return Deleted
}
s := c.getSubsystem(Freezer)
if s == nil {
return Thawed
}
sp, err := c.path(Freezer)
if err != nil {
return Unknown
}
state, err := s.(*freezerController).state(sp)
if err != nil {
return Unknown
}
return state
}
// MoveTo does a recursive move subsystem by subsystem of all the processes
// inside the group
func (c *cgroup) MoveTo(destination Cgroup) error {
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return c.err
}
for _, s := range c.subsystems {
processes, err := c.processes(s.Name(), true)
if err != nil {
return err
}
for _, p := range processes {
if err := destination.Add(p); err != nil {
return err
}
}
}
return nil
}
func (c *cgroup) getSubsystem(n Name) Subsystem {
for _, s := range c.subsystems {
if s.Name() == n {
return s
}
}
return nil
}
func (c *cgroup) checkExists() {
for _, s := range pathers(c.subsystems) {
p, err := c.path(s.Name())
if err != nil {
return
}
if _, err := os.Lstat(s.Path(p)); err != nil {
if os.IsNotExist(err) {
c.err = ErrCgroupDeleted
return
}
}
}
}

58
vendor/github.com/crosbymichael/cgroups/control.go generated vendored Normal file
View file

@ -0,0 +1,58 @@
package cgroups
import (
"os"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
const (
cgroupProcs = "cgroup.procs"
defaultDirPerm = 0755
)
// defaultFilePerm is a var so that the test framework can change the filemode
// of all files created when the tests are running. The difference between the
// tests and real world use is that files like "cgroup.procs" will exist when writing
// to a real cgroup filesystem and do not exist prior when running in the tests.
// This is set to a non-zero value in the test code.
var defaultFilePerm = os.FileMode(0)
type Process struct {
// Subsystem is the name of the subsystem that the process is in
Subsystem Name
// Pid is the process id of the process
Pid int
// Path is the full path of the subsystem and location that the process is in
Path string
}
// Cgroup handles interactions with the individual groups to perform
// actions on them as the main interface to this cgroup package
type Cgroup interface {
// New creates a new cgroup under the calling cgroup
New(string, *specs.LinuxResources) (Cgroup, error)
// Add adds a process to the cgroup
Add(Process) error
// Delete removes the cgroup as a whole
Delete() error
// MoveTo moves all the processes under the calling cgroup to the provided one
// subsystems are moved one at a time
MoveTo(Cgroup) error
// Stat returns the stats for all subsystems in the cgroup
Stat(...ErrorHandler) (*Stats, error)
// Update updates all the subsystems with the provided resource changes
Update(resources *specs.LinuxResources) error
// Processes returns all the processes in a select subsystem for the cgroup
Processes(Name, bool) ([]Process, error)
// Freeze freezes or pauses all processes inside the cgroup
Freeze() error
// Thaw thaws or resumes all processes inside the cgroup
Thaw() error
// OOMEventFD returns the memory subsystem's event fd for OOM events
OOMEventFD() (uintptr, error)
// State returns the cgroups current state
State() State
// Subsystems returns all the subsystems in the cgroup
Subsystems() []Subsystem
}

120
vendor/github.com/crosbymichael/cgroups/cpu.go generated vendored Normal file
View file

@ -0,0 +1,120 @@
package cgroups
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func NewCpu(root string) *cpuController {
return &cpuController{
root: filepath.Join(root, string(Cpu)),
}
}
type cpuController struct {
root string
}
func (c *cpuController) Name() Name {
return Cpu
}
func (c *cpuController) Path(path string) string {
return filepath.Join(c.root, path)
}
func (c *cpuController) Create(path string, resources *specs.LinuxResources) error {
if err := os.MkdirAll(c.Path(path), defaultDirPerm); err != nil {
return err
}
if cpu := resources.CPU; cpu != nil {
for _, t := range []struct {
name string
ivalue *int64
uvalue *uint64
}{
{
name: "rt_period_us",
uvalue: cpu.RealtimePeriod,
},
{
name: "rt_runtime_us",
ivalue: cpu.RealtimeRuntime,
},
{
name: "shares",
uvalue: cpu.Shares,
},
{
name: "cfs_period_us",
uvalue: cpu.Period,
},
{
name: "cfs_quota_us",
ivalue: cpu.Quota,
},
} {
var value []byte
if t.uvalue != nil {
value = []byte(strconv.FormatUint(*t.uvalue, 10))
} else if t.ivalue != nil {
value = []byte(strconv.FormatInt(*t.ivalue, 10))
}
if value != nil {
if err := ioutil.WriteFile(
filepath.Join(c.Path(path), fmt.Sprintf("cpu.%s", t.name)),
value,
defaultFilePerm,
); err != nil {
return err
}
}
}
}
return nil
}
func (c *cpuController) Update(path string, resources *specs.LinuxResources) error {
return c.Create(path, resources)
}
func (c *cpuController) Stat(path string, stats *Stats) error {
f, err := os.Open(filepath.Join(c.Path(path), "cpu.stat"))
if err != nil {
return err
}
defer f.Close()
// get or create the cpu field because cpuacct can also set values on this struct
stats.cpuMu.Lock()
cpu := stats.Cpu
if cpu == nil {
cpu = &CpuStat{}
stats.Cpu = cpu
}
stats.cpuMu.Unlock()
sc := bufio.NewScanner(f)
for sc.Scan() {
if err := sc.Err(); err != nil {
return err
}
key, v, err := parseKV(sc.Text())
if err != nil {
return err
}
switch key {
case "nr_periods":
cpu.Throttling.Periods = v
case "nr_throttled":
cpu.Throttling.ThrottledPeriods = v
case "throttled_time":
cpu.Throttling.ThrottledTime = v
}
}
return nil
}

112
vendor/github.com/crosbymichael/cgroups/cpuacct.go generated vendored Normal file
View file

@ -0,0 +1,112 @@
package cgroups
import (
"fmt"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
)
const nanosecondsInSecond = 1000000000
var clockTicks = getClockTicks()
func NewCpuacct(root string) *cpuacctController {
return &cpuacctController{
root: filepath.Join(root, string(Cpuacct)),
}
}
type cpuacctController struct {
root string
}
func (c *cpuacctController) Name() Name {
return Cpuacct
}
func (c *cpuacctController) Path(path string) string {
return filepath.Join(c.root, path)
}
func (c *cpuacctController) Stat(path string, stats *Stats) error {
user, kernel, err := c.getUsage(path)
if err != nil {
return err
}
total, err := readUint(filepath.Join(c.Path(path), "cpuacct.usage"))
if err != nil {
return err
}
percpu, err := c.percpuUsage(path)
if err != nil {
return err
}
stats.cpuMu.Lock()
cpu := stats.Cpu
if cpu == nil {
cpu = &CpuStat{}
stats.Cpu = cpu
}
stats.cpuMu.Unlock()
cpu.Usage.Total = total
cpu.Usage.User = user
cpu.Usage.Kernel = kernel
cpu.Usage.PerCpu = percpu
return nil
}
func (c *cpuacctController) percpuUsage(path string) ([]uint64, error) {
var usage []uint64
data, err := ioutil.ReadFile(filepath.Join(c.Path(path), "cpuacct.usage_percpu"))
if err != nil {
return nil, err
}
for _, v := range strings.Fields(string(data)) {
u, err := strconv.ParseUint(v, 10, 64)
if err != nil {
return nil, err
}
usage = append(usage, u)
}
return usage, nil
}
func (c *cpuacctController) getUsage(path string) (user uint64, kernel uint64, err error) {
statPath := filepath.Join(c.Path(path), "cpuacct.stat")
data, err := ioutil.ReadFile(statPath)
if err != nil {
return 0, 0, err
}
fields := strings.Fields(string(data))
if len(fields) != 4 {
return 0, 0, fmt.Errorf("%q is expected to have 4 fields", statPath)
}
for _, t := range []struct {
index int
name string
value *uint64
}{
{
index: 0,
name: "user",
value: &user,
},
{
index: 2,
name: "system",
value: &kernel,
},
} {
if fields[t.index] != t.name {
return 0, 0, fmt.Errorf("expected field %q but found %q in %q", t.name, fields[t.index], statPath)
}
v, err := strconv.ParseUint(fields[t.index+1], 10, 64)
if err != nil {
return 0, 0, err
}
*t.value = v
}
return (user * nanosecondsInSecond) / clockTicks, (kernel * nanosecondsInSecond) / clockTicks, nil
}

140
vendor/github.com/crosbymichael/cgroups/cpuset.go generated vendored Normal file
View file

@ -0,0 +1,140 @@
package cgroups
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func NewCputset(root string) *cpusetController {
return &cpusetController{
root: filepath.Join(root, string(Cpuset)),
}
}
type cpusetController struct {
root string
}
func (c *cpusetController) Name() Name {
return Cpuset
}
func (c *cpusetController) Path(path string) string {
return filepath.Join(c.root, path)
}
func (c *cpusetController) Create(path string, resources *specs.LinuxResources) error {
if err := c.ensureParent(c.Path(path), c.root); err != nil {
return err
}
if err := os.MkdirAll(c.Path(path), defaultDirPerm); err != nil {
return err
}
if err := c.copyIfNeeded(c.Path(path), filepath.Dir(c.Path(path))); err != nil {
return err
}
if resources.CPU != nil {
for _, t := range []struct {
name string
value *string
}{
{
name: "cpus",
value: &resources.CPU.Cpus,
},
{
name: "mems",
value: &resources.CPU.Mems,
},
} {
if t.value != nil {
if err := ioutil.WriteFile(
filepath.Join(c.Path(path), fmt.Sprintf("cpuset.%s", t.name)),
[]byte(*t.value),
defaultFilePerm,
); err != nil {
return err
}
}
}
}
return nil
}
func (c *cpusetController) getValues(path string) (cpus []byte, mems []byte, err error) {
if cpus, err = ioutil.ReadFile(filepath.Join(path, "cpuset.cpus")); err != nil && !os.IsNotExist(err) {
return
}
if mems, err = ioutil.ReadFile(filepath.Join(path, "cpuset.mems")); err != nil && !os.IsNotExist(err) {
return
}
return cpus, mems, nil
}
// ensureParent makes sure that the parent directory of current is created
// and populated with the proper cpus and mems files copied from
// its parent.
func (c *cpusetController) ensureParent(current, root string) error {
parent := filepath.Dir(current)
if _, err := filepath.Rel(root, parent); err != nil {
return nil
}
if cleanPath(parent) == root {
return nil
}
// Avoid infinite recursion.
if parent == current {
return fmt.Errorf("cpuset: cgroup parent path outside cgroup root")
}
if err := c.ensureParent(parent, root); err != nil {
return err
}
if err := os.MkdirAll(current, defaultDirPerm); err != nil {
return err
}
return c.copyIfNeeded(current, parent)
}
// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent
// directory to the current directory if the current files' contents are empty
func (c *cpusetController) copyIfNeeded(current, parent string) error {
var (
err error
currentCpus, currentMems []byte
parentCpus, parentMems []byte
)
if currentCpus, currentMems, err = c.getValues(current); err != nil {
return err
}
if parentCpus, parentMems, err = c.getValues(parent); err != nil {
return err
}
if isEmpty(currentCpus) {
if err := ioutil.WriteFile(
filepath.Join(current, "cpuset.cpus"),
parentCpus,
defaultFilePerm,
); err != nil {
return err
}
}
if isEmpty(currentMems) {
if err := ioutil.WriteFile(
filepath.Join(current, "cpuset.mems"),
parentMems,
defaultFilePerm,
); err != nil {
return err
}
}
return nil
}
func isEmpty(b []byte) bool {
return len(bytes.Trim(b, "\n")) == 0
}

74
vendor/github.com/crosbymichael/cgroups/devices.go generated vendored Normal file
View file

@ -0,0 +1,74 @@
package cgroups
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
const (
allowDeviceFile = "devices.allow"
denyDeviceFile = "devices.deny"
wildcard = -1
)
func NewDevices(root string) *devicesController {
return &devicesController{
root: filepath.Join(root, string(Devices)),
}
}
type devicesController struct {
root string
}
func (d *devicesController) Name() Name {
return Devices
}
func (d *devicesController) Path(path string) string {
return filepath.Join(d.root, path)
}
func (d *devicesController) Create(path string, resources *specs.LinuxResources) error {
if err := os.MkdirAll(d.Path(path), defaultDirPerm); err != nil {
return err
}
for _, device := range resources.Devices {
file := denyDeviceFile
if device.Allow {
file = allowDeviceFile
}
if err := ioutil.WriteFile(
filepath.Join(d.Path(path), file),
[]byte(deviceString(device)),
defaultFilePerm,
); err != nil {
return err
}
}
return nil
}
func (d *devicesController) Update(path string, resources *specs.LinuxResources) error {
return d.Create(path, resources)
}
func deviceString(device specs.LinuxDeviceCgroup) string {
return fmt.Sprintf("%c %s:%s %s",
&device.Type,
deviceNumber(device.Major),
deviceNumber(device.Minor),
&device.Access,
)
}
func deviceNumber(number *int64) string {
if number == nil || *number == wildcard {
return "*"
}
return fmt.Sprint(*number)
}

31
vendor/github.com/crosbymichael/cgroups/errors.go generated vendored Normal file
View file

@ -0,0 +1,31 @@
package cgroups
import (
"errors"
"os"
)
var (
ErrInvalidPid = errors.New("cgroups: pid must be greater than 0")
ErrMountPointNotExist = errors.New("cgroups: cgroup mountpoint does not exist")
ErrInvalidFormat = errors.New("cgroups: parsing file with invalid format failed")
ErrFreezerNotSupported = errors.New("cgroups: freezer cgroup not supported on this system")
ErrMemoryNotSupported = errors.New("cgroups: memory cgroup not supported on this system")
ErrCgroupDeleted = errors.New("cgroups: cgroup deleted")
ErrNoCgroupMountDestination = errors.New("cgroups: cannot find cgroup mount destination")
)
// ErrorHandler is a function that handles and acts on errors
type ErrorHandler func(err error) error
// IgnoreNotExist ignores any errors that are for not existing files
func IgnoreNotExist(err error) error {
if os.IsNotExist(err) {
return nil
}
return err
}
func errPassthrough(err error) error {
return err
}

69
vendor/github.com/crosbymichael/cgroups/freezer.go generated vendored Normal file
View file

@ -0,0 +1,69 @@
package cgroups
import (
"io/ioutil"
"path/filepath"
"strings"
"time"
)
func NewFreezer(root string) *freezerController {
return &freezerController{
root: filepath.Join(root, string(Freezer)),
}
}
type freezerController struct {
root string
}
func (f *freezerController) Name() Name {
return Freezer
}
func (f *freezerController) Path(path string) string {
return filepath.Join(f.root, path)
}
func (f *freezerController) Freeze(path string) error {
if err := f.changeState(path, Frozen); err != nil {
return err
}
return f.waitState(path, Frozen)
}
func (f *freezerController) Thaw(path string) error {
if err := f.changeState(path, Thawed); err != nil {
return err
}
return f.waitState(path, Thawed)
}
func (f *freezerController) changeState(path string, state State) error {
return ioutil.WriteFile(
filepath.Join(f.root, path, "freezer.state"),
[]byte(strings.ToUpper(string(state))),
defaultFilePerm,
)
}
func (f *freezerController) state(path string) (State, error) {
current, err := ioutil.ReadFile(filepath.Join(f.root, path, "freezer.state"))
if err != nil {
return "", err
}
return State(strings.ToLower(strings.TrimSpace(string(current)))), nil
}
func (f *freezerController) waitState(path string, state State) error {
for {
current, err := f.state(path)
if err != nil {
return err
}
if current == state {
return nil
}
time.Sleep(1 * time.Millisecond)
}
}

4
vendor/github.com/crosbymichael/cgroups/hierarchy.go generated vendored Normal file
View file

@ -0,0 +1,4 @@
package cgroups
// Hierarchy enables both unified and split hierarchies for cgroups
type Hierarchy func() ([]Subsystem, error)

92
vendor/github.com/crosbymichael/cgroups/hugetlb.go generated vendored Normal file
View file

@ -0,0 +1,92 @@
package cgroups
import (
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func NewHugetlb(root string) (*hugetlbController, error) {
sizes, err := hugePageSizes()
if err != nil {
return nil, nil
}
return &hugetlbController{
root: filepath.Join(root, string(Hugetlb)),
sizes: sizes,
}, nil
}
type hugetlbController struct {
root string
sizes []string
}
func (h *hugetlbController) Name() Name {
return Hugetlb
}
func (h *hugetlbController) Path(path string) string {
return filepath.Join(h.root, path)
}
func (h *hugetlbController) Create(path string, resources *specs.LinuxResources) error {
if err := os.MkdirAll(h.Path(path), defaultDirPerm); err != nil {
return err
}
for _, limit := range resources.HugepageLimits {
if err := ioutil.WriteFile(
filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", limit.Pagesize, "limit_in_bytes"}, ".")),
[]byte(strconv.FormatUint(limit.Limit, 10)),
defaultFilePerm,
); err != nil {
return err
}
}
return nil
}
func (h *hugetlbController) Stat(path string, stats *Stats) error {
stats.Hugetlb = make(map[string]HugetlbStat)
for _, size := range h.sizes {
s, err := h.readSizeStat(path, size)
if err != nil {
return err
}
stats.Hugetlb[size] = s
}
return nil
}
func (h *hugetlbController) readSizeStat(path, size string) (HugetlbStat, error) {
var s HugetlbStat
for _, t := range []struct {
name string
value *uint64
}{
{
name: "usage_in_bytes",
value: &s.Usage,
},
{
name: "max_usage_in_bytes",
value: &s.Max,
},
{
name: "failcnt",
value: &s.Failcnt,
},
} {
v, err := readUint(filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", size, t.name}, ".")))
if err != nil {
return s, err
}
*t.value = v
}
return s, nil
}

302
vendor/github.com/crosbymichael/cgroups/memory.go generated vendored Normal file
View file

@ -0,0 +1,302 @@
package cgroups
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"syscall"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func NewMemory(root string) *memoryController {
return &memoryController{
root: filepath.Join(root, string(Memory)),
}
}
type memoryController struct {
root string
}
func (m *memoryController) Name() Name {
return Memory
}
func (m *memoryController) Path(path string) string {
return filepath.Join(m.root, path)
}
func (m *memoryController) Create(path string, resources *specs.LinuxResources) error {
if err := os.MkdirAll(m.Path(path), defaultDirPerm); err != nil {
return err
}
if resources.Memory == nil {
return nil
}
if resources.Memory.Kernel != nil {
// Check if kernel memory is enabled
// We have to limit the kernel memory here as it won't be accounted at all
// until a limit is set on the cgroup and limit cannot be set once the
// cgroup has children, or if there are already tasks in the cgroup.
for _, i := range []int64{1, -1} {
if err := ioutil.WriteFile(
filepath.Join(m.Path(path), "memory.kmem.limit_in_bytes"),
[]byte(strconv.FormatInt(i, 10)),
defaultFilePerm,
); err != nil {
return checkEBUSY(err)
}
}
}
return m.set(path, getMemorySettings(resources))
}
func (m *memoryController) Update(path string, resources *specs.LinuxResources) error {
if resources.Memory == nil {
return nil
}
g := func(v *uint64) bool {
return v != nil && *v > 0
}
settings := getMemorySettings(resources)
if g(resources.Memory.Limit) && g(resources.Memory.Swap) {
// if the updated swap value is larger than the current memory limit, set the swap changes first,
// then set the memory limit, as swap must always be larger than the current limit
current, err := readUint(filepath.Join(m.Path(path), "memory.limit_in_bytes"))
if err != nil {
return err
}
if current < uint64(*resources.Memory.Swap) {
settings[0], settings[1] = settings[1], settings[0]
}
}
return m.set(path, settings)
}
func (m *memoryController) Stat(path string, stats *Stats) error {
f, err := os.Open(filepath.Join(m.Path(path), "memory.stat"))
if err != nil {
return err
}
defer f.Close()
stats.Memory = &MemoryStat{}
if err := m.parseStats(f, stats.Memory); err != nil {
return err
}
for _, t := range []struct {
module string
entry *MemoryEntry
}{
{
module: "",
entry: &stats.Memory.Usage,
},
{
module: "memsw",
entry: &stats.Memory.Swap,
},
{
module: "kmem",
entry: &stats.Memory.Kernel,
},
{
module: "kmem.tcp",
entry: &stats.Memory.KernelTCP,
},
} {
for _, tt := range []struct {
name string
value *uint64
}{
{
name: "usage_in_bytes",
value: &t.entry.Usage,
},
{
name: "max_usage_in_bytes",
value: &t.entry.Max,
},
{
name: "failcnt",
value: &t.entry.Failcnt,
},
{
name: "limit_in_bytes",
value: &t.entry.Limit,
},
} {
parts := []string{"memory"}
if t.module != "" {
parts = append(parts, t.module)
}
parts = append(parts, tt.name)
v, err := readUint(filepath.Join(m.Path(path), strings.Join(parts, ".")))
if err != nil {
return err
}
*tt.value = v
}
}
return nil
}
func (m *memoryController) OOMEventFD(path string) (uintptr, error) {
root := m.Path(path)
f, err := os.Open(filepath.Join(root, "memory.oom_control"))
if err != nil {
return 0, err
}
defer f.Close()
fd, _, serr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0)
if serr != 0 {
return 0, serr
}
if err := writeEventFD(root, f.Fd(), fd); err != nil {
syscall.Close(int(fd))
return 0, err
}
return fd, nil
}
func writeEventFD(root string, cfd, efd uintptr) error {
f, err := os.OpenFile(filepath.Join(root, "cgroup.event_control"), os.O_WRONLY, 0)
if err != nil {
return err
}
_, err = f.WriteString(fmt.Sprintf("%d %d", efd, cfd))
f.Close()
return err
}
func (m *memoryController) parseStats(r io.Reader, stat *MemoryStat) error {
var (
raw = make(map[string]uint64)
sc = bufio.NewScanner(r)
line int
)
for sc.Scan() {
if err := sc.Err(); err != nil {
return err
}
key, v, err := parseKV(sc.Text())
if err != nil {
return fmt.Errorf("%d: %v", line, err)
}
raw[key] = v
line++
}
stat.Cache = raw["cache"]
stat.RSS = raw["rss"]
stat.RSSHuge = raw["rss_huge"]
stat.MappedFile = raw["mapped_file"]
stat.Dirty = raw["dirty"]
stat.Writeback = raw["writeback"]
stat.PgPgIn = raw["pgpgin"]
stat.PgPgOut = raw["pgpgout"]
stat.PgFault = raw["pgfault"]
stat.PgMajFault = raw["pgmajfault"]
stat.InactiveAnon = raw["inactive_anon"]
stat.ActiveAnon = raw["active_anon"]
stat.InactiveFile = raw["inactive_file"]
stat.ActiveFile = raw["active_file"]
stat.Unevictable = raw["unevictable"]
stat.HierarchicalMemoryLimit = raw["hierarchical_memory_limit"]
stat.HierarchicalSwapLimit = raw["hierarchical_memsw_limit"]
stat.TotalCache = raw["total_cache"]
stat.TotalRSS = raw["total_rss"]
stat.TotalRSSHuge = raw["total_rss_huge"]
stat.TotalMappedFile = raw["total_mapped_file"]
stat.TotalDirty = raw["total_dirty"]
stat.TotalWriteback = raw["total_writeback"]
stat.TotalPgPgIn = raw["total_pgpgin"]
stat.TotalPgPgOut = raw["total_pgpgout"]
stat.TotalPgFault = raw["total_pgfault"]
stat.TotalPgMajFault = raw["total_pgmajfault"]
stat.TotalInactiveAnon = raw["total_inactive_anon"]
stat.TotalActiveAnon = raw["total_active_anon"]
stat.TotalInactiveFile = raw["total_inactive_file"]
stat.TotalActiveFile = raw["total_active_file"]
stat.TotalUnevictable = raw["total_unevictable"]
return nil
}
func (m *memoryController) set(path string, settings []memorySettings) error {
for _, t := range settings {
if t.value != nil {
if err := ioutil.WriteFile(
filepath.Join(m.Path(path), fmt.Sprintf("memory.%s", t.name)),
[]byte(strconv.FormatUint(*t.value, 10)),
defaultFilePerm,
); err != nil {
return err
}
}
}
return nil
}
type memorySettings struct {
name string
value *uint64
}
func getMemorySettings(resources *specs.LinuxResources) []memorySettings {
mem := resources.Memory
var swappiness *uint64
if mem.Swappiness != nil {
v := uint64(*mem.Swappiness)
swappiness = &v
}
return []memorySettings{
{
name: "limit_in_bytes",
value: mem.Limit,
},
{
name: "memsw.limit_in_bytes",
value: mem.Swap,
},
{
name: "kmem.limit_in_bytes",
value: mem.Kernel,
},
{
name: "kmem.tcp.limit_in_bytes",
value: mem.KernelTCP,
},
{
name: "oom_control",
value: getOomControlValue(resources),
},
{
name: "swappiness",
value: swappiness,
},
}
}
func checkEBUSY(err error) error {
if pathErr, ok := err.(*os.PathError); ok {
if errNo, ok := pathErr.Err.(syscall.Errno); ok {
if errNo == syscall.EBUSY {
return fmt.Errorf(
"failed to set memory.kmem.limit_in_bytes, because either tasks have already joined this cgroup or it has children")
}
}
}
return err
}
func getOomControlValue(resources *specs.LinuxResources) *uint64 {
if resources.DisableOOMKiller != nil && *resources.DisableOOMKiller {
i := uint64(1)
return &i
}
return nil
}

23
vendor/github.com/crosbymichael/cgroups/named.go generated vendored Normal file
View file

@ -0,0 +1,23 @@
package cgroups
import "path/filepath"
func NewNamed(root string, name Name) *namedController {
return &namedController{
root: root,
name: name,
}
}
type namedController struct {
root string
name Name
}
func (n *namedController) Name() Name {
return n.name
}
func (n *namedController) Path(path string) string {
return filepath.Join(n.root, string(n.name), path)
}

42
vendor/github.com/crosbymichael/cgroups/net_cls.go generated vendored Normal file
View file

@ -0,0 +1,42 @@
package cgroups
import (
"io/ioutil"
"os"
"path/filepath"
"strconv"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func NewNetCls(root string) *netclsController {
return &netclsController{
root: filepath.Join(root, string(NetCLS)),
}
}
type netclsController struct {
root string
}
func (n *netclsController) Name() Name {
return NetCLS
}
func (n *netclsController) Path(path string) string {
return filepath.Join(n.root, path)
}
func (n *netclsController) Create(path string, resources *specs.LinuxResources) error {
if err := os.MkdirAll(n.Path(path), defaultDirPerm); err != nil {
return err
}
if resources.Network != nil && resources.Network.ClassID != nil && *resources.Network.ClassID > 0 {
return ioutil.WriteFile(
filepath.Join(n.Path(path), "net_cls_classid_u"),
[]byte(strconv.FormatUint(uint64(*resources.Network.ClassID), 10)),
defaultFilePerm,
)
}
return nil
}

50
vendor/github.com/crosbymichael/cgroups/net_prio.go generated vendored Normal file
View file

@ -0,0 +1,50 @@
package cgroups
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func NewNetPrio(root string) *netprioController {
return &netprioController{
root: filepath.Join(root, string(NetPrio)),
}
}
type netprioController struct {
root string
}
func (n *netprioController) Name() Name {
return NetPrio
}
func (n *netprioController) Path(path string) string {
return filepath.Join(n.root, path)
}
func (n *netprioController) Create(path string, resources *specs.LinuxResources) error {
if err := os.MkdirAll(n.Path(path), defaultDirPerm); err != nil {
return err
}
if resources.Network != nil {
for _, prio := range resources.Network.Priorities {
if err := ioutil.WriteFile(
filepath.Join(n.Path(path), "net_prio_ifpriomap"),
formatPrio(prio.Name, prio.Priority),
defaultFilePerm,
); err != nil {
return err
}
}
}
return nil
}
func formatPrio(name string, prio uint32) []byte {
return []byte(fmt.Sprintf("%s %d", name, prio))
}

85
vendor/github.com/crosbymichael/cgroups/paths.go generated vendored Normal file
View file

@ -0,0 +1,85 @@
package cgroups
import (
"fmt"
"path/filepath"
)
type Path func(subsystem Name) (string, error)
// RootPath is a Path that returns the root "/" for all subsystems
func RootPath(subsystem Name) (string, error) {
return "/", nil
}
// StaticPath returns a static path to use for all cgroups
func StaticPath(path string) Path {
return func(_ Name) (string, error) {
return path, nil
}
}
// NestedPath will nest the cgroups based on the calling process's cgroup,
// placing its child processes inside its own path
func NestedPath(suffix string) Path {
paths, err := parseCgroupFile("/proc/self/cgroup")
if err != nil {
return errorPath(err)
}
return existingPath(paths, suffix)
}
// PidPath will return the correct cgroup paths for an existing process running inside a cgroup
// This is commonly used for the Load function to restore an existing container
func PidPath(pid int) Path {
paths, err := parseCgroupFile(fmt.Sprintf("/proc/%d/cgroup", pid))
if err != nil {
return errorPath(err)
}
return existingPath(paths, "")
}
func existingPath(paths map[string]string, suffix string) Path {
// localize the paths based on the root mount dest for nested cgroups
for n, p := range paths {
dest, err := getCgroupDestination(string(n))
if err != nil {
return errorPath(err)
}
rel, err := filepath.Rel(dest, p)
if err != nil {
return errorPath(err)
}
if rel == "." {
rel = dest
}
paths[n] = filepath.Join("/", rel)
}
return func(name Name) (string, error) {
root, ok := paths[string(name)]
if !ok {
if root, ok = paths[fmt.Sprintf("name=%s", name)]; !ok {
return "", fmt.Errorf("unable to find %q in controller set", name)
}
}
if suffix != "" {
return filepath.Join(root, suffix), nil
}
return root, nil
}
}
func subPath(path Path, subName string) Path {
return func(name Name) (string, error) {
p, err := path(name)
if err != nil {
return "", err
}
return filepath.Join(p, subName), nil
}
}
func errorPath(err error) Path {
return func(_ Name) (string, error) {
return "", err
}
}

21
vendor/github.com/crosbymichael/cgroups/perf_event.go generated vendored Normal file
View file

@ -0,0 +1,21 @@
package cgroups
import "path/filepath"
func NewPerfEvent(root string) *PerfEventController {
return &PerfEventController{
root: filepath.Join(root, string(PerfEvent)),
}
}
type PerfEventController struct {
root string
}
func (p *PerfEventController) Name() Name {
return PerfEvent
}
func (p *PerfEventController) Path(path string) string {
return filepath.Join(p.root, path)
}

69
vendor/github.com/crosbymichael/cgroups/pids.go generated vendored Normal file
View file

@ -0,0 +1,69 @@
package cgroups
import (
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func NewPids(root string) *pidsController {
return &pidsController{
root: filepath.Join(root, string(Pids)),
}
}
type pidsController struct {
root string
}
func (p *pidsController) Name() Name {
return Pids
}
func (p *pidsController) Path(path string) string {
return filepath.Join(p.root, path)
}
func (p *pidsController) Create(path string, resources *specs.LinuxResources) error {
if err := os.MkdirAll(p.Path(path), defaultDirPerm); err != nil {
return err
}
if resources.Pids != nil && resources.Pids.Limit > 0 {
return ioutil.WriteFile(
filepath.Join(p.Path(path), "pids.max"),
[]byte(strconv.FormatInt(resources.Pids.Limit, 10)),
defaultFilePerm,
)
}
return nil
}
func (p *pidsController) Update(path string, resources *specs.LinuxResources) error {
return p.Create(path, resources)
}
func (p *pidsController) Stat(path string, stats *Stats) error {
current, err := readUint(filepath.Join(p.Path(path), "pids.current"))
if err != nil {
return err
}
var max uint64
maxData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "pids.max"))
if err != nil {
return err
}
if maxS := strings.TrimSpace(string(maxData)); maxS != "max" {
if max, err = parseUint(maxS, 10, 64); err != nil {
return err
}
}
stats.Pids = &PidsStat{
Current: current,
Limit: max,
}
return nil
}

View file

@ -0,0 +1,101 @@
package prometheus
import (
"github.com/crosbymichael/cgroups"
metrics "github.com/docker/go-metrics"
"github.com/prometheus/client_golang/prometheus"
)
var blkioMetrics = []*metric{
{
name: "blkio_io_merged_recursive",
help: "The blkio io merged recursive",
unit: metrics.Total,
vt: prometheus.GaugeValue,
labels: []string{"op", "device", "major", "minor"},
getValues: func(stats *cgroups.Stats) []value {
if stats.Blkio == nil {
return nil
}
return blkioValues(stats.Blkio.IoMergedRecursive)
},
},
{
name: "blkio_io_queued_recursive",
help: "The blkio io queued recursive",
unit: metrics.Total,
vt: prometheus.GaugeValue,
labels: []string{"op", "device", "major", "minor"},
getValues: func(stats *cgroups.Stats) []value {
if stats.Blkio == nil {
return nil
}
return blkioValues(stats.Blkio.IoQueuedRecursive)
},
},
{
name: "blkio_io_service_bytes_recursive",
help: "The blkio io service bytes recursive",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
labels: []string{"op", "device", "major", "minor"},
getValues: func(stats *cgroups.Stats) []value {
if stats.Blkio == nil {
return nil
}
return blkioValues(stats.Blkio.IoServiceBytesRecursive)
},
},
{
name: "blkio_io_service_time_recursive",
help: "The blkio io servie time recursive",
unit: metrics.Total,
vt: prometheus.GaugeValue,
labels: []string{"op", "device", "major", "minor"},
getValues: func(stats *cgroups.Stats) []value {
if stats.Blkio == nil {
return nil
}
return blkioValues(stats.Blkio.IoServiceTimeRecursive)
},
},
{
name: "blkio_io_serviced_recursive",
help: "The blkio io servied recursive",
unit: metrics.Total,
vt: prometheus.GaugeValue,
labels: []string{"op", "device", "major", "minor"},
getValues: func(stats *cgroups.Stats) []value {
if stats.Blkio == nil {
return nil
}
return blkioValues(stats.Blkio.IoServicedRecursive)
},
},
{
name: "blkio_io_time_recursive",
help: "The blkio io time recursive",
unit: metrics.Total,
vt: prometheus.GaugeValue,
labels: []string{"op", "device", "major", "minor"},
getValues: func(stats *cgroups.Stats) []value {
if stats.Blkio == nil {
return nil
}
return blkioValues(stats.Blkio.IoTimeRecursive)
},
},
{
name: "blkio_sectors_recursive",
help: "The blkio sectors recursive",
unit: metrics.Total,
vt: prometheus.GaugeValue,
labels: []string{"op", "device", "major", "minor"},
getValues: func(stats *cgroups.Stats) []value {
if stats.Blkio == nil {
return nil
}
return blkioValues(stats.Blkio.SectorsRecursive)
},
},
}

View file

@ -0,0 +1,128 @@
package prometheus
import (
"strconv"
"github.com/crosbymichael/cgroups"
metrics "github.com/docker/go-metrics"
"github.com/prometheus/client_golang/prometheus"
)
var cpuMetrics = []*metric{
{
name: "cpu_total",
help: "The total cpu time",
unit: metrics.Nanoseconds,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Cpu == nil {
return nil
}
return []value{
{
v: float64(stats.Cpu.Usage.Total),
},
}
},
},
{
name: "cpu_kernel",
help: "The total kernel cpu time",
unit: metrics.Nanoseconds,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Cpu == nil {
return nil
}
return []value{
{
v: float64(stats.Cpu.Usage.Kernel),
},
}
},
},
{
name: "cpu_user",
help: "The total user cpu time",
unit: metrics.Nanoseconds,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Cpu == nil {
return nil
}
return []value{
{
v: float64(stats.Cpu.Usage.User),
},
}
},
},
{
name: "per_cpu",
help: "The total cpu time per cpu",
unit: metrics.Nanoseconds,
vt: prometheus.GaugeValue,
labels: []string{"cpu"},
getValues: func(stats *cgroups.Stats) []value {
if stats.Cpu == nil {
return nil
}
var out []value
for i, v := range stats.Cpu.Usage.PerCpu {
out = append(out, value{
v: float64(v),
l: []string{strconv.Itoa(i)},
})
}
return out
},
},
{
name: "cpu_throttle_periods",
help: "The total cpu throttle periods",
unit: metrics.Total,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Cpu == nil {
return nil
}
return []value{
{
v: float64(stats.Cpu.Throttling.Periods),
},
}
},
},
{
name: "cpu_throttled_periods",
help: "The total cpu throttled periods",
unit: metrics.Total,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Cpu == nil {
return nil
}
return []value{
{
v: float64(stats.Cpu.Throttling.ThrottledPeriods),
},
}
},
},
{
name: "cpu_throttled_time",
help: "The total cpu throttled time",
unit: metrics.Nanoseconds,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Cpu == nil {
return nil
}
return []value{
{
v: float64(stats.Cpu.Throttling.ThrottledTime),
},
}
},
},
}

View file

@ -0,0 +1,70 @@
package prometheus
import (
"github.com/crosbymichael/cgroups"
metrics "github.com/docker/go-metrics"
"github.com/prometheus/client_golang/prometheus"
)
var hugetlbMetrics = []*metric{
{
name: "hugetlb_usage",
help: "The hugetlb usage",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
labels: []string{"page"},
getValues: func(stats *cgroups.Stats) []value {
if stats.Hugetlb == nil {
return nil
}
var out []value
for page, v := range stats.Hugetlb {
out = append(out, value{
v: float64(v.Usage),
l: []string{page},
})
}
return out
},
},
{
name: "hugetlb_failcnt",
help: "The hugetlb failcnt",
unit: metrics.Total,
vt: prometheus.GaugeValue,
labels: []string{"page"},
getValues: func(stats *cgroups.Stats) []value {
if stats.Hugetlb == nil {
return nil
}
var out []value
for page, v := range stats.Hugetlb {
out = append(out, value{
v: float64(v.Failcnt),
l: []string{page},
})
}
return out
},
},
{
name: "hugetlb_max",
help: "The hugetlb maximum usage",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
labels: []string{"page"},
getValues: func(stats *cgroups.Stats) []value {
if stats.Hugetlb == nil {
return nil
}
var out []value
for page, v := range stats.Hugetlb {
out = append(out, value{
v: float64(v.Max),
l: []string{page},
})
}
return out
},
},
}

View file

@ -0,0 +1,778 @@
package prometheus
import (
"github.com/crosbymichael/cgroups"
metrics "github.com/docker/go-metrics"
"github.com/prometheus/client_golang/prometheus"
)
var memoryMetrics = []*metric{
{
name: "memory_cache",
help: "The cache amount used",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.Cache),
},
}
},
},
{
name: "memory_rss",
help: "The rss amount used",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.RSS),
},
}
},
},
{
name: "memory_rss_huge",
help: "The rss_huge amount used",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.RSSHuge),
},
}
},
},
{
name: "memory_mapped_file",
help: "The mapped_file amount used",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.MappedFile),
},
}
},
},
{
name: "memory_dirty",
help: "The dirty amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.Dirty),
},
}
},
},
{
name: "memory_writeback",
help: "The writeback amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.Writeback),
},
}
},
},
{
name: "memory_pgpgin",
help: "The pgpgin amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.PgPgIn),
},
}
},
},
{
name: "memory_pgpgout",
help: "The pgpgout amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.PgPgOut),
},
}
},
},
{
name: "memory_pgfault",
help: "The pgfault amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.PgFault),
},
}
},
},
{
name: "memory_pgmajfault",
help: "The pgmajfault amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.PgMajFault),
},
}
},
},
{
name: "memory_inactive_anon",
help: "The inactive_anon amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.InactiveAnon),
},
}
},
},
{
name: "memory_active_anon",
help: "The active_anon amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.ActiveAnon),
},
}
},
},
{
name: "memory_inactive_file",
help: "The inactive_file amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.InactiveFile),
},
}
},
},
{
name: "memory_active_file",
help: "The active_file amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.ActiveFile),
},
}
},
},
{
name: "memory_unevictable",
help: "The unevictable amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.Unevictable),
},
}
},
},
{
name: "memory_hierarchical_memory_limit",
help: "The hierarchical_memory_limit amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.HierarchicalMemoryLimit),
},
}
},
},
{
name: "memory_hierarchical_memsw_limit",
help: "The hierarchical_memsw_limit amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.HierarchicalSwapLimit),
},
}
},
},
{
name: "memory_total_cache",
help: "The total_cache amount used",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.TotalCache),
},
}
},
},
{
name: "memory_total_rss",
help: "The total_rss amount used",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.TotalRSS),
},
}
},
},
{
name: "memory_total_rss_huge",
help: "The total_rss_huge amount used",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.TotalRSSHuge),
},
}
},
},
{
name: "memory_total_mapped_file",
help: "The total_mapped_file amount used",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.TotalMappedFile),
},
}
},
},
{
name: "memory_total_dirty",
help: "The total_dirty amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.TotalDirty),
},
}
},
},
{
name: "memory_total_writeback",
help: "The total_writeback amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.TotalWriteback),
},
}
},
},
{
name: "memory_total_pgpgin",
help: "The total_pgpgin amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.TotalPgPgIn),
},
}
},
},
{
name: "memory_total_pgpgout",
help: "The total_pgpgout amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.TotalPgPgOut),
},
}
},
},
{
name: "memory_total_pgfault",
help: "The total_pgfault amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.TotalPgFault),
},
}
},
},
{
name: "memory_total_pgmajfault",
help: "The total_pgmajfault amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.TotalPgMajFault),
},
}
},
},
{
name: "memory_total_inactive_anon",
help: "The total_inactive_anon amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.TotalInactiveAnon),
},
}
},
},
{
name: "memory_total_active_anon",
help: "The total_active_anon amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.TotalActiveAnon),
},
}
},
},
{
name: "memory_total_inactive_file",
help: "The total_inactive_file amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.TotalInactiveFile),
},
}
},
},
{
name: "memory_total_active_file",
help: "The total_active_file amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.TotalActiveFile),
},
}
},
},
{
name: "memory_total_unevictable",
help: "The total_unevictable amount",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.TotalUnevictable),
},
}
},
},
{
name: "memory_usage_failcnt",
help: "The usage failcnt",
unit: metrics.Total,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.Usage.Failcnt),
},
}
},
},
{
name: "memory_usage_limit",
help: "The memory limit",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.Usage.Limit),
},
}
},
},
{
name: "memory_usage_max",
help: "The memory maximum usage",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.Usage.Max),
},
}
},
},
{
name: "memory_usage_usage",
help: "The memory usage",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.Usage.Usage),
},
}
},
},
{
name: "memory_swap_failcnt",
help: "The swap failcnt",
unit: metrics.Total,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.Swap.Failcnt),
},
}
},
},
{
name: "memory_swap_limit",
help: "The swap limit",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.Swap.Limit),
},
}
},
},
{
name: "memory_swap_max",
help: "The swap maximum usage",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.Swap.Max),
},
}
},
},
{
name: "memory_swap_usage",
help: "The swap usage",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.Swap.Usage),
},
}
},
},
{
name: "memory_kernel_failcnt",
help: "The kernel failcnt",
unit: metrics.Total,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.Kernel.Failcnt),
},
}
},
},
{
name: "memory_kernel_limit",
help: "The kernel limit",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.Kernel.Limit),
},
}
},
},
{
name: "memory_kernel_max",
help: "The kernel maximum usage",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.Kernel.Max),
},
}
},
},
{
name: "memory_kernel_usage",
help: "The kernel usage",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.Kernel.Usage),
},
}
},
},
{
name: "memory_kerneltcp_failcnt",
help: "The kerneltcp failcnt",
unit: metrics.Total,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.KernelTCP.Failcnt),
},
}
},
},
{
name: "memory_kerneltcp_limit",
help: "The kerneltcp limit",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.KernelTCP.Limit),
},
}
},
},
{
name: "memory_kerneltcp_max",
help: "The kerneltcp maximum usage",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.KernelTCP.Max),
},
}
},
},
{
name: "memory_kerneltcp_usage",
help: "The kerneltcp usage",
unit: metrics.Bytes,
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Memory == nil {
return nil
}
return []value{
{
v: float64(stats.Memory.KernelTCP.Usage),
},
}
},
},
}

View file

@ -0,0 +1,33 @@
package prometheus
import (
"github.com/crosbymichael/cgroups"
metrics "github.com/docker/go-metrics"
"github.com/prometheus/client_golang/prometheus"
)
type value struct {
v float64
l []string
}
type metric struct {
name string
help string
unit metrics.Unit
vt prometheus.ValueType
labels []string
// getValues returns the value and labels for the data
getValues func(stats *cgroups.Stats) []value
}
func (m *metric) desc(ns *metrics.Namespace) *prometheus.Desc {
return ns.NewDesc(m.name, m.help, m.unit, append([]string{"id"}, m.labels...)...)
}
func (m *metric) collect(id string, stats *cgroups.Stats, ns *metrics.Namespace, ch chan<- prometheus.Metric) {
values := m.getValues(stats)
for _, v := range values {
ch <- prometheus.MustNewConstMetric(m.desc(ns), m.vt, v.v, append([]string{id}, v.l...)...)
}
}
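To make the metric table format concrete: desc prepends the mandatory "id" label to the declared labels, and collect prepends the container id to whatever label values getValues returns. A hypothetical entry (not part of the vendored set, and duplicating data memory_usage_failcnt already exports) would look like:

```go
// Illustration only: not part of the vendored metric tables.
var exampleMetric = &metric{
	name: "memory_failcnt",
	help: "The memory failcnt",
	unit: metrics.Total,
	vt:   prometheus.GaugeValue,
	getValues: func(stats *cgroups.Stats) []value {
		if stats.Memory == nil {
			return nil
		}
		return []value{
			{v: float64(stats.Memory.Usage.Failcnt)},
		}
	},
}
```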

View file

@ -0,0 +1,99 @@
package prometheus
import (
"errors"
"strconv"
"sync"
"github.com/Sirupsen/logrus"
"github.com/crosbymichael/cgroups"
metrics "github.com/docker/go-metrics"
"github.com/prometheus/client_golang/prometheus"
)
var ErrAlreadyCollected = errors.New("cgroup is already being collected")
// New registers the Collector with the provided namespace and returns it so
// that cgroups can be added for collection
func New(ns *metrics.Namespace) *Collector {
// add machine cpus and memory info
c := &Collector{
ns: ns,
cgroups: make(map[string]cgroups.Cgroup),
}
c.metrics = append(c.metrics, pidMetrics...)
c.metrics = append(c.metrics, cpuMetrics...)
c.metrics = append(c.metrics, memoryMetrics...)
c.metrics = append(c.metrics, hugetlbMetrics...)
c.metrics = append(c.metrics, blkioMetrics...)
ns.Add(c)
return c
}
// Collector provides the ability to collect container stats and export
// them in the prometheus format
type Collector struct {
mu sync.RWMutex
cgroups map[string]cgroups.Cgroup
ns *metrics.Namespace
metrics []*metric
}
func (c *Collector) Describe(ch chan<- *prometheus.Desc) {
for _, m := range c.metrics {
ch <- m.desc(c.ns)
}
}
func (c *Collector) Collect(ch chan<- prometheus.Metric) {
c.mu.RLock()
wg := &sync.WaitGroup{}
for id, cg := range c.cgroups {
wg.Add(1)
go c.collect(id, cg, ch, wg)
}
c.mu.RUnlock()
wg.Wait()
}
func (c *Collector) collect(id string, cg cgroups.Cgroup, ch chan<- prometheus.Metric, wg *sync.WaitGroup) {
defer wg.Done()
stats, err := cg.Stat(cgroups.IgnoreNotExist)
if err != nil {
logrus.WithError(err).Errorf("stat cgroup %s", id)
return
}
for _, m := range c.metrics {
m.collect(id, stats, c.ns, ch)
}
}
// Add adds the provided cgroup and id so that metrics are collected and exported
func (c *Collector) Add(id string, cg cgroups.Cgroup) error {
c.mu.Lock()
defer c.mu.Unlock()
if _, ok := c.cgroups[id]; ok {
return ErrAlreadyCollected
}
c.cgroups[id] = cg
return nil
}
// Remove removes the provided cgroup by id from the collector
func (c *Collector) Remove(id string) {
c.mu.Lock()
defer c.mu.Unlock()
delete(c.cgroups, id)
}
func blkioValues(l []cgroups.BlkioEntry) []value {
var out []value
for _, e := range l {
out = append(out, value{
v: float64(e.Value),
l: []string{e.Op, e.Device, strconv.FormatUint(e.Major, 10), strconv.FormatUint(e.Minor, 10)},
})
}
return out
}
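A rough sketch of how the collector gets wired up, assuming go-metrics' NewNamespace/Register helpers and the cgroups Load function (not shown in this hunk) behave as in this vendor drop; the container id and cgroup path are placeholders:

```go
// Sketch only: written as if it lived in this package, so the vendored
// imports above apply.
func exampleCollect() error {
	ns := metrics.NewNamespace("container", "", nil)
	c := New(ns) // New also registers the collector with the namespace
	metrics.Register(ns)

	// Load is assumed to come from the vendored cgroups package;
	// "abc" is a placeholder container id.
	cg, err := cgroups.Load(cgroups.V1, cgroups.StaticPath("/containers/abc"))
	if err != nil {
		return err
	}
	// Every prometheus scrape now stats this cgroup and exports the pids,
	// cpu, memory, hugetlb and blkio metrics defined above.
	return c.Add("abc", cg)
}
```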

View file

@ -0,0 +1,110 @@
package prometheus
import (
"sync"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/crosbymichael/cgroups"
metrics "github.com/docker/go-metrics"
)
func NewOOMCollector(ns *metrics.Namespace) (*OOMCollector, error) {
fd, err := syscall.EpollCreate1(syscall.EPOLL_CLOEXEC)
if err != nil {
return nil, err
}
c := &OOMCollector{
fd: fd,
memoryOOM: ns.NewLabeledGauge("memory_oom", "The number of times a container received an oom event", metrics.Total, "id"),
set: make(map[uintptr]*oom),
}
go c.start()
return c, nil
}
type OOMCollector struct {
mu sync.Mutex
memoryOOM metrics.LabeledGauge
fd int
set map[uintptr]*oom
}
type oom struct {
id string
c cgroups.Cgroup
}
func (o *OOMCollector) Add(id string, cg cgroups.Cgroup) error {
o.mu.Lock()
defer o.mu.Unlock()
fd, err := cg.OOMEventFD()
if err != nil {
return err
}
o.set[fd] = &oom{
id: id,
c: cg,
}
// set the gauge's default value
o.memoryOOM.WithValues(id).Set(0)
event := syscall.EpollEvent{
Fd: int32(fd),
Events: syscall.EPOLLHUP | syscall.EPOLLIN | syscall.EPOLLERR,
}
if err := syscall.EpollCtl(o.fd, syscall.EPOLL_CTL_ADD, int(fd), &event); err != nil {
return err
}
return nil
}
// Close closes the epoll fd
func (o *OOMCollector) Close() error {
return syscall.Close(int(o.fd))
}
func (o *OOMCollector) start() {
var events [128]syscall.EpollEvent
for {
n, err := syscall.EpollWait(o.fd, events[:], -1)
if err != nil {
if err == syscall.EINTR {
continue
}
logrus.WithField("error", err).Fatal("cgroups: epoll wait")
}
for i := 0; i < n; i++ {
o.process(uintptr(events[i].Fd), events[i].Events)
}
}
}
func (o *OOMCollector) process(fd uintptr, event uint32) {
// make sure to always flush the fd
flush(fd)
o.mu.Lock()
info, ok := o.set[fd]
if !ok {
o.mu.Unlock()
return
}
o.mu.Unlock()
// if we received an event but it was caused by the cgroup being deleted and the fd
// being closed, make sure we close our copy and remove the container from the set
if info.c.State() == cgroups.Deleted {
o.mu.Lock()
delete(o.set, fd)
o.mu.Unlock()
syscall.Close(int(fd))
return
}
o.memoryOOM.WithValues(info.id).Inc(1)
}
func flush(fd uintptr) error {
buf := make([]byte, 8)
_, err := syscall.Read(int(fd), buf)
return err
}
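OOM events are delivered through per-cgroup event fds that the collector multiplexes onto a single epoll fd; adding a container registers its fd and seeds the gauge at zero. A short hedged sketch, reusing the placeholder namespace and cgroup from the collector example above:

```go
// Sketch only: ns and cg are assumed to come from the collector example above.
func exampleOOM(ns *metrics.Namespace, cg cgroups.Cgroup) error {
	oomc, err := NewOOMCollector(ns)
	if err != nil {
		return err
	}
	// The background epoll loop increments the memory_oom gauge for this id
	// each time the kernel signals an OOM event on the cgroup.
	return oomc.Add("abc", cg)
}
```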

View file

@ -0,0 +1,42 @@
package prometheus
import (
"github.com/crosbymichael/cgroups"
metrics "github.com/docker/go-metrics"
"github.com/prometheus/client_golang/prometheus"
)
var pidMetrics = []*metric{
{
name: "pids",
help: "The limit to the number of pids allowed",
unit: metrics.Unit("limit"),
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Pids == nil {
return nil
}
return []value{
{
v: float64(stats.Pids.Limit),
},
}
},
},
{
name: "pids",
help: "The current number of pids",
unit: metrics.Unit("current"),
vt: prometheus.GaugeValue,
getValues: func(stats *cgroups.Stats) []value {
if stats.Pids == nil {
return nil
}
return []value{
{
v: float64(stats.Pids.Current),
},
}
},
},
}

12
vendor/github.com/crosbymichael/cgroups/state.go generated vendored Normal file
View file

@ -0,0 +1,12 @@
package cgroups
// State is a type that represents the state of the current cgroup
type State string
const (
Unknown State = ""
Thawed State = "thawed"
Frozen State = "frozen"
Freezing State = "freezing"
Deleted State = "deleted"
)

109
vendor/github.com/crosbymichael/cgroups/stats.go generated vendored Normal file
View file

@ -0,0 +1,109 @@
package cgroups
import "sync"
type Stats struct {
cpuMu sync.Mutex
Hugetlb map[string]HugetlbStat
Pids *PidsStat
Cpu *CpuStat
Memory *MemoryStat
Blkio *BlkioStat
}
type HugetlbStat struct {
Usage uint64
Max uint64
Failcnt uint64
}
type PidsStat struct {
Current uint64
Limit uint64
}
type CpuStat struct {
Usage CpuUsage
Throttling Throttle
}
type CpuUsage struct {
// Units: nanoseconds.
Total uint64
PerCpu []uint64
Kernel uint64
User uint64
}
type Throttle struct {
Periods uint64
ThrottledPeriods uint64
ThrottledTime uint64
}
type MemoryStat struct {
Cache uint64
RSS uint64
RSSHuge uint64
MappedFile uint64
Dirty uint64
Writeback uint64
PgPgIn uint64
PgPgOut uint64
PgFault uint64
PgMajFault uint64
InactiveAnon uint64
ActiveAnon uint64
InactiveFile uint64
ActiveFile uint64
Unevictable uint64
HierarchicalMemoryLimit uint64
HierarchicalSwapLimit uint64
TotalCache uint64
TotalRSS uint64
TotalRSSHuge uint64
TotalMappedFile uint64
TotalDirty uint64
TotalWriteback uint64
TotalPgPgIn uint64
TotalPgPgOut uint64
TotalPgFault uint64
TotalPgMajFault uint64
TotalInactiveAnon uint64
TotalActiveAnon uint64
TotalInactiveFile uint64
TotalActiveFile uint64
TotalUnevictable uint64
Usage MemoryEntry
Swap MemoryEntry
Kernel MemoryEntry
KernelTCP MemoryEntry
}
type MemoryEntry struct {
Limit uint64
Usage uint64
Max uint64
Failcnt uint64
}
type BlkioStat struct {
IoServiceBytesRecursive []BlkioEntry
IoServicedRecursive []BlkioEntry
IoQueuedRecursive []BlkioEntry
IoServiceTimeRecursive []BlkioEntry
IoWaitTimeRecursive []BlkioEntry
IoMergedRecursive []BlkioEntry
IoTimeRecursive []BlkioEntry
SectorsRecursive []BlkioEntry
}
type BlkioEntry struct {
Op string
Device string
Major uint64
Minor uint64
Value uint64
}

94
vendor/github.com/crosbymichael/cgroups/subsystem.go generated vendored Normal file
View file

@ -0,0 +1,94 @@
package cgroups
import (
"fmt"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
// Name is a typed name for a cgroup subsystem
type Name string
const (
Devices Name = "devices"
Hugetlb Name = "hugetlb"
Freezer Name = "freezer"
Pids Name = "pids"
NetCLS Name = "net_cls"
NetPrio Name = "net_prio"
PerfEvent Name = "perf_event"
Cpuset Name = "cpuset"
Cpu Name = "cpu"
Cpuacct Name = "cpuacct"
Memory Name = "memory"
Blkio Name = "blkio"
)
// Subsystems returns a complete list of the default cgroups
// available on most Linux systems
func Subsystems() []Name {
n := []Name{
Hugetlb,
Freezer,
Pids,
NetCLS,
NetPrio,
PerfEvent,
Cpuset,
Cpu,
Cpuacct,
Memory,
Blkio,
}
if !isUserNS {
n = append(n, Devices)
}
return n
}
type Subsystem interface {
Name() Name
}
type pather interface {
Subsystem
Path(path string) string
}
type creator interface {
Subsystem
Create(path string, resources *specs.LinuxResources) error
}
type deleter interface {
Subsystem
Delete(path string) error
}
type stater interface {
Subsystem
Stat(path string, stats *Stats) error
}
type updater interface {
Subsystem
Update(path string, resources *specs.LinuxResources) error
}
// SingleSubsystem returns a single cgroup subsystem within the base Hierarchy
func SingleSubsystem(baseHierarchy Hierarchy, subsystem Name) Hierarchy {
return func() ([]Subsystem, error) {
subsystems, err := baseHierarchy()
if err != nil {
return nil, err
}
for _, s := range subsystems {
if s.Name() == subsystem {
return []Subsystem{
s,
}, nil
}
}
return nil, fmt.Errorf("unable to find subsystem %s", subsystem)
}
}
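For example, a caller that only needs the freezer controller can narrow the default hierarchy; a brief sketch assuming the V1 hierarchy from this package as the base:

```go
// Sketch: select only the freezer controller out of the v1 hierarchy.
func exampleFreezerOnly() ([]Subsystem, error) {
	freezerOnly := SingleSubsystem(V1, Freezer)
	// Returns exactly one Subsystem on success, or an error if the
	// freezer controller is not available on this host.
	return freezerOnly()
}
```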

103
vendor/github.com/crosbymichael/cgroups/systemd.go generated vendored Normal file
View file

@ -0,0 +1,103 @@
// +build systemd
package cgroups
import (
"fmt"
"path/filepath"
"strings"
"sync"
systemdDbus "github.com/coreos/go-systemd/dbus"
"github.com/godbus/dbus"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
const (
SystemdDbus Name = "systemd"
defaultSlice = "system.slice"
)
func Systemd() ([]Subsystem, error) {
root, err := unifiedMountPoint()
if err != nil {
return nil, err
}
defaultSubsystems, err := defaults(root)
if err != nil {
return nil, err
}
s, err := NewSystemd(root)
if err != nil {
return nil, err
}
// make sure the systemd controller is added first
return append([]Subsystem{s}, defaultSubsystems...), nil
}
func Slice(slice, name string) Path {
if slice == "" {
slice = defaultSlice
}
return func(subsystem Name) (string, error) {
return filepath.Join(slice, unitName(name)), nil
}
}
func NewSystemd(root string) (*SystemdController, error) {
conn, err := systemdDbus.New()
if err != nil {
return nil, err
}
return &SystemdController{
root: root,
conn: conn,
}, nil
}
type SystemdController struct {
mu sync.Mutex
conn *systemdDbus.Conn
root string
}
func (s *SystemdController) Name() Name {
return SystemdDbus
}
func (s *SystemdController) Create(path string, resources *specs.LinuxResources) error {
slice, name := splitName(path)
properties := []systemdDbus.Property{
systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)),
systemdDbus.PropWants(slice),
newProperty("DefaultDependencies", false),
newProperty("Delegate", true),
newProperty("MemoryAccounting", true),
newProperty("CPUAccounting", true),
newProperty("BlockIOAccounting", true),
}
_, err := s.conn.StartTransientUnit(name, "replace", properties, nil)
return err
}
func (s *SystemdController) Delete(path string) error {
_, name := splitName(path)
_, err := s.conn.StopUnit(name, "replace", nil)
return err
}
func newProperty(name string, units interface{}) systemdDbus.Property {
return systemdDbus.Property{
Name: name,
Value: dbus.MakeVariant(units),
}
}
func unitName(name string) string {
return fmt.Sprintf("%s.slice", name)
}
func splitName(path string) (slice string, unit string) {
slice, unit = filepath.Split(path)
return strings.TrimSuffix(slice, "/"), unit
}

10
vendor/github.com/crosbymichael/cgroups/ticks.go generated vendored Normal file
View file

@ -0,0 +1,10 @@
package cgroups
/*
#include <unistd.h>
*/
import "C"
func getClockTicks() uint64 {
return uint64(C.sysconf(C._SC_CLK_TCK))
}

274
vendor/github.com/crosbymichael/cgroups/utils.go generated vendored Normal file
View file

@ -0,0 +1,274 @@
package cgroups
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"time"
units "github.com/docker/go-units"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
var isUserNS = runningInUserNS()
// runningInUserNS detects whether we are currently running in a user namespace.
// Copied from github.com/lxc/lxd/shared/util.go
func runningInUserNS() bool {
file, err := os.Open("/proc/self/uid_map")
if err != nil {
// This kernel-provided file only exists if user namespaces are supported
return false
}
defer file.Close()
buf := bufio.NewReader(file)
l, _, err := buf.ReadLine()
if err != nil {
return false
}
line := string(l)
var a, b, c int64
fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
/*
* We assume we are in the initial user namespace if we have a full
* range - 4294967295 uids starting at uid 0.
*/
if a == 0 && b == 0 && c == 4294967295 {
return false
}
return true
}
// defaults returns all known groups
func defaults(root string) ([]Subsystem, error) {
h, err := NewHugetlb(root)
if err != nil {
return nil, err
}
s := []Subsystem{
NewNamed(root, "systemd"),
h,
NewFreezer(root),
NewPids(root),
NewNetCls(root),
NewNetPrio(root),
NewPerfEvent(root),
NewCputset(root),
NewCpu(root),
NewCpuacct(root),
NewMemory(root),
NewBlkio(root),
}
// only add the devices cgroup if we are not in a user namespace
// because modifications are not allowed
if !isUserNS {
s = append(s, NewDevices(root))
}
return s, nil
}
// remove will remove a cgroup path handling EAGAIN and EBUSY errors and
// retrying the remove after an exponential backoff
func remove(path string) error {
delay := 10 * time.Millisecond
for i := 0; i < 5; i++ {
if i != 0 {
time.Sleep(delay)
delay *= 2
}
if err := os.RemoveAll(path); err == nil {
return nil
}
}
return fmt.Errorf("cgroups: unable to remove path %q", path)
}
// readPids will read all the pids in a cgroup by the provided path
func readPids(path string, subsystem Name) ([]Process, error) {
f, err := os.Open(filepath.Join(path, cgroupProcs))
if err != nil {
return nil, err
}
defer f.Close()
var (
out []Process
s = bufio.NewScanner(f)
)
for s.Scan() {
if t := s.Text(); t != "" {
pid, err := strconv.Atoi(t)
if err != nil {
return nil, err
}
out = append(out, Process{
Pid: pid,
Subsystem: subsystem,
Path: path,
})
}
}
return out, nil
}
func hugePageSizes() ([]string, error) {
var (
pageSizes []string
sizeList = []string{"B", "kB", "MB", "GB", "TB", "PB"}
)
files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages")
if err != nil {
return nil, err
}
for _, st := range files {
nameArray := strings.Split(st.Name(), "-")
pageSize, err := units.RAMInBytes(nameArray[1])
if err != nil {
return nil, err
}
pageSizes = append(pageSizes, units.CustomSize("%g%s", float64(pageSize), 1024.0, sizeList))
}
return pageSizes, nil
}
func readUint(path string) (uint64, error) {
v, err := ioutil.ReadFile(path)
if err != nil {
return 0, err
}
return parseUint(strings.TrimSpace(string(v)), 10, 64)
}
func parseUint(s string, base, bitSize int) (uint64, error) {
v, err := strconv.ParseUint(s, base, bitSize)
if err != nil {
intValue, intErr := strconv.ParseInt(s, base, bitSize)
// 1. Handle negative values greater than MinInt64, and
// 2. Handle negative values less than MinInt64
if intErr == nil && intValue < 0 {
return 0, nil
} else if intErr != nil &&
intErr.(*strconv.NumError).Err == strconv.ErrRange &&
intValue < 0 {
return 0, nil
}
return 0, err
}
return v, nil
}
func parseKV(raw string) (string, uint64, error) {
parts := strings.Fields(raw)
switch len(parts) {
case 2:
v, err := parseUint(parts[1], 10, 64)
if err != nil {
return "", 0, err
}
return parts[0], v, nil
default:
return "", 0, ErrInvalidFormat
}
}
func parseCgroupFile(path string) (map[string]string, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
return parseCgroupFromReader(f)
}
func parseCgroupFromReader(r io.Reader) (map[string]string, error) {
var (
cgroups = make(map[string]string)
s = bufio.NewScanner(r)
)
for s.Scan() {
if err := s.Err(); err != nil {
return nil, err
}
var (
text = s.Text()
parts = strings.SplitN(text, ":", 3)
)
if len(parts) < 3 {
return nil, fmt.Errorf("invalid cgroup entry: must contain at least two colons: %v", text)
}
for _, subs := range strings.Split(parts[1], ",") {
cgroups[subs] = parts[2]
}
}
return cgroups, nil
}
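To make the format concrete: each /proc/[pid]/cgroup line has the form hierarchy-ID:controller-list:cgroup-path, and every controller in the comma-separated list maps to the same path. A small sketch with made-up input:

```go
// Illustrative input only; the two lines mimic a typical /proc/self/cgroup.
func exampleParse() (map[string]string, error) {
	r := strings.NewReader("4:memory:/user.slice\n3:cpu,cpuacct:/user.slice\n")
	// Returns {"memory": "/user.slice", "cpu": "/user.slice", "cpuacct": "/user.slice"}.
	return parseCgroupFromReader(r)
}
```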
func getCgroupDestination(subsystem string) (string, error) {
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return "", err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if err := s.Err(); err != nil {
return "", err
}
fields := strings.Fields(s.Text())
for _, opt := range strings.Split(fields[len(fields)-1], ",") {
if opt == subsystem {
return fields[3], nil
}
}
}
return "", ErrNoCgroupMountDestination
}
func pathers(subsystems []Subsystem) []pather {
var out []pather
for _, s := range subsystems {
if p, ok := s.(pather); ok {
out = append(out, p)
}
}
return out
}
func initializeSubsystem(s Subsystem, path Path, resources *specs.LinuxResources) error {
if c, ok := s.(creator); ok {
p, err := path(s.Name())
if err != nil {
return err
}
if err := c.Create(p, resources); err != nil {
return err
}
} else if c, ok := s.(pather); ok {
p, err := path(s.Name())
if err != nil {
return err
}
// do the default create if the group does not have a custom one
if err := os.MkdirAll(c.Path(p), defaultDirPerm); err != nil {
return err
}
}
return nil
}
func cleanPath(path string) string {
if path == "" {
return ""
}
path = filepath.Clean(path)
if !filepath.IsAbs(path) {
path, _ = filepath.Rel(string(os.PathSeparator), filepath.Clean(string(os.PathSeparator)+path))
}
return filepath.Clean(path)
}

65
vendor/github.com/crosbymichael/cgroups/v1.go generated vendored Normal file
View file

@ -0,0 +1,65 @@
package cgroups
import (
"bufio"
"fmt"
"os"
"path/filepath"
"strings"
)
// V1 returns all the groups in the default cgroups mountpoint in a single hierarchy
func V1() ([]Subsystem, error) {
root, err := v1MountPoint()
if err != nil {
return nil, err
}
subsystems, err := defaults(root)
if err != nil {
return nil, err
}
var enabled []Subsystem
for _, s := range pathers(subsystems) {
// filter out the default groups that do not exist on this system
if _, err := os.Lstat(s.Path("/")); err == nil {
enabled = append(enabled, s)
}
}
return enabled, nil
}
// v1MountPoint returns the mount point where the cgroup
// mountpoints are mounted in a single hierarchy
func v1MountPoint() (string, error) {
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return "", err
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
if err := scanner.Err(); err != nil {
return "", err
}
var (
text = scanner.Text()
fields = strings.Split(text, " ")
// safe as mountinfo encodes mountpoints with spaces as \040.
index = strings.Index(text, " - ")
postSeparatorFields = strings.Fields(text[index+3:])
numPostFields = len(postSeparatorFields)
)
// this is an error as we can't detect if the mount is for "cgroup"
if numPostFields == 0 {
return "", fmt.Errorf("Found no fields post '-' in %q", text)
}
if postSeparatorFields[0] == "cgroup" {
// check that the mount is properly formatted.
if numPostFields < 3 {
return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
}
return filepath.Dir(fields[4]), nil
}
}
return "", ErrMountPointNotExist
}
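As a worked example (the sample line is illustrative, not taken from a real host): for a mountinfo entry such as `26 20 0:22 / /sys/fs/cgroup/memory rw,nosuid - cgroup cgroup rw,memory`, the text after " - " begins with the filesystem type "cgroup", fields[4] is the mountpoint /sys/fs/cgroup/memory, and v1MountPoint therefore returns its parent directory /sys/fs/cgroup.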

View file

@ -52,13 +52,13 @@ func (n *Namespace) WithConstLabels(labels Labels) *Namespace {
func (n *Namespace) NewCounter(name, help string) Counter {
c := &counter{pc: prometheus.NewCounter(n.newCounterOpts(name, help))}
n.addMetric(c)
n.Add(c)
return c
}
func (n *Namespace) NewLabeledCounter(name, help string, labels ...string) LabeledCounter {
c := &labeledCounter{pc: prometheus.NewCounterVec(n.newCounterOpts(name, help), labels)}
n.addMetric(c)
n.Add(c)
return c
}
@ -76,7 +76,7 @@ func (n *Namespace) NewTimer(name, help string) Timer {
t := &timer{
m: prometheus.NewHistogram(n.newTimerOpts(name, help)),
}
n.addMetric(t)
n.Add(t)
return t
}
@ -84,7 +84,7 @@ func (n *Namespace) NewLabeledTimer(name, help string, labels ...string) Labeled
t := &labeledTimer{
m: prometheus.NewHistogramVec(n.newTimerOpts(name, help), labels),
}
n.addMetric(t)
n.Add(t)
return t
}
@ -102,7 +102,7 @@ func (n *Namespace) NewGauge(name, help string, unit Unit) Gauge {
g := &gauge{
pg: prometheus.NewGauge(n.newGaugeOpts(name, help, unit)),
}
n.addMetric(g)
n.Add(g)
return g
}
@ -110,7 +110,7 @@ func (n *Namespace) NewLabeledGauge(name, help string, unit Unit, labels ...stri
g := &labeledGauge{
pg: prometheus.NewGaugeVec(n.newGaugeOpts(name, help, unit), labels),
}
n.addMetric(g)
n.Add(g)
return g
}
@ -142,12 +142,24 @@ func (n *Namespace) Collect(ch chan<- prometheus.Metric) {
}
}
func (n *Namespace) addMetric(collector prometheus.Collector) {
func (n *Namespace) Add(collector prometheus.Collector) {
n.mu.Lock()
n.metrics = append(n.metrics, collector)
n.mu.Unlock()
}
func (n *Namespace) NewDesc(name, help string, unit Unit, labels ...string) *prometheus.Desc {
if string(unit) != "" {
name = fmt.Sprintf("%s_%s", name, unit)
}
namespace := n.name
if n.subsystem != "" {
namespace = fmt.Sprintf("%s_%s", namespace, n.subsystem)
}
name = fmt.Sprintf("%s_%s", namespace, name)
return prometheus.NewDesc(name, help, labels, prometheus.Labels(n.labels))
}
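A hedged worked example of the name NewDesc produces, assuming a namespace created with NewNamespace("container", "", nil) as in the vendored helpers:

```go
// Sketch only: written as if it lived in this package.
func exampleDesc() *prometheus.Desc {
	ns := NewNamespace("container", "", nil)
	// The resulting fully-qualified name is "container_memory_usage_bytes";
	// "id" is its only variable label.
	return ns.NewDesc("memory_usage", "The memory usage", Bytes, "id")
}
```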
// mergeLabels merges two or more labels objects into a single map, favoring
// the later labels.
func mergeLabels(lbs ...Labels) Labels {

View file

@ -1,12 +1,14 @@
# Open Container Initiative Runtime Specification
The [Open Container Initiative](http://www.opencontainers.org/) develops specifications for standards on Operating System process and application containers.
The [Open Container Initiative][oci] develops specifications for standards on Operating System process and application containers.
The specification can be found [here](spec.md).
## Table of Contents
Additional documentation about how this group operates:
- [Code of Conduct](https://github.com/opencontainers/tob/blob/d2f9d68c1332870e40693fe077d311e0742bc73d/code-of-conduct.md)
- [Code of Conduct][code-of-conduct]
- [Style and Conventions](style.md)
- [Roadmap](ROADMAP.md)
- [Implementations](implementations.md)
@ -14,38 +16,38 @@ Additional documentation about how this group operates:
- [project](project.md)
- [charter][charter]
# Use Cases
## Use Cases
To provide context for users the following section gives example use cases for each part of the spec.
#### Application Bundle Builders
### Application Bundle Builders
Application bundle builders can create a [bundle](bundle.md) directory that includes all of the files required for launching an application as a container.
The bundle contains an OCI [configuration file](config.md) where the builder can specify host-independent details such as [which executable to launch](config.md#process) and host-specific settings such as [mount](config.md#mounts) locations, [hook](config.md#hooks) paths, Linux [namespaces](config-linux.md#namespaces) and [cgroups](config-linux.md#control-groups).
Because the configuration includes host-specific settings, application bundle directories copied between two hosts may require configuration adjustments.
#### Hook Developers
### Hook Developers
[Hook](config.md#hooks) developers can extend the functionality of an OCI-compliant runtime by hooking into a container's lifecycle with an external application.
Example use cases include sophisticated network configuration, volume garbage collection, etc.
#### Runtime Developers
### Runtime Developers
Runtime developers can build runtime implementations that run OCI-compliant bundles and container configuration, containing low-level OS and host specific details, on a particular platform.
# Releases
## Releases
There is a loose [Road Map](./ROADMAP.md).
During the `0.x` series of OCI releases we make no backwards compatibility guarantees and intend to break the schema during this series.
# Contributing
## Contributing
Development happens on GitHub for the spec.
Issues are used for bugs and actionable items and longer discussions can happen on the [mailing list](#mailing-list).
The specification and code is licensed under the Apache 2.0 license found in the [LICENSE](./LICENSE) file.
## Discuss your design
### Discuss your design
The project welcomes submissions, but please let everyone know what you are working on.
@ -56,27 +58,27 @@ It also guarantees that the design is sound before code is written; a GitHub pul
Typos and grammatical errors can go straight to a pull-request.
When in doubt, start on the [mailing-list](#mailing-list).
## Weekly Call
### Weekly Call
The contributors and maintainers of all OCI projects have a weekly meeting Wednesdays at 2:00 PM (USA Pacific).
Everyone is welcome to participate via [UberConference web][UberConference] or audio-only: 415-968-0849 (no PIN needed.)
Everyone is welcome to participate via [UberConference web][uberconference] or audio-only: 415-968-0849 (no PIN needed.)
An initial agenda will be posted to the [mailing list](#mailing-list) earlier in the week, and everyone is welcome to propose additional topics or suggest other agenda alterations there.
Minutes are posted to the [mailing list](#mailing-list) and minutes from past calls are archived to the [wiki](https://github.com/opencontainers/runtime-spec/wiki) for those who are unable to join the call.
Minutes are posted to the [mailing list](#mailing-list) and minutes from past calls are archived to the [wiki][runtime-wiki].
## Mailing List
### Mailing List
You can subscribe and join the mailing list on [Google Groups](https://groups.google.com/a/opencontainers.org/forum/#!forum/dev).
You can subscribe and join the mailing list on [Google Groups][dev-list].
## IRC
### IRC
OCI discussion happens on #opencontainers on Freenode ([logs][irc-logs]).
## Git commit
### Git commit
### Sign your work
#### Sign your work
The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch.
The rules are pretty simple: if you can certify the below (from [developercertificate.org](http://developercertificate.org/)):
The rules are pretty simple: if you can certify the below (from http://developercertificate.org):
```
Developer Certificate of Origin
@ -125,10 +127,10 @@ using your real name (sorry, no pseudonyms or anonymous contributions.)
You can add the sign off when creating the git commit via `git commit -s`.
### Commit Style
#### Commit Style
Simple house-keeping for clean git history.
Read more on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/) or the Discussion section of [`git-commit(1)`](http://git-scm.com/docs/git-commit).
Read more on [How to Write a Git Commit Message][how-to-git-commit] or the Discussion section of [git-commit(1)][git-commit.1].
1. Separate the subject from body with a blank line
2. Limit the subject line to 50 characters
@ -140,6 +142,14 @@ Read more on [How to Write a Git Commit Message](http://chris.beams.io/posts/git
* If there was important/useful/essential conversation or information, copy or include a reference
8. When possible, one keyword to scope the change in the subject (i.e. "README: ...", "runtime: ...")
[UberConference]: https://www.uberconference.com/opencontainers
[irc-logs]: http://ircbot.wl.linuxfoundation.org/eavesdrop/%23opencontainers/
[charter]: https://www.opencontainers.org/about/governance
[code-of-conduct]: https://github.com/opencontainers/tob/blob/master/code-of-conduct.md
[dev-list]: https://groups.google.com/a/opencontainers.org/forum/#!forum/dev
[how-to-git-commit]: http://chris.beams.io/posts/git-commit
[irc-logs]: http://ircbot.wl.linuxfoundation.org/eavesdrop/%23opencontainers/
[oci]: https://www.opencontainers.org
[runtime-wiki]: https://github.com/opencontainers/runtime-spec/wiki
[uberconference]: https://www.uberconference.com/opencontainers
[git-commit.1]: http://git-scm.com/docs/git-commit

View file

@ -17,7 +17,7 @@ type Spec struct {
// Mounts configures additional mounts (on top of Root).
Mounts []Mount `json:"mounts,omitempty"`
// Hooks configures callbacks for container lifecycle events.
Hooks Hooks `json:"hooks"`
Hooks *Hooks `json:"hooks,omitempty"`
// Annotations contains arbitrary metadata for the container.
Annotations map[string]string `json:"annotations,omitempty"`
@ -44,8 +44,8 @@ type Process struct {
// Cwd is the current working directory for the process and must be
// relative to the container's root.
Cwd string `json:"cwd"`
// Capabilities are Linux capabilities that are kept for the container.
Capabilities []string `json:"capabilities,omitempty" platform:"linux"`
// Capabilities are Linux capabilities that are kept for the process.
Capabilities *LinuxCapabilities `json:"capabilities,omitempty" platform:"linux"`
// Rlimits specifies rlimit options to apply to the process.
Rlimits []LinuxRlimit `json:"rlimits,omitempty" platform:"linux"`
// NoNewPrivileges controls whether additional privileges could be gained by processes in the container.
@ -56,6 +56,21 @@ type Process struct {
SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"`
}
// LinuxCapabilities specifies the whitelist of capabilities that are kept for a process.
// http://man7.org/linux/man-pages/man7/capabilities.7.html
type LinuxCapabilities struct {
// Bounding is the set of capabilities checked by the kernel.
Bounding []string `json:"bounding,omitempty" platform:"linux"`
// Effective is the set of capabilities checked by the kernel.
Effective []string `json:"effective,omitempty" platform:"linux"`
// Inheritable is the capabilities preserved across execve.
Inheritable []string `json:"inheritable,omitempty" platform:"linux"`
// Permitted is the limiting superset for effective capabilities.
Permitted []string `json:"permitted,omitempty" platform:"linux"`
// Ambient is the ambient set of capabilities that are kept.
Ambient []string `json:"ambient,omitempty" platform:"linux"`
}
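An illustrative value for the new field, as a runtime might populate it; the capability list is an example, not a required default:

```go
// Example only: a small whitelist applied to the first four capability sets,
// with a narrower ambient set.
func exampleCapabilities() *LinuxCapabilities {
	whitelist := []string{"CAP_AUDIT_WRITE", "CAP_KILL", "CAP_NET_BIND_SERVICE"}
	return &LinuxCapabilities{
		Bounding:    whitelist,
		Effective:   whitelist,
		Inheritable: whitelist,
		Permitted:   whitelist,
		Ambient:     []string{"CAP_NET_BIND_SERVICE"},
	}
}
```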
// Box specifies dimensions of a rectangle. Used for specifying the size of a console.
type Box struct {
// Height is the vertical dimension of a box.
@ -98,10 +113,10 @@ type Mount struct {
// Destination is the path where the mount will be placed relative to the container's root. The path and child directories MUST exist, a runtime MUST NOT create directories automatically to a mount point.
Destination string `json:"destination"`
// Type specifies the mount kind.
Type string `json:"type"`
Type string `json:"type,omitempty"`
// Source specifies the source path of the mount. In the case of bind mounts on
// Linux based systems this would be the file on the host.
Source string `json:"source"`
Source string `json:"source,omitempty"`
// Options are fstab style mount options.
Options []string `json:"options,omitempty"`
}
@ -139,7 +154,7 @@ type Linux struct {
// CgroupsPath specifies the path to cgroups that are created and/or joined by the container.
// The path is expected to be relative to the cgroups mountpoint.
// If resources are specified, the cgroups at CgroupsPath will be updated based on resources.
CgroupsPath *string `json:"cgroupsPath,omitempty"`
CgroupsPath string `json:"cgroupsPath,omitempty"`
// Namespaces contains the namespaces that are created and/or joined by the container
Namespaces []LinuxNamespace `json:"namespaces,omitempty"`
// Devices are a list of device nodes that are created for the container
@ -284,17 +299,17 @@ type LinuxCPU struct {
// CPU shares (relative weight (ratio) vs. other cgroups with cpu shares).
Shares *uint64 `json:"shares,omitempty"`
// CPU hardcap limit (in usecs). Allowed cpu time in a given period.
Quota *uint64 `json:"quota,omitempty"`
Quota *int64 `json:"quota,omitempty"`
// CPU period to be used for hardcapping (in usecs).
Period *uint64 `json:"period,omitempty"`
// How much time realtime scheduling may use (in usecs).
RealtimeRuntime *uint64 `json:"realtimeRuntime,omitempty"`
RealtimeRuntime *int64 `json:"realtimeRuntime,omitempty"`
// CPU period to be used for realtime scheduling (in usecs).
RealtimePeriod *uint64 `json:"realtimePeriod,omitempty"`
// CPUs to use within the cpuset. Default is to use any CPU available.
Cpus *string `json:"cpus,omitempty"`
Cpus string `json:"cpus,omitempty"`
// List of memory nodes in the cpuset. Default is to use any available memory node.
Mems *string `json:"mems,omitempty"`
Mems string `json:"mems,omitempty"`
}
// LinuxPids for Linux cgroup 'pids' resource management (Linux 4.3)
@ -356,20 +371,13 @@ type LinuxDeviceCgroup struct {
// Allow or deny
Allow bool `json:"allow"`
// Device type, block, char, etc.
Type *string `json:"type,omitempty"`
Type string `json:"type,omitempty"`
// Major is the device's major number.
Major *int64 `json:"major,omitempty"`
// Minor is the device's minor number.
Minor *int64 `json:"minor,omitempty"`
// Cgroup access permissions format, rwm.
Access *string `json:"access,omitempty"`
}
// LinuxSeccomp represents syscall restrictions
type LinuxSeccomp struct {
DefaultAction LinuxSeccompAction `json:"defaultAction"`
Architectures []Arch `json:"architectures"`
Syscalls []LinuxSyscall `json:"syscalls,omitempty"`
Access string `json:"access,omitempty"`
}
// Solaris contains platform specific configuration for Solaris application containers.
@ -469,6 +477,13 @@ type WindowsNetworkResources struct {
EgressBandwidth *uint64 `json:"egressBandwidth,omitempty"`
}
// LinuxSeccomp represents syscall restrictions
type LinuxSeccomp struct {
DefaultAction LinuxSeccompAction `json:"defaultAction"`
Architectures []Arch `json:"architectures,omitempty"`
Syscalls []LinuxSyscall `json:"syscalls"`
}
// Arch used for additional architectures
type Arch string
@ -491,6 +506,8 @@ const (
ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
ArchS390 Arch = "SCMP_ARCH_S390"
ArchS390X Arch = "SCMP_ARCH_S390X"
ArchPARISC Arch = "SCMP_ARCH_PARISC"
ArchPARISC64 Arch = "SCMP_ARCH_PARISC64"
)
// LinuxSeccompAction taken upon Seccomp rule match
@ -529,7 +546,8 @@ type LinuxSeccompArg struct {
// LinuxSyscall is used to match a syscall in Seccomp
type LinuxSyscall struct {
Name string `json:"name"`
Action LinuxSeccompAction `json:"action"`
Args []LinuxSeccompArg `json:"args,omitempty"`
Names []string `json:"names"`
Action LinuxSeccompAction `json:"action"`
Args []LinuxSeccompArg `json:"args"`
Comment string `json:"comment"`
}

View file

@ -6,12 +6,12 @@ type State struct {
Version string `json:"ociVersion"`
// ID is the container ID
ID string `json:"id"`
// Status is the runtime state of the container.
// Status is the runtime status of the container.
Status string `json:"status"`
// Pid is the process ID for the container process.
Pid int `json:"pid"`
// BundlePath is the path to the container's bundle directory.
BundlePath string `json:"bundlePath"`
// Annotations are the annotations associated with the container.
Annotations map[string]string `json:"annotations"`
// Bundle is the path to the container's bundle directory.
Bundle string `json:"bundle"`
// Annotations are key values associated with the container.
Annotations map[string]string `json:"annotations,omitempty"`
}

View file

@ -11,7 +11,7 @@ const (
VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = "-rc3"
VersionDev = "-rc5"
)
// Version is the specification version that the package types support.