vendor: remove dep and use vndr

Signed-off-by: Antonio Murdaca <runcom@redhat.com>
Antonio Murdaca 2017-06-06 09:19:04 +02:00
parent 16f44674a4
commit 148e72d81e
GPG key ID: B2BEAD150DE936B9 (no known key found for this signature in database)
16131 changed files with 73815 additions and 4235138 deletions


@@ -1 +0,0 @@
Stephen J Day <stephen.day@docker.com> <stevvooe@users.noreply.github.com>


@@ -1,12 +0,0 @@
approve_by_comment: true
approve_regex: '^(Approved|lgtm|LGTM|:shipit:|:star:|:\+1:|:ship:)'
reject_regex: ^Rejected
reset_on_push: true
author_approval: ignored
signed_off_by:
required: true
reviewers:
teams:
- go-digest-maintainers
name: default
required: 2


@@ -1,4 +0,0 @@
language: go
go:
- 1.7
- master


@@ -1,72 +0,0 @@
# Contributing to Docker open source projects
Want to hack on this project? Awesome! Here are instructions to get you started.
This project is a part of the [Docker](https://www.docker.com) project, and follows
the same rules and principles. If you're already familiar with the way
Docker does things, you'll feel right at home.
Otherwise, go read Docker's
[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
For an in-depth description of our contribution process, visit the
contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/)
### Sign your work
The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
Then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe.smith@email.com>
Use your real name (sorry, no pseudonyms or anonymous contributions.)
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.


@@ -1,9 +0,0 @@
Aaron Lehmann <aaron.lehmann@docker.com> (@aaronlehmann)
Brandon Philips <brandon.philips@coreos.com> (@philips)
Brendan Burns <bburns@microsoft.com> (@brendandburns)
Derek McGowan <derek@mcgstyle.net> (@dmcgowan)
Jason Bouzane <jbouzane@google.com> (@jbouzane)
John Starks <jostarks@microsoft.com> (@jstarks)
Jonathan Boulle <jon.boulle@coreos.com> (@jonboulle)
Stephen Day <stephen.day@docker.com> (@stevvooe)
Vincent Batts <vbatts@redhat.com> (@vbatts)


@@ -1,114 +0,0 @@
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package digest
import (
"bytes"
"crypto/rand"
_ "crypto/sha256"
_ "crypto/sha512"
"flag"
"fmt"
"strings"
"testing"
)
func TestFlagInterface(t *testing.T) {
var (
alg Algorithm
flagSet flag.FlagSet
)
flagSet.Var(&alg, "algorithm", "set the digest algorithm")
for _, testcase := range []struct {
Name string
Args []string
Err error
Expected Algorithm
}{
{
Name: "Invalid",
Args: []string{"-algorithm", "bean"},
Err: ErrDigestUnsupported,
},
{
Name: "Default",
Args: []string{"unrelated"},
Expected: "sha256",
},
{
Name: "Other",
Args: []string{"-algorithm", "sha512"},
Expected: "sha512",
},
} {
t.Run(testcase.Name, func(t *testing.T) {
alg = Canonical
if err := flagSet.Parse(testcase.Args); err != testcase.Err {
if testcase.Err == nil {
t.Fatal("unexpected error", err)
}
// check that flag package returns correct error
if !strings.Contains(err.Error(), testcase.Err.Error()) {
t.Fatalf("unexpected error: %v != %v", err, testcase.Err)
}
return
}
if alg != testcase.Expected {
t.Fatalf("unexpected algorithm: %v != %v", alg, testcase.Expected)
}
})
}
}
func TestFroms(t *testing.T) {
p := make([]byte, 1<<20)
rand.Read(p)
for alg := range algorithms {
h := alg.Hash()
h.Write(p)
expected := Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil)))
readerDgst, err := alg.FromReader(bytes.NewReader(p))
if err != nil {
t.Fatalf("error calculating hash from reader: %v", err)
}
dgsts := []Digest{
alg.FromBytes(p),
alg.FromString(string(p)),
readerDgst,
}
if alg == Canonical {
readerDgst, err := FromReader(bytes.NewReader(p))
if err != nil {
t.Fatalf("error calculating hash from reader: %v", err)
}
dgsts = append(dgsts,
FromBytes(p),
FromString(string(p)),
readerDgst)
}
for _, dgst := range dgsts {
if dgst != expected {
t.Fatalf("unexpected digest %v != %v", dgst, expected)
}
}
}
}


@@ -1,106 +0,0 @@
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package digest
import (
"testing"
)
func TestParseDigest(t *testing.T) {
for _, testcase := range []struct {
input string
err error
algorithm Algorithm
hex string
}{
{
input: "sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
algorithm: "sha256",
hex: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
},
{
input: "sha384:d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d",
algorithm: "sha384",
hex: "d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d",
},
{
// empty hex
input: "sha256:",
err: ErrDigestInvalidFormat,
},
{
// empty hex
input: ":",
err: ErrDigestInvalidFormat,
},
{
// just hex
input: "d41d8cd98f00b204e9800998ecf8427e",
err: ErrDigestInvalidFormat,
},
{
// not hex
input: "sha256:d41d8cd98f00b204e9800m98ecf8427e",
err: ErrDigestInvalidFormat,
},
{
// too short
input: "sha256:abcdef0123456789",
err: ErrDigestInvalidLength,
},
{
// too short (from different algorithm)
input: "sha512:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
err: ErrDigestInvalidLength,
},
{
input: "foo:d41d8cd98f00b204e9800998ecf8427e",
err: ErrDigestUnsupported,
},
} {
digest, err := Parse(testcase.input)
if err != testcase.err {
t.Fatalf("error differed from expected while parsing %q: %v != %v", testcase.input, err, testcase.err)
}
if testcase.err != nil {
continue
}
if digest.Algorithm() != testcase.algorithm {
t.Fatalf("incorrect algorithm for parsed digest: %q != %q", digest.Algorithm(), testcase.algorithm)
}
if digest.Hex() != testcase.hex {
t.Fatalf("incorrect hex for parsed digest: %q != %q", digest.Hex(), testcase.hex)
}
// Parse string return value and check equality
newParsed, err := Parse(digest.String())
if err != nil {
t.Fatalf("unexpected error parsing input %q: %v", testcase.input, err)
}
if newParsed != digest {
t.Fatalf("expected equal: %q != %q", newParsed, digest)
}
newFromHex := NewDigestFromHex(newParsed.Algorithm().String(), newParsed.Hex())
if newFromHex != digest {
t.Fatalf("%v != %v", newFromHex, digest)
}
}
}


@@ -1,80 +0,0 @@
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package digest
import (
"bytes"
"crypto/rand"
"io"
"reflect"
"testing"
)
func TestDigestVerifier(t *testing.T) {
p := make([]byte, 1<<20)
rand.Read(p)
digest := FromBytes(p)
verifier := digest.Verifier()
io.Copy(verifier, bytes.NewReader(p))
if !verifier.Verified() {
t.Fatalf("bytes not verified")
}
}
// TestVerifierUnsupportedDigest ensures that unsupported digest validation is
// flowing through verifier creation.
func TestVerifierUnsupportedDigest(t *testing.T) {
for _, testcase := range []struct {
Name string
Digest Digest
Expected interface{} // expected panic target
}{
{
Name: "Empty",
Digest: "",
Expected: "no ':' separator in digest \"\"",
},
{
Name: "EmptyAlg",
Digest: ":",
Expected: "empty digest algorithm, validate before calling Algorithm.Hash()",
},
{
Name: "Unsupported",
Digest: Digest("bean:0123456789abcdef"),
Expected: "bean not available (make sure it is imported)",
},
{
Name: "Garbage",
Digest: Digest("sha256-garbage:pure"),
Expected: "sha256-garbage not available (make sure it is imported)",
},
} {
t.Run(testcase.Name, func(t *testing.T) {
expected := testcase.Expected
defer func() {
recovered := recover()
if !reflect.DeepEqual(recovered, expected) {
t.Fatalf("unexpected recover: %v != %v", recovered, expected)
}
}()
_ = testcase.Digest.Verifier()
})
}
}


@@ -1,3 +0,0 @@
/oci-validate-examples
output
header.html


@@ -1,14 +0,0 @@
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.


@@ -1,27 +0,0 @@
version: 2
requirements:
signed_off_by:
required: true
group_defaults:
required: 2
approve_by_comment:
enabled: true
approve_regex: '^(Approved|lgtm|LGTM|:shipit:|:star:|:\+1:|:ship:)'
reject_regex: ^Rejected
reset_on_push:
enabled: true
author_approval:
ignored: true
always_pending:
title_regex: ^WIP
explanation: 'Work in progress...'
conditions:
branches:
- master
groups:
image-spec:
teams:
- image-spec-maintainers


@@ -1,16 +0,0 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
ret=0
for file in $(find . -type f -iname '*.go' ! -path './vendor/*'); do
if ! head -n3 "${file}" | grep -Eq "(Copyright|generated|GENERATED)"; then
echo "${file}:missing license header"
ret=1
fi
done
exit $ret


@@ -1,54 +0,0 @@
// Copyright 2017 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"fmt"
"os"
"os/exec"
"strings"
"text/template"
specs "github.com/opencontainers/image-spec/specs-go"
)
var headerTemplate = template.Must(template.New("gen").Parse(`<title>image-spec {{.Version}}</title>
<base href="https://raw.githubusercontent.com/opencontainers/image-spec/{{.Branch}}/">`))
type Obj struct {
Version string
Branch string
}
func main() {
obj := Obj{
Version: specs.Version,
Branch: specs.Version,
}
if strings.HasSuffix(specs.Version, "-dev") {
cmd := exec.Command("git", "log", "-1", `--pretty=%H`, "HEAD")
var out bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
obj.Branch = strings.Trim(out.String(), " \n\r")
}
headerTemplate.Execute(os.Stdout, obj)
}


@@ -1,24 +0,0 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
if ! command -v gometalinter >/dev/null 2>&1; then
go get -u github.com/alecthomas/gometalinter
gometalinter --install
fi
for d in $(find . -type d -not -iwholename '*.git*' -a -not -iname '.tool' -a -not -iwholename '*vendor*'); do
gometalinter \
--exclude='error return value not checked.*(Close|Log|Print).*\(errcheck\)$' \
--exclude='.*_test\.go:.*error return value not checked.*\(errcheck\)$' \
--exclude='duplicate of.*_test.go.*\(dupl\)$' \
--exclude='schema/fs.go' \
--disable=aligncheck \
--disable=gotype \
--disable=gas \
--cyclo-over=35 \
--tests \
--deadline=60s "${d}"
done


@@ -1,28 +0,0 @@
language: go
go:
- 1.7
sudo: required
services:
- docker
before_script:
- export PATH=$HOME/gopath/bin:$PATH
before_install:
- docker pull vbatts/pandoc
- make install.tools
- go get -u github.com/alecthomas/gometalinter
- gometalinter --install
- go get -t -d ./...
install: true
script:
- env | grep TRAVIS_
- make .gitvalidation
- make lint
- make check-license
- make test
- make docs


@@ -1,19 +0,0 @@
Open Container Initiative Image Format Specification
Changes with v0.4.0:
Additions:
* Go package added for types
https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1 (#172)
* oci-image-tool: man page docs added
https://github.com/opencontainers/image-spec/tree/master/cmd/oci-image-tool/man
(#180)
Minor fixes and documentation:
* oci-image-tool: replace colon with hyphen to be windows friendly (#177)
* oci-image-tool: fix various bugs in extraction of images (#177)
* manifest: size as int64 allowing for huge containers (#153)
* manifest: move duplicate descriptions to reference descriptor doc (#167)
* serialization: explain opaques and their role (#171)


@@ -1,70 +0,0 @@
# Project governance
The [OCI charter][charter] §5.b.viii tasks an OCI Project's maintainers (listed in the repository's MAINTAINERS file and sometimes referred to as "the TDC", [§5.e][charter]) with:
> Creating, maintaining and enforcing governance guidelines for the TDC, approved by the maintainers, and which shall be posted visibly for the TDC.
This section describes generic rules and procedures for fulfilling that mandate.
## Proposing a motion
A maintainer SHOULD propose a motion on the dev@opencontainers.org mailing list (except [security issues](#security-issues)) with another maintainer as a co-sponsor.
## Voting
Voting on a proposed motion SHOULD happen on the dev@opencontainers.org mailing list (except [security issues](#security-issues)) with maintainers posting LGTM or REJECT.
Maintainers MAY also explicitly not vote by posting ABSTAIN (which is useful to revert a previous vote).
Maintainers MAY post multiple times (e.g. as they revise their position based on feedback), but only their final post counts in the tally.
A proposed motion is adopted if two-thirds of votes cast, a quorum having voted, are in favor of the motion.
Voting SHOULD remain open for a week to collect feedback from the wider community and allow the maintainers to digest the proposed motion.
Under exceptional conditions (e.g. non-major security fix releases) proposals which reach quorum with unanimous support MAY be adopted earlier.
A maintainer MAY choose to reply with REJECT.
A maintainer posting a REJECT MUST include a list of concerns or links to written documentation for those concerns (e.g. GitHub issues or mailing-list threads).
The maintainers SHOULD try to resolve the concerns and wait for the rejecting maintainer to change their opinion to LGTM.
However, a motion MAY be adopted with REJECTs, as outlined in the previous paragraphs.
## Quorum
A quorum is established when at least two-thirds of maintainers have voted.
For projects that are not specifications, a [motion to release](#release-approval) MAY be adopted if the tally is at least three LGTMs and no REJECTs, even if three votes do not meet the usual two-thirds quorum.
## Security issues
Motions with sensitive security implications MUST be proposed on the security@opencontainers.org mailing list instead of dev@opencontainers.org, but should otherwise follow the standard [proposal](#proposing-a-motion) process.
The security@opencontainers.org mailing list includes all members of the TOB.
The TOB will contact the project maintainers and provide a channel for discussing and voting on the motion, but voting will otherwise follow the standard [voting](#voting) and [quorum](#quorum) rules.
The TOB and project maintainers will work together to notify affected parties before making an adopted motion public.
## Amendments
The [project governance](#project-governance) rules and procedures MAY be amended or replaced using the procedures themselves.
The MAINTAINERS of this project governance document is the total set of MAINTAINERS from all Open Containers projects (runC, runtime-spec, and image-spec).
## Subject templates
Maintainers are busy and get lots of email.
To make project proposals recognizable, proposed motions SHOULD use the following subject templates.
### Proposing a motion
> [{project} VOTE]: {motion description} (closes {end of voting window})
For example:
> [runtime-spec VOTE]: Tag 0647920 as 1.0.0-rc (closes 2016-06-03 20:00 UTC)
### Tallying results
After voting closes, a maintainer SHOULD post a tally to the motion thread with a subject template like:
> [{project} {status}]: {motion description} (+{LGTMs} -{REJECTs} #{ABSTAINs})
Where `{status}` is either `adopted` or `rejected`.
For example:
> [runtime-spec adopted]: Tag 0647920 as 1.0.0-rc (+6 -0 #3)
[charter]: https://www.opencontainers.org/about/governance


@@ -1,104 +0,0 @@
# Hacking Guide
## Overview
This guide contains instructions for building artifacts contained in this repository.
### Go
This spec includes several Go packages, and a command line tool considered to be a reference implementation of the OCI image specification.
Prerequisites:
* Go - current release only, earlier releases are not supported
* make
The following make targets are relevant for any work involving the Go packages.
### Linting
The included Go source code is checked for linting violations beyond those caught by the standard Go compiler. Linting is done using [gometalinter](https://github.com/alecthomas/gometalinter).
Invocation:
```
$ make lint
```
### Tests
This target executes all Go based tests.
Invocation:
```
$ make test
$ make validate-examples
```
### Virtual schema http/FileSystem
The `schema` validator uses a virtual [http/FileSystem](https://golang.org/pkg/net/http/#FileSystem) to load the JSON schema files for validating OCI images and/or manifests.
The virtual filesystem is generated using the `esc` tool and compiled into consumers of the `schema` package so the JSON schema files don't have to be distributed alongside consumer binaries.
Whenever any of the `schema/*.json` files change, the generated virtual filesystem must be refreshed.
Otherwise schema changes will not be visible inside `schema` consumers.
Prerequisites:
* [esc](https://github.com/mjibson/esc)
Invocation:
```
$ make schema-fs
```
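For orientation, here is a hedged sketch of how a consumer might read one schema document through such a virtual filesystem. `esc` conventionally generates an `FS(useLocal bool) http.FileSystem` accessor, but the exact generated names in `schema/fs.go` may differ; `http.Dir` stands in for it below, and the schema file name is an assumption.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

// openSchema reads one JSON schema from a virtual http.FileSystem such as
// the one esc compiles into schema/fs.go.
func openSchema(fs http.FileSystem, name string) ([]byte, error) {
	f, err := fs.Open(name)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return ioutil.ReadAll(f)
}

func main() {
	// http.Dir stands in here for the esc-generated FS(false) accessor.
	data, err := openSchema(http.Dir("schema"), "/image-manifest-schema.json")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(data), "bytes of schema")
}
```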
### JSON schema formatting
This target auto-formats all JSON files in the `schema` directory using the `jq` tool.
Prerequisites:
* [jq](https://stedolan.github.io/jq/) >=1.5
Invocation:
```
$ make fmt
```
### OCI image specification PDF/HTML documentation files
This target generates a PDF/HTML version of the OCI image specification.
Prerequisites:
* [Docker](https://www.docker.com/)
Invocation:
```
$ make docs
```
### License header check
This target checks if the source code includes necessary headers.
Invocation:
```
$ make check-license
```
### Clean build artifacts
This target cleans all generated/compiled artifacts.
Invocation:
```
$ make clean
```
### Create PNG images from dot files
This target generates PNG image files from DOT source files in the `img` directory.
Prerequisites:
* [graphviz](http://www.graphviz.org/)
Invocation:
```
$ make img/media-types.png
```


@@ -1,7 +0,0 @@
Brandon Philips <brandon.philips@coreos.com> (@philips)
Brendan Burns <bburns@microsoft.com> (@brendandburns)
Jason Bouzane <jbouzane@google.com> (@jbouzane)
John Starks <jostarks@microsoft.com> (@jstarks)
Jonathan Boulle <jon.boulle@coreos.com> (@jonboulle)
Stephen Day <stephen.day@docker.com> (@stevvooe)
Vincent Batts <vbatts@redhat.com> (@vbatts)


@@ -1,145 +0,0 @@
GO15VENDOREXPERIMENT=1
export GO15VENDOREXPERIMENT
DOCKER ?= $(shell command -v docker 2>/dev/null)
PANDOC ?= $(shell command -v pandoc 2>/dev/null)
ifeq "$(strip $(PANDOC))" ''
ifneq "$(strip $(DOCKER))" ''
PANDOC = $(DOCKER) run \
-it \
--rm \
-v $(shell pwd)/:/input/:ro \
-v $(shell pwd)/$(OUTPUT_DIRNAME)/:/$(OUTPUT_DIRNAME)/ \
-u $(shell id -u) \
--workdir /input \
docker.io/vbatts/pandoc:1.17.0.3-2.fc25.x86_64
PANDOC_SRC := /input/
PANDOC_DST := /
endif
endif
# These docs are in an order that determines how they show up in the PDF/HTML docs.
DOC_FILES := \
spec.md \
media-types.md \
descriptor.md \
image-layout.md \
manifest.md \
image-index.md \
layer.md \
config.md \
annotations.md \
considerations.md \
implementations.md
FIGURE_FILES := \
img/media-types.png
OUTPUT_DIRNAME ?= output/
DOC_FILENAME ?= oci-image-spec
EPOCH_TEST_COMMIT ?= v0.2.0
TOOLS := esc gitvalidation glide glide-vc
default: check-license lint test
help:
@echo "Usage: make <target>"
@echo
@echo " * 'docs' - produce document in the $(OUTPUT_DIRNAME) directory"
@echo " * 'fmt' - format the json with indentation"
@echo " * 'validate-examples' - validate the examples in the specification markdown files"
@echo " * 'schema-fs' - regenerate the virtual schema http/FileSystem"
@echo " * 'check-license' - check license headers in source files"
@echo " * 'lint' - Execute the source code linter"
@echo " * 'test' - Execute the unit tests"
@echo " * 'img/*.png' - Generate PNG from dot file"
fmt:
for i in schema/*.json ; do jq --indent 2 -M . "$${i}" > xx && cat xx > "$${i}" && rm xx ; done
docs: $(OUTPUT_DIRNAME)/$(DOC_FILENAME).pdf $(OUTPUT_DIRNAME)/$(DOC_FILENAME).html
ifeq "$(strip $(PANDOC))" ''
$(OUTPUT_DIRNAME)/$(DOC_FILENAME).pdf: $(DOC_FILES) $(FIGURE_FILES)
$(error cannot build $@ without either pandoc or docker)
else
$(OUTPUT_DIRNAME)/$(DOC_FILENAME).pdf: $(DOC_FILES) $(FIGURE_FILES)
@mkdir -p $(OUTPUT_DIRNAME)/ && \
$(PANDOC) -f markdown_github -t latex --latex-engine=xelatex -o $(PANDOC_DST)$@ $(patsubst %,$(PANDOC_SRC)%,$(DOC_FILES))
ls -sh $(shell readlink -f $@)
$(OUTPUT_DIRNAME)/$(DOC_FILENAME).html: header.html $(DOC_FILES) $(FIGURE_FILES)
@mkdir -p $(OUTPUT_DIRNAME)/ && \
cp -ap img/ $(shell pwd)/$(OUTPUT_DIRNAME)/&& \
$(PANDOC) -f markdown_github -t html5 -H $(PANDOC_SRC)header.html --standalone -o $(PANDOC_DST)$@ $(patsubst %,$(PANDOC_SRC)%,$(DOC_FILES))
ls -sh $(shell readlink -f $@)
endif
header.html: .tool/genheader.go specs-go/version.go
go run .tool/genheader.go > $@
validate-examples: schema/fs.go
go test -run TestValidate ./schema
schema/fs.go: $(wildcard schema/*.json) schema/gen.go
cd schema && printf "%s\n\n%s\n" "$$(cat ../.header)" "$$(go generate)" > fs.go
schema-fs: schema/fs.go
@echo "generating schema fs"
check-license:
@echo "checking license headers"
@./.tool/check-license
lint:
@echo "checking lint"
@./.tool/lint
test: schema/fs.go
go test -race -cover $(shell go list ./... | grep -v /vendor/)
img/%.png: img/%.dot
dot -Tpng $^ > $@
# When this is running in travis, it will only check the travis commit range
.gitvalidation:
@which git-validation > /dev/null 2>/dev/null || (echo "ERROR: git-validation not found. Consider 'make install.tools' target" && false)
ifdef TRAVIS_COMMIT_RANGE
git-validation -q -run DCO,short-subject,dangling-whitespace
else
git-validation -v -run DCO,short-subject,dangling-whitespace -range $(EPOCH_TEST_COMMIT)..HEAD
endif
install.tools: $(TOOLS:%=.install.%)
.install.esc:
go get -u github.com/mjibson/esc
.install.gitvalidation:
go get -u github.com/vbatts/git-validation
.install.glide:
go get -u github.com/Masterminds/glide
.install.glide-vc:
go get -u github.com/sgotti/glide-vc
clean:
rm -rf *~ $(OUTPUT_DIRNAME) header.html
.PHONY: \
$(TOOLS:%=.install.%) \
validate-examples \
check-license \
clean \
lint \
install.tools \
docs \
test \
.gitvalidation \
schema/fs.go \
schema-fs


@@ -1,51 +0,0 @@
# Releases
The release process hopes to encourage early, consistent consensus-building during project development.
The mechanisms used are regular community communication on the mailing list about progress, scheduled meetings for issue resolution and release triage, and regularly paced and communicated releases.
Releases are proposed and adopted or rejected using the usual [project governance](GOVERNANCE.md) rules and procedures.
An anti-pattern that we want to avoid is heavy development or discussions "late cycle" around major releases.
We want to build a community that is involved and communicates consistently through all releases instead of relying on "silent periods" as a judge of stability.
## Parallel releases
A single project MAY consider several motions to release in parallel.
However each motion to release after the initial 0.1.0 MUST be based on a previous release that has already landed.
For example, runtime-spec maintainers may propose a v1.0.0-rc2 on the 1st of the month and a v0.9.1 bugfix on the 2nd of the month.
They may not propose a v1.0.0-rc3 until the v1.0.0-rc2 is accepted (on the 7th if the vote initiated on the 1st passes).
## Specifications
The OCI maintains three categories of projects: specifications, applications, and conformance-testing tools.
However, specification releases have special restrictions in the [OCI charter][charter]:
* They are the target of backwards compatibility (§7.g), and
* They are subject to the OFWa patent grant (§8.d and e).
To avoid unfortunate side effects (onerous backwards compatibility requirements or Member resignations), the following additional procedures apply to specification releases:
### Planning a release
Every OCI specification project SHOULD hold meetings that involve maintainers reviewing pull requests, debating outstanding issues, and planning releases.
This meeting MUST be advertised on the project README and MAY happen on a phone call, video conference, or on IRC.
Maintainers MUST send updates to the dev@opencontainers.org with results of these meetings.
Before the specification reaches v1.0.0, the meetings SHOULD be weekly.
Once a specification has reached v1.0.0, the maintainers may alter the cadence, but a meeting MUST be held within four weeks of the previous meeting.
The release plans, corresponding milestones and estimated due dates MUST be published on GitHub (e.g. https://github.com/opencontainers/runtime-spec/milestones).
GitHub milestones and issues are only used for community organization and all releases MUST follow the [project governance](GOVERNANCE.md) rules and procedures.
### Timelines
Specifications have a variety of different timelines in their lifecycle.
* Pre-v1.0.0 specifications SHOULD release on a monthly cadence to garner feedback.
* Major specification releases MUST release at least three release candidates spaced a minimum of one week apart.
This means a major release like a v1.0.0 or v2.0.0 release will take at least one month: one week for rc1, one week for rc2, one week for rc3, and one week for the major release itself.
Maintainers SHOULD strive to make zero breaking changes during this cycle of release candidates and SHOULD restart the three-candidate count when a breaking change is introduced.
For example if a breaking change is introduced in v1.0.0-rc2 then the series would end with v1.0.0-rc4 and v1.0.0.
* Minor and patch releases SHOULD be made on an as-needed basis.
[charter]: https://www.opencontainers.org/about/governance


@@ -1,58 +0,0 @@
# Annotations
Several components of the specification, like [Image Manifests](manifest.md) and [Descriptors](descriptor.md), feature an optional annotations property, whose format is common and defined in this section.
This property contains arbitrary metadata.
## Rules
* Annotations MUST be a key-value map where both the key and value MUST be strings.
* While the value MUST be present, it MAY be an empty string.
* Keys MUST be unique within this map, and best practice is to namespace the keys.
* Keys SHOULD be named using a reverse domain notation - e.g. `com.example.myKey`.
* The prefix `org.opencontainers` is reserved for keys defined in Open Container Initiative (OCI) specifications and MUST NOT be used by other specifications and extensions.
* Keys using the `org.opencontainers.image` namespace are reserved for use in the OCI Image Specification and MUST NOT be used by other specifications and extensions, including other OCI specifications.
* If there are no annotations then this property MUST either be absent or be an empty map.
* Consumers MUST NOT generate an error if they encounter an unknown annotation key.
## Pre-Defined Annotation Keys
This specification defines the following annotation keys, intended for but not limited to [image index](image-index.md) and image [manifest](manifest.md) authors:
* **org.opencontainers.image.created** date and time on which the image was built (string, date-time as defined by [RFC 3339](https://tools.ietf.org/html/rfc3339#section-5.6)).
* **org.opencontainers.image.authors** contact details of the people or organization responsible for the image (freeform string)
* **org.opencontainers.image.url** URL to find more information on the image (string)
* **org.opencontainers.image.documentation** URL to get documentation on the image (string)
* **org.opencontainers.image.source** URL to get source code for building the image (string)
* **org.opencontainers.image.version** version of the packaged software
* The version MAY match a label or tag in the source code repository
* version MAY be [Semantic versioning-compatible](http://semver.org/)
* **org.opencontainers.image.revision** Source control revision identifier for the packaged software.
* **org.opencontainers.image.vendor** Name of the distributing entity, organization or individual.
* **org.opencontainers.image.licenses** License(s) under which contained software is distributed as a [SPDX License Expression][spdx-license-expression].
* **org.opencontainers.image.ref.name** Name of the reference for a target (string). SHOULD only be considered valid when used on descriptors in `index.json` within the [image layout](image-layout.md).
* **org.opencontainers.image.title** Human-readable title of the image (string)
* **org.opencontainers.image.description** Human-readable description of the software packaged in the image (string)
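For illustration, these rules can be rendered as a plain Go map. The values below are invented examples, not spec-mandated content:

```go
package main

import "fmt"

func main() {
	// Hypothetical annotations following the rules above: string keys and
	// values, reverse-DNS key names, empty values permitted.
	annotations := map[string]string{
		"org.opencontainers.image.created":  "2016-03-04T13:53:34Z",
		"org.opencontainers.image.authors":  "Alyssa P. Hacker <alyspdev@example.com>",
		"org.opencontainers.image.version":  "1.0.2",
		"org.opencontainers.image.licenses": "Apache-2.0",
		"com.example.myKey":                 "", // empty value is valid
	}
	fmt.Println(len(annotations), "annotations")
}
```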
## Back-compatibility with Label Schema
[Label Schema](https://label-schema.org) defined a number of conventional labels for container images, and these are now superseded by annotations with keys starting **org.opencontainers.image**.
While users are encouraged to use the **org.opencontainers.image** keys, tools MAY choose to support compatible annotations using the **org.label-schema** prefix as follows.
| `org.opencontainers.image` prefix | `org.label-schema` prefix | Compatibility notes |
|---------------------------|-------------------------|---------------------|
| `created` | `build-date` | Compatible |
| `url` | `url` | Compatible |
| `source` | `vcs-url` | Compatible |
| `version` | `version` | Compatible |
| `revision` | `vcs-ref` | Compatible |
| `vendor` | `vendor` | Compatible |
| `title` | `name` | Compatible |
| `description` | `description` | Compatible |
| `documentation` | `usage` | Value is compatible if the documentation is located by a URL |
| `authors` | | No equivalent in Label Schema |
| `licenses` | | No equivalent in Label Schema |
| `ref.name` | | No equivalent in Label Schema |
| | `schema-version`| No equivalent in the OCI Image Spec |
| | `docker.*`, `rkt.*` | No equivalent in the OCI Image Spec |
[spdx-license-expression]: https://spdx.org/spdx-specification-21-web-version#h.jxpfx0ykyb60


@@ -1,275 +0,0 @@
# OCI Image Configuration
An OCI *Image* is an ordered collection of root filesystem changes and the corresponding execution parameters for use within a container runtime.
This specification outlines the JSON format describing images for use with a container runtime and execution tool and its relationship to filesystem changesets, described in [Layers](layer.md).
This section defines the `application/vnd.oci.image.config.v1+json` [media type](media-types.md).
## Terminology
This specification uses the following terms:
### [Layer](layer.md)
* Image filesystems are composed of *layers*.
* Each layer represents a set of filesystem changes in a tar-based [layer format](layer.md), recording files to be added, changed, or deleted relative to its parent layer.
* Layers do not have configuration metadata such as environment variables or default arguments - these are properties of the image as a whole rather than any particular layer.
* Using a layer-based or union filesystem such as AUFS, or by computing the diff from filesystem snapshots, the filesystem changeset can be used to present a series of image layers as if they were one cohesive filesystem.
### Image JSON
* Each image has an associated JSON structure which describes some basic information about the image such as date created, author, as well as execution/runtime configuration like its entrypoint, default arguments, networking, and volumes.
* The JSON structure also references a cryptographic hash of each layer used by the image, and provides history information for those layers.
* This JSON is considered to be immutable, because changing it would change the computed [ImageID](#imageid).
* Changing it means creating a new derived image, instead of changing the existing image.
### Layer DiffID
A layer DiffID is the digest over the layer's uncompressed tar archive, serialized in the descriptor digest format, e.g., `sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9`.
Layers SHOULD be packed and unpacked reproducibly to avoid changing the layer DiffID, for example by using [tar-split][] to save the tar headers.
NOTE: Do not confuse DiffIDs with [layer digests](manifest.md#image-manifest-property-descriptions), often referenced in the manifest, which are digests over compressed or uncompressed content.
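As a sketch, a DiffID can be computed with go-digest's `FromReader` over the decompressed stream. The file name and the gzip compression of the blob are assumptions for illustration:

```go
package main

import (
	"compress/gzip"
	"fmt"
	"os"

	_ "crypto/sha256" // link in the canonical algorithm

	"github.com/opencontainers/go-digest"
)

func main() {
	// layer.tar.gz is a hypothetical compressed layer blob.
	f, err := os.Open("layer.tar.gz")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// The DiffID is taken over the *uncompressed* tar stream.
	zr, err := gzip.NewReader(f)
	if err != nil {
		panic(err)
	}
	defer zr.Close()

	diffID, err := digest.FromReader(zr)
	if err != nil {
		panic(err)
	}
	fmt.Println(diffID) // e.g. sha256:...
}
```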
### Layer ChainID
For convenience, it is sometimes useful to refer to a stack of layers with a single identifier.
While a layer's `DiffID` identifies a single changeset, the `ChainID` identifies the subsequent application of those changesets.
This ensures that we have handles referring to both the layer itself, as well as the result of the application of a series of changesets.
Use in combination with `rootfs.diff_ids` while applying layers to a root filesystem to uniquely and safely identify the result.
#### Definition
The `ChainID` of an applied set of layers is defined with the following recursion:
```
ChainID(L₀) = DiffID(L₀)
ChainID(L₀|...|Lₙ₋₁|Lₙ) = Digest(ChainID(L₀|...|Lₙ₋₁) + " " + DiffID(Lₙ))
```
For this, we define the binary `|` operation to be the result of applying the right operand to the left operand.
For example, given base layer `A` and a changeset `B`, we refer to the result of applying `B` to `A` as `A|B`.
Above, we define the `ChainID` for a single layer (`L₀`) as equivalent to the `DiffID` for that layer.
Otherwise, the `ChainID` for a set of applied layers (`L₀|...|Lₙ₋₁|Lₙ`) is defined as the recursion `Digest(ChainID(L₀|...|Lₙ₋₁) + " " + DiffID(Lₙ))`.
#### Explanation
Let's say we have layers A, B, C, ordered from bottom to top, where A is the base and C is the top.
Defining `|` as a binary application operator, the root filesystem may be `A|B|C`.
While it is implied that `C` is only useful when applied to `A|B`, the identifier `C` is insufficient to identify this result, as we'd have the equality `C = A|B|C`, which isn't true.
The main issue is when we have two definitions of `C`, `C = C` and `C = A|B|C`.
If this is true (with some handwaving), `C = x|C` where `x = any application`.
This means that if an attacker can define `x`, relying on `C` provides no guarantee that the layers were applied in any order.
The `ChainID` addresses this problem by being defined as a compound hash.
__We differentiate the changeset `C`, from the order-dependent application `A|B|C` by saying that the resulting rootfs is identified by ChainID(A|B|C), which can be calculated by `ImageConfig.rootfs`.__
Let's expand the definition of `ChainID(A|B|C)` to explore its internal structure:
```
ChainID(A) = DiffID(A)
ChainID(A|B) = Digest(ChainID(A) + " " + DiffID(B))
ChainID(A|B|C) = Digest(ChainID(A|B) + " " + DiffID(C))
```
We can replace each definition and reduce to a single equality:
```
ChainID(A|B|C) = Digest(Digest(DiffID(A) + " " + DiffID(B)) + " " + DiffID(C))
```
Hopefully, the above is illustrative of the _actual_ contents of the `ChainID`.
Most importantly, we can easily see that `ChainID(C) != ChainID(A|B|C)`, otherwise, `ChainID(C) = DiffID(C)`, which is the base case, could not be true.
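A minimal Go sketch of this recursion, using `digest.FromBytes` from go-digest (an iterative rendering of the recursive definition; compare the `identity.ChainIDs` implementation vendored in this commit):

```go
package chain

import (
	_ "crypto/sha256" // link in the canonical algorithm

	"github.com/opencontainers/go-digest"
)

// chainID folds the recursion from the definition above:
//
//	ChainID(L₀)        = DiffID(L₀)
//	ChainID(L₀|...|Lₙ) = Digest(ChainID(L₀|...|Lₙ₋₁) + " " + DiffID(Lₙ))
func chainID(diffIDs []digest.Digest) digest.Digest {
	if len(diffIDs) == 0 {
		return "" // undefined for an empty chain
	}
	id := diffIDs[0]
	for _, diffID := range diffIDs[1:] {
		id = digest.FromBytes([]byte(id + " " + diffID))
	}
	return id
}
```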
### ImageID
Each image's ID is given by the SHA256 hash of its [configuration JSON](#image-json).
It is represented as a hexadecimal encoding of 256 bits, e.g., `sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9`.
Since the [configuration JSON](#image-json) that gets hashed references hashes of each layer in the image, this formulation of the ImageID makes images content-addressable.
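Expressed with go-digest, this is a one-liner (a sketch; `configJSON` holds the raw bytes of the configuration document):

```go
package imageid

import (
	_ "crypto/sha256" // link in the canonical algorithm

	"github.com/opencontainers/go-digest"
)

// ImageID is the canonical (SHA-256) digest of the raw configuration JSON.
func ImageID(configJSON []byte) digest.Digest {
	return digest.FromBytes(configJSON)
}
```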
## Properties
Note: Any OPTIONAL field MAY also be set to null, which is equivalent to being absent.
- **created** *string*, OPTIONAL
A combined date and time at which the image was created, formatted as defined by [RFC 3339, section 5.6][rfc3339-s5.6].
- **author** *string*, OPTIONAL
Gives the name and/or email address of the person or entity which created and is responsible for maintaining the image.
- **architecture** *string*, REQUIRED
The CPU architecture which the binaries in this image are built to run on.
Configurations SHOULD use, and implementations SHOULD understand, values [supported by runtime-spec's `platform.arch`][runtime-platform].
- **os** *string*, REQUIRED
The name of the operating system which the image is built to run on.
Configurations SHOULD use, and implementations SHOULD understand, values [supported by runtime-spec's `platform.os`][runtime-platform].
- **config** *object*, OPTIONAL
The execution parameters which SHOULD be used as a base when running a container using the image.
This field can be `null`, in which case any execution parameters should be specified at creation of the container.
- **User** *string*, OPTIONAL
The username or UID which is a platform-specific structure that allows specific control over which user the process runs as.
This acts as a default value to use when the value is not specified when creating a container.
For Linux based systems, all of the following are valid: `user`, `uid`, `user:group`, `uid:gid`, `uid:group`, `user:gid`.
If `group`/`gid` is not specified, the default group and supplementary groups of the given `user`/`uid` in `/etc/passwd` from the container are applied.
- **ExposedPorts** *object*, OPTIONAL
A set of ports to expose from a container running this image.
Its keys can be in the format of:
`port/tcp`, `port/udp`, `port` with the default protocol being `tcp` if not specified.
These values act as defaults and are merged with any specified when creating a container.
**NOTE:** This JSON structure value is unusual because it is a direct JSON serialization of the Go type `map[string]struct{}` and is represented in JSON as an object mapping its keys to an empty object.
- **Env** *array of strings*, OPTIONAL
Entries are in the format of `VARNAME=VARVALUE`.
These values act as defaults and are merged with any specified when creating a container.
- **Entrypoint** *array of strings*, OPTIONAL
A list of arguments to use as the command to execute when the container starts.
These values act as defaults and may be replaced by an entrypoint specified when creating a container.
- **Cmd** *array of strings*, OPTIONAL
Default arguments to the entrypoint of the container.
These values act as defaults and may be replaced by any specified when creating a container.
If an `Entrypoint` value is not specified, then the first entry of the `Cmd` array SHOULD be interpreted as the executable to run.
- **Volumes** *object*, OPTIONAL
A set of directories which SHOULD be created as data volumes in a container running this image.
If a file or folder exists within the image with the same path as a data volume, that file or folder will be replaced by the data volume and never be merged.
**NOTE:** This JSON structure value is unusual because it is a direct JSON serialization of the Go type `map[string]struct{}` and is represented in JSON as an object mapping its keys to an empty object.
- **WorkingDir** *string*, OPTIONAL
Sets the current working directory of the entrypoint process in the container.
This value acts as a default and may be replaced by a working directory specified when creating a container.
- **Labels** *object*, OPTIONAL
The field contains arbitrary metadata for the container.
This property MUST use the [annotation rules](annotations.md#rules).
- **StopSignal** *string*, OPTIONAL
The field contains the system call signal that will be sent to the container to make it exit. The signal can be a signal name in the format `SIGNAME`, for instance `SIGKILL` or `SIGRTMIN+3`.
- **rootfs** *object*, REQUIRED
The rootfs key references the layer content addresses used by the image.
This makes the image config hash depend on the filesystem hash.
- **type** *string*, REQUIRED
MUST be set to `layers`.
Implementations MUST generate an error if they encounter an unknown value while verifying or unpacking an image.
- **diff_ids** *array of strings*, REQUIRED
An array of layer content hashes (`DiffIDs`), in order from first to last.
- **history** *array of objects*, OPTIONAL
Describes the history of each layer.
The array is ordered from first to last.
The object has the following fields:
- **created** *string*, OPTIONAL
A combined date and time at which the layer was created, formatted as defined by [RFC 3339, section 5.6][rfc3339-s5.6].
- **author** *string*, OPTIONAL
The author of the build point.
- **created_by** *string*, OPTIONAL
The command which created the layer.
- **comment** *string*, OPTIONAL
A custom message set when creating the layer.
- **empty_layer** *boolean*, OPTIONAL
This field is used to mark if the history item created a filesystem diff.
It is set to true if this history item doesn't correspond to an actual layer in the rootfs section (for example, Dockerfile's [ENV](https://docs.docker.com/engine/reference/builder/#/env) command results in no change to the filesystem).
Any extra fields in the Image JSON struct are considered implementation specific and MUST be ignored by any implementations which are unable to interpret them.
Whitespace is OPTIONAL and implementations MAY have compact JSON with no whitespace.
## Example
Here is an example image configuration JSON document:
```json,title=Image%20JSON&mediatype=application/vnd.oci.image.config.v1%2Bjson
{
"created": "2015-10-31T22:22:56.015925234Z",
"author": "Alyssa P. Hacker <alyspdev@example.com>",
"architecture": "amd64",
"os": "linux",
"config": {
"User": "alice",
"ExposedPorts": {
"8080/tcp": {}
},
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"FOO=oci_is_a",
"BAR=well_written_spec"
],
"Entrypoint": [
"/bin/my-app-binary"
],
"Cmd": [
"--foreground",
"--config",
"/etc/my-app.d/default.cfg"
],
"Volumes": {
"/var/job-result-data": {},
"/var/log/my-app-logs": {}
},
"WorkingDir": "/home/alice",
"Labels": {
"com.example.project.git.url": "https://example.com/project.git",
"com.example.project.git.commit": "45a939b2999782a3f005621a8d0f29aa387e1d6b"
}
},
"rootfs": {
"diff_ids": [
"sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
"sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
],
"type": "layers"
},
"history": [
{
"created": "2015-10-31T22:22:54.690851953Z",
"created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
},
{
"created": "2015-10-31T22:22:55.613815829Z",
"created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
"empty_layer": true
}
]
}
```
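Such a document can be decoded with the Go types vendored by this commit. A sketch, assuming the `specs-go/v1` package layout and a local `config.json` holding the document above:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"

	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	raw, err := ioutil.ReadFile("config.json")
	if err != nil {
		panic(err)
	}
	var img v1.Image
	if err := json.Unmarshal(raw, &img); err != nil {
		panic(err)
	}
	// For the example above this prints: amd64 linux 2
	fmt.Println(img.Architecture, img.OS, len(img.RootFS.DiffIDs))
}
```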
[rfc3339-s5.6]: https://tools.ietf.org/html/rfc3339#section-5.6
[runtime-platform]: https://github.com/opencontainers/runtime-spec/blob/v1.0.0-rc3/config.md#platform
[tar-split]: https://github.com/vbatts/tar-split


@@ -1,26 +0,0 @@
# Extensibility
Implementations that are reading/processing [manifests](manifest.md) or [image indexes](image-index.md) MUST NOT generate an error if they encounter an unknown property.
Instead they MUST ignore unknown properties.
# Canonicalization
* OCI Images are [content-addressable](https://en.wikipedia.org/wiki/Content-addressable_storage). See [descriptors](descriptor.md) for more.
* One benefit of content-addressable storage is easy deduplication.
* Many images might depend on a particular [layer](layer.md), but there will only be one blob in the [store](image-layout.md).
* With a different serialization, that same semantic layer would have a different hash, and if both versions of the layer are referenced there will be two blobs with the same semantic content.
* To allow efficient storage, implementations serializing content for blobs SHOULD use a canonical serialization.
* This increases the chance that different implementations can push the same semantic content to the store without creating redundant blobs.
## JSON
[JSON][] content SHOULD be serialized as [canonical JSON][canonical-json].
Of the [OCI Image Format Specification media types](media-types.md), all the types ending in `+json` contain JSON content.
Implementations:
* [Go][]: [github.com/docker/go][], which claims to implement [canonical JSON][canonical-json] except for Unicode normalization.
[canonical-json]: http://wiki.laptop.org/go/Canonical_JSON
[github.com/docker/go]: https://github.com/docker/go/
[Go]: https://golang.org/
[JSON]: http://json.org/
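A hedged usage sketch of the library referenced above. The import path and `MarshalCanonical` entry point are assumptions about the [github.com/docker/go][] API rather than something this document specifies:

```go
package main

import (
	"fmt"

	cjson "github.com/docker/go/canonical/json"
)

func main() {
	v := map[string]interface{}{"b": 2, "a": 1}
	// Canonical JSON: sorted keys, minimal whitespace, so the same semantic
	// content always hashes to the same digest.
	out, err := cjson.MarshalCanonical(v)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"a":1,"b":2}
}
```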


@@ -1,185 +0,0 @@
# OCI Content Descriptors
* An OCI image consists of several different components, arranged in a [Merkle Directed Acyclic Graph (DAG)](https://en.wikipedia.org/wiki/Merkle_tree).
* References between components in the graph are expressed through _Content Descriptors_.
* A Content Descriptor (or simply _Descriptor_) describes the disposition of the targeted content.
* A Content Descriptor includes the type of the content, a content identifier (_digest_), and the byte-size of the raw content.
* Descriptors SHOULD be embedded in other formats to securely reference external content.
* Other formats SHOULD use descriptors to securely reference external content.
This section defines the `application/vnd.oci.descriptor.v1+json` [media type](media-types.md).
## Properties
A descriptor consists of a set of properties encapsulated in key-value fields.
The following fields contain the primary properties that constitute a Descriptor:
- **`mediaType`** *string*
This REQUIRED property contains the media type of the referenced content.
Values MUST comply with [RFC 6838][rfc6838], including the [naming requirements in its section 4.2][rfc6838-s4.2].
The OCI image specification defines [several of its own MIME types](media-types.md) for resources defined in the specification.
- **`digest`** *string*
This REQUIRED property is the _digest_ of the targeted content, conforming to the requirements outlined in [Digests and Verification](#digests-and-verification).
Retrieved content SHOULD be verified against this digest when consumed via untrusted sources.
- **`size`** *int64*
This REQUIRED property specifies the size, in bytes, of the raw content.
This property exists so that a client will have an expected size for the content before processing.
If the length of the retrieved content does not match the specified length, the content SHOULD NOT be trusted.
- **`urls`** *array of strings*
This OPTIONAL property specifies a list of URIs from which this object MAY be downloaded.
Each entry MUST conform to [RFC 3986][rfc3986].
Entries SHOULD use the `http` and `https` schemes, as defined in [RFC 7230][rfc7230-s2.7].
- **`annotations`** *string-string map*
This OPTIONAL property contains arbitrary metadata for this descriptor.
This OPTIONAL property MUST use the [annotation rules](annotations.md#rules).
Descriptors pointing to [`application/vnd.oci.image.manifest.v1+json`](manifest.md) SHOULD include the extended field `platform`, see [Image Index Property Descriptions](image-index.md#image-index-property-descriptions) for details.
### Reserved
The following field keys are reserved and MUST NOT be used by other specifications.
- **`data`** *string*
This key is RESERVED for future versions of the specification.
All other fields may be included in other OCI specifications.
Extended _Descriptor_ field additions proposed in other OCI specifications SHOULD first be considered for addition into this specification.
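For orientation, the wire shape above rendered as a Go struct. This is a sketch only; the canonical type ships in this repository's `specs-go/v1` package:

```go
package descriptor

// Descriptor mirrors the primary fields described above.
type Descriptor struct {
	MediaType   string            `json:"mediaType"`
	Digest      string            `json:"digest"`
	Size        int64             `json:"size"`
	URLs        []string          `json:"urls,omitempty"`
	Annotations map[string]string `json:"annotations,omitempty"`
}
```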
## Digests
The _digest_ property of a Descriptor acts as a content identifier, enabling [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage).
It uniquely identifies content by taking a [collision-resistant hash](https://en.wikipedia.org/wiki/Cryptographic_hash_function) of the bytes.
If the _digest_ can be communicated in a secure manner, one can verify content from an insecure source by recalculating the digest independently, ensuring the content has not been modified.
The value of the `digest` property is a string consisting of an _algorithm_ portion and an _encoded_ portion.
The _algorithm_ specifies the cryptographic hash function and encoding used for the digest; the _encoded_ portion contains the encoded result of the hash function.
A digest string MUST match the following grammar:
```
digest := algorithm ":" encoded
algorithm := algorithm-component [algorithm-separator algorithm-component]*
algorithm-component := /[a-z0-9]+/
algorithm-separator := /[+._-]/
encoded := /[a-zA-Z0-9=_-]+/
```
Note that _algorithm_ MAY impose algorithm-specific restrictions on the grammar of the _encoded_ portion.
See also [Registered Algorithms](#registered-algorithms).
Some example digest strings include the following:
digest | algorithm | Registered |
--------------------------------------------------------------------------|---------------------|------------|
`sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b` | [SHA-256](#sha-256) | Yes |
`sha512:401b09eab3c013d4ca54922bb802bec8fd5318192b0a75f201d8b372742...` | [SHA-512](#sha-512) | Yes |
`multihash+base58:QmRZxt2b1FVZPNqd8hsiykDL3TdBDeTSPX9Kv46HmX4Gx8` | Multihash | No |
`sha256+b64u:LCa0a2j_xo_5m0U8HTBBNBNCLXBkg7-g-YpeiGJm564` | SHA-256 with urlsafe base64 | No |
Please see [Registered Algorithms](#registered-algorithms) for a list of registered algorithms.
Implementations SHOULD allow digests with unrecognized algorithms to pass validation if they comply with the above grammar.
While `sha256` will only use hex encoded digests, separators in _algorithm_ and alphanumerics in _encoded_ are included to allow for extensions.
As an example, we can parameterize the encoding and algorithm as `multihash+base58:QmRZxt2b1FVZPNqd8hsiykDL3TdBDeTSPX9Kv46HmX4Gx8`, which would be considered valid but unregistered by this specification.
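The grammar transcribes directly into a single anchored Go regular expression (a sketch; registered algorithms impose further restrictions on the _encoded_ portion that this pattern does not check):

```go
package main

import (
	"fmt"
	"regexp"
)

// digestRE: algorithm components joined by [+._-], then ":", then encoded.
var digestRE = regexp.MustCompile(`^[a-z0-9]+(?:[+._-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$`)

func main() {
	for _, s := range []string{
		"sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b", // valid, registered
		"multihash+base58:QmRZxt2b1FVZPNqd8hsiykDL3TdBDeTSPX9Kv46HmX4Gx8",         // valid, unregistered
		"sha256:", // invalid: empty encoded portion
	} {
		fmt.Println(digestRE.MatchString(s), s)
	}
}
```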
### Verification
Before consuming content targeted by a descriptor from untrusted sources, the byte content SHOULD be verified against the digest string.
Before calculating the digest, the size of the content SHOULD be verified to reduce hash collision space.
Heavy processing before calculating a hash SHOULD be avoided.
Implementations MAY employ [canonicalization](canonicalization.md#canonicalization) of the underlying content to ensure stable content identifiers.
### Digest calculations
A _digest_ is calculated by the following pseudo-code, where `H` is the selected hash algorithm, identified by string `<alg>`:
```
let ID(C) = Descriptor.digest
let C = <bytes>
let D = '<alg>:' + Encode(H(C))
let verified = ID(C) == D
```
Above, we define the content identifier as `ID(C)`, extracted from the `Descriptor.digest` field.
Content `C` is a string of bytes.
Function `H` returns the hash of `C` in bytes and is passed to function `Encode` and prefixed with the algorithm to obtain the digest.
The result `verified` is true if `ID(C)` is equal to `D`, confirming that `C` is the content identified by `D`.
After verification, the following is true:
```
D == ID(C) == '<alg>:' + Encode(H(C))
```
The _digest_ is confirmed as the content identifier by independently calculating the _digest_.
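The pseudo-code above, made concrete for `H` = SHA-256 with hex encoding (a minimal sketch using only the standard library):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// verified reports whether content C matches identifier D, i.e.
// D == '<alg>:' + Encode(H(C)).
func verified(c []byte, id string) bool {
	d := fmt.Sprintf("sha256:%x", sha256.Sum256(c))
	return d == id
}

func main() {
	c := []byte("content")
	d := fmt.Sprintf("sha256:%x", sha256.Sum256(c))
	fmt.Println(verified(c, d)) // true
}
```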
### Registered algorithms
While the _algorithm_ component of the digest string allows the use of a variety of cryptographic algorithms, compliant implementations SHOULD use [SHA-256](#sha-256).
The following algorithm identifiers are currently defined by this specification:
| algorithm identifier | algorithm |
|----------------------|---------------------|
| `sha256` | [SHA-256](#sha-256) |
| `sha512` | [SHA-512](#sha-512) |
If a useful algorithm is not included in the above table, it SHOULD be submitted to this specification for registration.
#### SHA-256
[SHA-256][rfc4634-s4.1] is a collision-resistant hash function, chosen for ubiquity, reasonable size and secure characteristics.
Implementations MUST implement SHA-256 digest verification for use in descriptors.
When the _algorithm identifier_ is `sha256`, the _encoded_ portion MUST match `/[a-f0-9]{64}/`.
Note that `[A-F]` MUST NOT be used here.
#### SHA-512
[SHA-512][rfc4634-s4.2] is a collision-resistant hash function which [may be more performant][sha256-vs-sha512] than [SHA-256](#sha-256) on some CPUs.
Implementations MAY implement SHA-512 digest verification for use in descriptors.
When the _algorithm identifier_ is `sha512`, the _encoded_ portion MUST match `/[a-f0-9]{128}/`.
Note that `[A-F]` MUST NOT be used here.
## Examples
The following example describes a [_Manifest_](manifest.md#image-manifest) with a content identifier of "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270" and a size of 7682 bytes:
```json,title=Content%20Descriptor&mediatype=application/vnd.oci.descriptor.v1%2Bjson
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7682,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270"
}
```
In the following example, the descriptor indicates that the referenced manifest is retrievable from a particular URL:
```json,title=Content%20Descriptor&mediatype=application/vnd.oci.descriptor.v1%2Bjson
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7682,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
"urls": [
"https://example.com/example-manifest"
]
}
```
[rfc3986]: https://tools.ietf.org/html/rfc3986
[rfc4634-s4.1]: https://tools.ietf.org/html/rfc4634#section-4.1
[rfc4634-s4.2]: https://tools.ietf.org/html/rfc4634#section-4.2
[rfc6838]: https://tools.ietf.org/html/rfc6838
[rfc6838-s4.2]: https://tools.ietf.org/html/rfc6838#section-4.2
[rfc7230-s2.7]: https://tools.ietf.org/html/rfc7230#section-2.7
[sha256-vs-sha512]: https://groups.google.com/a/opencontainers.org/forum/#!topic/dev/hsMw7cAwrZE


@ -1,67 +0,0 @@
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package identity provides implementations of subtle calculations pertaining
// to image and layer identity. The primary item present here is the ChainID
// calculation used in identifying the result of subsequent layer applications.
//
// Helpers are also provided here to ease transition to the
// github.com/opencontainers/go-digest package, but that package may be used
// directly.
package identity
import "github.com/opencontainers/go-digest"
// ChainID takes a slice of digests and returns the ChainID corresponding to
// the last entry. Typically, these are a list of layer DiffIDs, with the
// result providing the ChainID identifying the result of sequential
// application of the preceding layers.
func ChainID(dgsts []digest.Digest) digest.Digest {
chainIDs := make([]digest.Digest, len(dgsts))
copy(chainIDs, dgsts)
ChainIDs(chainIDs)
if len(chainIDs) == 0 {
return ""
}
return chainIDs[len(chainIDs)-1]
}
// ChainIDs calculates the recursively applied chain id for each identifier in
// the slice. The result is written directly back into the slice such that the
// ChainID for each item will be in the respective position.
//
// By definition of ChainID, the zeroth element will always be the same before
// and after the call.
//
// As an example, given the chain of ids `[A, B, C]`, the result `[A,
// ChainID(A|B), ChainID(A|B|C)]` will be written back to the slice.
//
// The input is provided as a return value for convenience.
//
// Typically, these are a list of layer DiffIDs, with the result providing
// the ChainID for the result of each layer application, sequentially.
func ChainIDs(dgsts []digest.Digest) []digest.Digest {
if len(dgsts) < 2 {
return dgsts
}
parent := digest.FromBytes([]byte(dgsts[0] + " " + dgsts[1]))
next := dgsts[1:]
next[0] = parent
ChainIDs(next)
return dgsts
}
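// Example (non-normative): given three layer DiffIDs, ChainIDs rewrites
// the slice in place so that position i holds the ChainID for layers 0
// through i:
//
//	diffIDs := []digest.Digest{
//		digest.FromString("layer A"),
//		digest.FromString("layer B"),
//		digest.FromString("layer C"),
//	}
//	chain := ChainIDs(diffIDs)
//	// chain[0] is unchanged; chain[2] identifies the result of
//	// applying A, then B, then C.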


@ -1,95 +0,0 @@
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package identity
import (
_ "crypto/sha256" // required to install sha256 digest support
"reflect"
"testing"
"github.com/opencontainers/go-digest"
)
func TestChainID(t *testing.T) {
// To provide a good testing base, we define the individual links in a
// chain recursively, illustrating the calculations for each chain.
//
// Note that we use invalid digests for the unmodified identifiers here to
// make the computation more readable.
chainDigestAB := digest.FromString("sha256:a" + " " + "sha256:b") // chain for A|B
chainDigestABC := digest.FromString(chainDigestAB.String() + " " + "sha256:c") // chain for A|B|C
for _, testcase := range []struct {
Name string
Digests []digest.Digest
Expected []digest.Digest
}{
{
Name: "nil",
},
{
Name: "empty",
Digests: []digest.Digest{},
Expected: []digest.Digest{},
},
{
Name: "identity",
Digests: []digest.Digest{"sha256:a"},
Expected: []digest.Digest{"sha256:a"},
},
{
Name: "two",
Digests: []digest.Digest{"sha256:a", "sha256:b"},
Expected: []digest.Digest{"sha256:a", chainDigestAB},
},
{
Name: "three",
Digests: []digest.Digest{"sha256:a", "sha256:b", "sha256:c"},
Expected: []digest.Digest{"sha256:a", chainDigestAB, chainDigestABC},
},
} {
t.Run(testcase.Name, func(t *testing.T) {
t.Log("before", testcase.Digests)
var ids []digest.Digest
if testcase.Digests != nil {
ids = make([]digest.Digest, len(testcase.Digests))
copy(ids, testcase.Digests)
}
ids = ChainIDs(ids)
t.Log("after", ids)
if !reflect.DeepEqual(ids, testcase.Expected) {
t.Errorf("unexpected chain: %v != %v", ids, testcase.Expected)
}
if len(testcase.Digests) == 0 {
return
}
// Make sure parent stays stable
if ids[0] != testcase.Digests[0] {
t.Errorf("parent changed: %v != %v", ids[0], testcase.Digests[0])
}
// make sure that the ChainID function takes the last element
id := ChainID(testcase.Digests)
if id != ids[len(ids)-1] {
t.Errorf("incorrect chain id returned from ChainID: %v != %v", id, ids[len(ids)-1])
}
})
}
}


@ -1,40 +0,0 @@
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package identity
import (
_ "crypto/sha256" // side-effect to install impls, sha256
_ "crypto/sha512" // side-effect to install impls, sha384/sh512
"io"
digest "github.com/opencontainers/go-digest"
)
// FromReader consumes the content of rd until io.EOF, returning canonical
// digest.
func FromReader(rd io.Reader) (digest.Digest, error) {
return digest.Canonical.FromReader(rd)
}
// FromBytes digests the input and returns a Digest.
func FromBytes(p []byte) digest.Digest {
return digest.Canonical.FromBytes(p)
}
// FromString digests the input and returns a Digest.
func FromString(s string) digest.Digest {
return digest.Canonical.FromString(s)
}
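// Example (non-normative): all three helpers agree on the canonical
// digest of the same content.
//
//	d1 := FromBytes([]byte("hello"))
//	d2 := FromString("hello")
//	d3, _ := FromReader(strings.NewReader("hello"))
//	// d1 == d2 && d2 == d3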


@ -1,124 +0,0 @@
# OCI Image Index Specification
The image index is a higher-level manifest which points to specific [image manifests](manifest.md), ideal for one or more platforms.
While the use of an image index is OPTIONAL for image providers, image consumers SHOULD be prepared to process them.
This section defines the `application/vnd.oci.image.index.v1+json` [media type](media-types.md).
For the media type(s) that this document is compatible with, see the [matrix][matrix].
## *Image Index* Property Descriptions
- **`schemaVersion`** *int*
This REQUIRED property specifies the image manifest schema version.
For this version of the specification, this MUST be `2` to ensure backward compatibility with older versions of Docker.
The value of this field will not change.
This field MAY be removed in a future version of the specification.
- **`mediaType`** *string*
This property is *reserved* for use, to [maintain compatibility][matrix].
When used, this field contains the media type of this document, which differs from the [descriptor](descriptor.md#properties) use of `mediaType`.
- **`manifests`** *array of objects*
This REQUIRED property contains a list of [manifests](manifest.md) for specific platforms.
While this property MUST be present, the size of the array MAY be zero.
Each object in `manifests` includes a set of [descriptor properties](descriptor.md#properties) with the following additional properties and restrictions:
- **`mediaType`** *string*
This [descriptor property](descriptor.md#properties) has additional restrictions for `manifests`.
Implementations MUST support at least the following media types:
- [`application/vnd.oci.image.manifest.v1+json`](manifest.md)
Image indexes concerned with portability SHOULD use one of the above media types.
Future versions of the spec MAY use a different mediatype (i.e. a new versioned format).
An encountered `mediaType` that is unknown SHOULD be safely ignored.
- **`platform`** *object*
This OPTIONAL property describes the minimum runtime requirements of the image.
This property SHOULD be present if its target is platform-specific.
- **`architecture`** *string*
This REQUIRED property specifies the CPU architecture.
Image indexes SHOULD use, and implementations SHOULD understand, values [supported by runtime-spec's `platform.arch`][runtime-platform2].
- **`os`** *string*
This REQUIRED property specifies the operating system.
Image indexes SHOULD use, and implementations SHOULD understand, values [supported by runtime-spec's `platform.os`][runtime-platform2].
- **`os.version`** *string*
This OPTIONAL property specifies the version of the operating system targeted by the referenced blob.
Implementations MAY refuse to use manifests where `os.version` is not known to work with the host OS version.
Valid values are implementation-defined, e.g. `10.0.14393.1066` on `windows`.
- **`os.features`** *array of strings*
This OPTIONAL property specifies an array of strings, each specifying a mandatory OS feature (for example on Windows `win32k`).
- **`variant`** *string*
This OPTIONAL property specifies the variant of the CPU.
Image indexes SHOULD use, and implementations SHOULD understand, values listed in the following table.
When the variant of the CPU is not listed in the table, values are implementation-defined and SHOULD be submitted to this specification for standardization.
| ISA/ABI | `architecture` | `variant` |
|-----------------|----------------|-------------|
| ARM 32-bit, v6 | `arm` | `v6` |
| ARM 32-bit, v7 | `arm` | `v7` |
| ARM 32-bit, v8 | `arm` | `v8` |
| ARM 64-bit, v8 | `arm64` | `v8` |
- **`features`** *array of strings*
This property is RESERVED for future versions of the specification.
- **`annotations`** *string-string map*
This OPTIONAL property contains arbitrary metadata for the image index.
This OPTIONAL property MUST use the [annotation rules](annotations.md#rules).
See [Pre-Defined Annotation Keys](annotations.md#pre-defined-annotation-keys).
## Example Image Index
*Example showing a simple image index pointing to image manifests for two platforms:*
```json,title=Image%20Index&mediatype=application/vnd.oci.image.index.v1%2Bjson
{
"schemaVersion": 2,
"manifests": [
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7143,
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
"platform": {
"architecture": "ppc64le",
"os": "linux"
}
},
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7682,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
"platform": {
"architecture": "amd64",
"os": "linux"
}
}
],
"annotations": {
"com.example.key1": "value1",
"com.example.key2": "value2"
}
}
```
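As a non-normative illustration, a consumer might select the manifest for its platform as sketched below. The hand-rolled types mirror only the properties used here; real implementations would likely use `github.com/opencontainers/image-spec/specs-go/v1`, and `pickManifest` is a hypothetical helper:
```go
package main

import (
	"encoding/json"
	"fmt"
)

type platform struct {
	Architecture string `json:"architecture"`
	OS           string `json:"os"`
}

type descriptor struct {
	MediaType string    `json:"mediaType"`
	Digest    string    `json:"digest"`
	Size      int64     `json:"size"`
	Platform  *platform `json:"platform,omitempty"`
}

type index struct {
	SchemaVersion int          `json:"schemaVersion"`
	Manifests     []descriptor `json:"manifests"`
}

// pickManifest returns the first manifest whose platform matches os/arch.
func pickManifest(raw []byte, os, arch string) (descriptor, error) {
	var idx index
	if err := json.Unmarshal(raw, &idx); err != nil {
		return descriptor{}, err
	}
	for _, m := range idx.Manifests {
		if m.Platform != nil && m.Platform.OS == os && m.Platform.Architecture == arch {
			return m, nil
		}
	}
	return descriptor{}, fmt.Errorf("no manifest for %s/%s", os, arch)
}

func main() {
	raw := []byte(`{"schemaVersion":2,"manifests":[{"mediaType":"application/vnd.oci.image.manifest.v1+json","size":7682,"digest":"sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270","platform":{"architecture":"amd64","os":"linux"}}]}`)
	m, err := pickManifest(raw, "linux", "amd64")
	fmt.Println(m.Digest, err)
}
```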
[runtime-platform2]: https://github.com/opencontainers/runtime-spec/blob/v1.0.0-rc3/config.md#platform
[matrix]: media-types.md#compatibility-matrix


@ -1,205 +0,0 @@
# OCI Image Layout Specification
* The OCI Image Layout is a slash separated layout of OCI content-addressable blobs and [location-addressable](https://en.wikipedia.org/wiki/Content-addressable_storage#Content-addressed_vs._location-addressed) references (refs).
* This layout MAY be used in a variety of different transport mechanisms: archive formats (e.g. tar, zip), shared filesystem environments (e.g. nfs), or networked file fetching (e.g. http, ftp, rsync).
Given an image layout and a ref, a tool can create an [OCI Runtime Specification bundle](https://github.com/opencontainers/runtime-spec/blob/v1.0.0-rc3/bundle.md) by:
* Following the ref to find a [manifest](manifest.md#image-manifest), possibly via an [image index](image-index.md)
* [Applying the filesystem layers](layer.md#applying) in the specified order
* Converting the [image configuration](config.md) into an [OCI Runtime Specification `config.json`](https://github.com/opencontainers/runtime-spec/blob/v1.0.0-rc3/config.md)
## Content
The image layout is as follows:
- `blobs` directory
- Contains content-addressable blobs
- A blob has no schema and SHOULD be considered opaque
- Directory MUST exist and MAY be empty
- See [blobs](#blobs) section
- `oci-layout` file
- It MUST exist
- It MUST be a JSON object
- It MUST contain an `imageLayoutVersion` field
- See [oci-layout file](#oci-layout-file) section
- It MAY include additional fields
- `index.json` file
- It MUST exist
- It MUST be an [image index](image-index.md) JSON object.
- See [index.json](#indexjson-file) section
## Example Layout
This is an example image layout:
```
$ cd example.com/app/
$ find . -type f
./index.json
./oci-layout
./blobs/sha256/3588d02542238316759cbf24502f4344ffcc8a60c803870022f335d1390c13b4
./blobs/sha256/4b0bc1c4050b03c95ef2a8e36e25feac42fd31283e8c30b3ee5df6b043155d3c
./blobs/sha256/7968321274dc6b6171697c33df7815310468e694ac5be0ec03ff053bb135e768
```
Blobs are named by their contents:
```
$ shasum -a 256 ./blobs/sha256/afff3924849e458c5ef237db5f89539274d5e609db5db935ed3959c90f1f2d51
afff3924849e458c5ef237db5f89539274d5e609db5db935ed3959c90f1f2d51 ./blobs/sha256/afff3924849e458c5ef237db5f89539274d5e609db5db935ed3959c90f1f2d51
```
## Blobs
* Object names in the `blobs` subdirectories are composed of a directory for each hash algorithm, the children of which will contain the actual content.
* The content of `blobs/<alg>/<encoded>` MUST match the digest `<alg>:<encoded>` (referenced per [descriptor](descriptor.md#digests-and-verification)). For example, the content of `blobs/sha256/afff3924849e458c5ef237db5f89539274d5e609db5db935ed3959c90f1f2d51` MUST match the digest `sha256:afff3924849e458c5ef237db5f89539274d5e609db5db935ed3959c90f1f2d51`.
* The character set of the entry name for `<alg>` and `<encoded>` MUST match the respective grammar elements described in [descriptor](descriptor.md#digests-and-verification).
* The blobs directory MAY contain blobs which are not referenced by any of the [refs](#indexjson-file).
* The blobs directory MAY be missing referenced blobs, in which case the missing blobs SHOULD be fulfilled by an external blob store.
### Example Blobs
```
$ cat ./blobs/sha256/9b97579de92b1c195b85bb42a11011378ee549b02d7fe9c17bf2a6b35d5cb079 | jq
{
"schemaVersion": 2,
"manifests": [
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7143,
"digest": "sha256:afff3924849e458c5ef237db5f89539274d5e609db5db935ed3959c90f1f2d51",
"platform": {
"architecture": "ppc64le",
"os": "linux"
}
},
...
```
```
$ cat ./blobs/sha256/afff3924849e458c5ef237db5f89539274d5e609db5db935ed3959c90f1f2d51 | jq
{
"schemaVersion": 2,
"config": {
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 7023,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270"
},
"layers": [
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 32654,
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
},
...
```
```
$ cat ./blobs/sha256/5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270 | jq
{
"architecture": "amd64",
"author": "Alyssa P. Hacker <alyspdev@example.com>",
"config": {
"Hostname": "8dfe43d80430",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "sha256:6986ae504bbf843512d680cc959484452034965db15f75ee8bdd1b107f61500b",
...
```
```
$ cat ./blobs/sha256/e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f
[tar stream]
```
## oci-layout file
This JSON object serves as a marker for the base of an Open Container Image Layout and to provide the version of the image-layout in use.
The `imageLayoutVersion` value will align with the OCI Image Specification version at the time changes to the layout are made, and will pin a given version until changes to the image layout are required.
This section defines the `application/vnd.oci.layout.header.v1+json` [media type](media-types.md).
### oci-layout Example
```json,title=OCI%20Layout&mediatype=application/vnd.oci.layout.header.v1%2Bjson
{
"imageLayoutVersion": "1.0.0"
}
```
## index.json file
This REQUIRED file is the entry point for references and descriptors of the image-layout.
The [image index](image-index.md) is a multi-descriptor entry point.
This index provides an established path (`/index.json`) to have an entry point for an image-layout and to discover auxiliary descriptors.
* No semantic restriction is given for the "org.opencontainers.image.ref.name" annotation of descriptors.
* In general the `mediaType` of each [descriptor][descriptors] object in the `manifests` field will be either `application/vnd.oci.image.index.v1+json` or `application/vnd.oci.image.manifest.v1+json`.
* Future versions of the spec MAY use a different mediatype (i.e. a new versioned format).
* An encountered `mediaType` that is unknown SHOULD be safely ignored.
**Implementor's Note:**
A common use case of descriptors with a "org.opencontainers.image.ref.name" annotation is representing a "tag" for a container image.
For example, an image may have a tag for different versions or builds of the software.
In the wild you often see "tags" like "v1.0.0-vendor.0", "2.0.0-debug", etc.
Those tags will often be represented in an image-layout repository with matching "org.opencontainers.image.ref.name" annotations like "v1.0.0-vendor.0", "2.0.0-debug", etc.
### Index Example
```json,title=Image%20Index&mediatype=application/vnd.oci.image.index.v1%2Bjson
{
"schemaVersion": 2,
"manifests": [
{
"mediaType": "application/vnd.oci.image.index.v1+json",
"size": 7143,
"digest": "sha256:0228f90e926ba6b96e4f39cf294b2586d38fbb5a1e385c05cd1ee40ea54fe7fd",
"annotations": {
"org.opencontainers.image.ref.name": "stable-release"
}
},
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7143,
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
"platform": {
"architecture": "ppc64le",
"os": "linux"
},
"annotations": {
"org.opencontainers.image.ref.name": "v1.0"
}
},
{
"mediaType": "application/xml",
"size": 7143,
"digest": "sha256:b3d63d132d21c3ff4c35a061adf23cf43da8ae054247e32faa95494d904a007e",
"annotations": {
"org.freedesktop.specifications.metainfo.version": "1.0",
"org.freedesktop.specifications.metainfo.type": "AppStream"
}
}
],
"annotations": {
"com.example.index.revision": "r124356"
}
}
```
This illustrates an index that provides two named manifest references and an auxiliary mediatype for this image layout.
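To make the resolution path concrete, the following non-normative Go sketch follows an "org.opencontainers.image.ref.name" annotation in `index.json` to the path of the referenced blob under `blobs/<alg>/<encoded>`; `blobPath` is a hypothetical helper with abbreviated error handling:
```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// descriptor holds only the fields needed for this sketch.
type descriptor struct {
	Digest      string            `json:"digest"`
	Annotations map[string]string `json:"annotations,omitempty"`
}

// blobPath resolves a ref annotation in <layout>/index.json to the path of
// the referenced blob, following the blobs/<alg>/<encoded> rule above.
func blobPath(layout, ref string) (string, error) {
	raw, err := os.ReadFile(filepath.Join(layout, "index.json"))
	if err != nil {
		return "", err
	}
	var idx struct {
		Manifests []descriptor `json:"manifests"`
	}
	if err := json.Unmarshal(raw, &idx); err != nil {
		return "", err
	}
	for _, d := range idx.Manifests {
		if d.Annotations["org.opencontainers.image.ref.name"] == ref {
			alg, enc, ok := strings.Cut(d.Digest, ":")
			if !ok {
				return "", fmt.Errorf("malformed digest %q", d.Digest)
			}
			return filepath.Join(layout, "blobs", alg, enc), nil
		}
	}
	return "", fmt.Errorf("ref %q not found", ref)
}

func main() {
	p, err := blobPath("example.com/app", "v1.0")
	fmt.Println(p, err)
}
```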
[descriptors]: ./descriptor.md

Binary file not shown.



@ -1,12 +0,0 @@
digraph G {
{
imageIndex [shape=note, label="Image Index\n<<optional>>\napplication/vnd.oci.image.index.v1+json"]
manifest [shape=note, label="Image manifest\napplication/vnd.oci.image.manifest.v1+json"]
config [shape=note, label="Image JSON\napplication/vnd.oci.image.config.v1+json"]
layer [shape=note, label="Layer tar archive\napplication/vnd.oci.image.layer.v1.tar\napplication/vnd.oci.image.layer.v1.tar+gzip\napplication/vnd.oci.image.layer.nondistributable.v1.tar\napplication/vnd.oci.image.layer.nondistributable.v1.tar+gzip"]
}
imageIndex -> manifest [label="1..*"]
manifest -> config [label="1..1"]
manifest -> layer [label="1..*"]
}

Binary file not shown.


Binary file not shown.



@ -1,21 +0,0 @@
# OCI Image Implementations
Projects or Companies currently adopting the OCI Image Specification
* [projectatomic/skopeo](https://github.com/projectatomic/skopeo)
* [Amazon Elastic Container Registry (ECR)](https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-manifest-formats.html) ([announcement](https://aws.amazon.com/about-aws/whats-new/2017/01/amazon-ecr-supports-docker-image-manifest-v2-schema-2/))
* [openSUSE/umoci](https://github.com/openSUSE/umoci)
* [cloudfoundry/grootfs](https://github.com/cloudfoundry/grootfs) ([source](https://github.com/cloudfoundry/grootfs/blob/c3da26e1e463b51be1add289032f3dca6698b335/fetcher/remote/docker_src.go))
* [Mesos plans](https://issues.apache.org/jira/browse/MESOS-5011) ([design doc](https://docs.google.com/document/d/1Pus7D-inIBoLSIPyu3rl_apxvUhtp3rp0_b0Ttr2Xww/edit#heading=h.hrvk2wboog4p))
* [Docker](https://github.com/docker)
- [docker/containerd](https://github.com/docker/containerd)
- [docker/docker (`docker save/load` WIP)](https://github.com/docker/docker/pull/26369)
- [docker/distribution (registry PR)](https://github.com/docker/distribution/pull/2076)
* [Containers](https://github.com/containers/)
- [containers/build](https://github.com/containers/build)
- [containers/image](https://github.com/containers/image)
* [coreos/rkt](https://github.com/coreos/rkt)
* [box-builder/box](https://github.com/box-builder/box)
_(to add your project please open a [pull-request](https://github.com/opencontainers/image-spec/pulls))_


@ -1,323 +0,0 @@
# Image Layer Filesystem Changeset
This document describes how to serialize a filesystem and filesystem changes like removed files into a blob called a layer.
One or more layers are applied on top of each other to create a complete filesystem.
This document will use a concrete example to illustrate how to create and consume these filesystem layers.
This section defines the `application/vnd.oci.image.layer.v1.tar`, `application/vnd.oci.image.layer.v1.tar+gzip`, `application/vnd.oci.image.layer.nondistributable.v1.tar`, and `application/vnd.oci.image.layer.nondistributable.v1.tar+gzip` [media types](media-types.md).
## `+gzip` Media Types
* The media type `application/vnd.oci.image.layer.v1.tar+gzip` represents an `application/vnd.oci.image.layer.v1.tar` payload which has been compressed with [gzip][rfc1952_2].
* The media type `application/vnd.oci.image.layer.nondistributable.v1.tar+gzip` represents an `application/vnd.oci.image.layer.nondistributable.v1.tar` payload which has been compressed with [gzip][rfc1952_2].
## Distributable Format
* Layer Changesets for the [media type](media-types.md) `application/vnd.oci.image.layer.v1.tar` MUST be packaged in [tar archive][tar-archive].
* Layer Changesets for the [media type](media-types.md) `application/vnd.oci.image.layer.v1.tar` MUST NOT include duplicate entries for file paths in the resulting [tar archive][tar-archive].
## Change Types
Types of changes that can occur in a changeset are:
* Additions
* Modifications
* Removals
Additions and Modifications are represented the same in the changeset tar archive.
Removals are represented using "[whiteout](#whiteouts)" file entries (See [Representing Changes](#representing-changes)).
### File Types
Throughout this document section, the use of word "files" or "entries" includes:
* regular files
* directories
* sockets
* symbolic links
* block devices
* character devices
* FIFOs
### File Attributes
Where supported, Additions and Modifications MUST include the following file attributes:
* Modification Time (`mtime`)
* User ID (`uid`)
* User Name (`uname`) *secondary to `uid`*
* Group ID (`gid`)
* Group Name (`gname`) *secondary to `gid`*
* Mode (`mode`)
* Extended Attributes (`xattrs`)
* Symlink reference (`linkname` + symbolic link type)
* [Hardlink](#hardlinks) reference (`linkname`)
[Sparse files](https://en.wikipedia.org/wiki/Sparse_file) SHOULD NOT be used because they lack consistent support across tar implementations.
#### Hardlinks
* Hardlinks are a [POSIX concept](http://pubs.opengroup.org/onlinepubs/9699919799/functions/link.html) for having one or more directory entries for the same file on the same device.
* Not all filesystems support hardlinks (e.g. [FAT](https://en.wikipedia.org/wiki/File_Allocation_Table)).
* Hardlinks are possible with all [file types](#file-types) except `directories`.
* Non-directory files are considered "hardlinked" when their link count is greater than 1.
* Hardlinked files are on a same device (i.e. comparing Major:Minor pair) and have the same inode.
* Files that share a hardlink (link count > 1) may be outside the directory that the changeset is being produced from, in which case the `linkname` is not recorded in the changeset.
* Hardlinks are stored in a tar archive with a type flag of `1`, per the [GNU Basic Tar Format][gnu-tar-standard] and [libarchive tar(5)][libarchive-tar].
* While approaches to deriving new or changed hardlinks may vary, a possible approach is:
```
SET LinkMap to map[< Major:Minor String >]map[< inode integer >]< path string >
SET LinkNames to map[< src path string >]< dest path string >
FOR each path in root path
IF path type is directory
CONTINUE
ENDIF
SET filestat to stat(path)
IF filestat num of links == 1
CONTINUE
ENDIF
IF LinkMap[filestat device][filestat inode] is not empty
SET LinkNames[path] to LinkMap[filestat device][filestat inode]
ELSE
SET LinkMap[filestat device][filestat inode] to path
ENDIF
END FOR
```
With this approach, the link map and link names of a directory could be compared against those of another directory to derive additions and changes to hardlinks.
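A non-normative, Unix-only Go sketch of the pseudo-code above; the `devIno` key and `linkNames` helper are illustrative, and a portable implementation would need per-platform `stat` handling:
```go
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
	"syscall"
)

// devIno identifies a file by Major:Minor device pair and inode.
type devIno struct {
	dev uint64
	ino uint64
}

// linkNames maps each extra hardlink to the first path seen for its inode,
// mirroring the LinkMap/LinkNames pseudo-code above.
func linkNames(root string) (map[string]string, error) {
	seen := map[devIno]string{}
	names := map[string]string{}
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil || d.IsDir() {
			return err
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		st, ok := info.Sys().(*syscall.Stat_t)
		if !ok || st.Nlink < 2 {
			return nil
		}
		key := devIno{uint64(st.Dev), uint64(st.Ino)}
		if first, ok := seen[key]; ok {
			names[path] = first // later entries link to the first path seen
		} else {
			seen[key] = path
		}
		return nil
	})
	return names, err
}

func main() {
	names, err := linkNames("rootfs-c9d-v1.s1")
	fmt.Println(names, err)
}
```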
## Creating
### Initial Root Filesystem
The initial root filesystem is the base or parent layer.
For this example, an image root filesystem has an initial state as an empty directory.
The name of the directory is not relevant to the layer itself, only for the purpose of producing comparisons.
Here is an initial empty directory structure for a changeset, with a unique directory name `rootfs-c9d-v1`.
```
rootfs-c9d-v1/
```
### Populate Initial Filesystem
Files and directories are then created:
```
rootfs-c9d-v1/
etc/
my-app-config
bin/
my-app-binary
my-app-tools
```
The `rootfs-c9d-v1` directory is then packaged as a plain [tar archive][tar-archive], with paths relative to `rootfs-c9d-v1`, containing entries for the following files:
```
./
./etc/
./etc/my-app-config
./bin/
./bin/my-app-binary
./bin/my-app-tools
```
### Populate a Comparison Filesystem
Create a new directory and initialize it with a copy or snapshot of the prior root filesystem.
Example commands that can preserve [file attributes](#file-attributes) to make this copy are:
* [cp(1)](http://linux.die.net/man/1/cp): `cp -a rootfs-c9d-v1/ rootfs-c9d-v1.s1/`
* [rsync(1)](http://linux.die.net/man/1/rsync): `rsync -aHAX rootfs-c9d-v1/ rootfs-c9d-v1.s1/`
* [tar(1)](http://linux.die.net/man/1/tar): `mkdir rootfs-c9d-v1.s1 && tar --acls --xattrs -C rootfs-c9d-v1/ -c . | tar -C rootfs-c9d-v1.s1/ --acls --xattrs -x` (including `--selinux` where supported)
Any [changes](#change-types) to the snapshot MUST NOT change or affect the directory it was copied from.
For example `rootfs-c9d-v1.s1` is an identical snapshot of `rootfs-c9d-v1`.
In this way `rootfs-c9d-v1.s1` is prepared for updates and alterations.
**Implementor's Note**: *a copy-on-write or union filesystem can efficiently make directory snapshots*
Initial layout of the snapshot:
```
rootfs-c9d-v1.s1/
etc/
my-app-config
bin/
my-app-binary
my-app-tools
```
See [Change Types](#change-types) for more details on changes.
For example, add a directory at `/etc/my-app.d` containing a default config file, removing the existing config file.
There is also a change (in attributes or file content) to the `./bin/my-app-tools` binary to handle the config layout change.
Following these changes, the representation of the `rootfs-c9d-v1.s1` directory:
```
rootfs-c9d-v1.s1/
etc/
my-app.d/
default.cfg
bin/
my-app-binary
my-app-tools
```
### Determining Changes
When two directories are compared, the relative root is the top-level directory.
The directories are compared, looking for files that have been [added, modified, or removed](#change-types).
For this example, `rootfs-c9d-v1/` and `rootfs-c9d-v1.s1/` are recursively compared, each as relative root path.
The following changeset is found:
```
Added: /etc/my-app.d/
Added: /etc/my-app.d/default.cfg
Modified: /bin/my-app-tools
Deleted: /etc/my-app-config
```
This reflects the removal of `/etc/my-app-config` and creation of a file and directory at `/etc/my-app.d/default.cfg`.
`/bin/my-app-tools` has also been replaced with an updated version.
### Representing Changes
A [tar archive][tar-archive] is then created which contains *only* this changeset:
- Added and modified files and directories in their entirety
- Deleted files or directories marked with a [whiteout file](#whiteouts)
The resulting tar archive for `rootfs-c9d-v1.s1` has the following entries:
```
./etc/my-app.d/
./etc/my-app.d/default.cfg
./bin/my-app-tools
./etc/.wh.my-app-config
```
To signify that the resource `./etc/my-app-config` MUST be removed when the changeset is applied, the basename of the entry is prefixed with `.wh.`.
## Applying Changesets
* Layer Changesets of [media type](media-types.md) `application/vnd.oci.image.layer.v1.tar` are _applied_, rather than simply extracted as tar archives.
* Applying a layer changeset requires special consideration for the [whiteout](#whiteouts) files.
* In the absence of any [whiteout](#whiteouts) files in a layer changeset, the archive is extracted like a regular tar archive.
### Changeset over existing files
This section specifies applying an entry from a layer changeset if the target path already exists.
If the entry and the existing path are both directories, then the existing path's attributes MUST be replaced by those of the entry in the changeset.
In all other cases, the implementation MUST do the semantic equivalent of the following:
- removing the file path (e.g. [`unlink(2)`](http://linux.die.net/man/2/unlink) on Linux systems)
- recreating the file path, based on the contents and attributes of the changeset entry
## Whiteouts
* A whiteout file is an empty file with a special filename that signifies a path should be deleted.
* A whiteout filename consists of the prefix `.wh.` plus the basename of the path to be deleted.
* As files prefixed with `.wh.` are special whiteout markers, it is not possible to create a filesystem which has a file or directory with a name beginning with `.wh.`.
* Once a whiteout is applied, the whiteout itself MUST also be hidden.
* Whiteout files MUST only apply to resources in lower/parent layers.
* Files that are present in the same layer as a whiteout file can only be hidden by whiteout files in subsequent layers.
The following is a base layer with several resources:
```
a/
a/b/
a/b/c/
a/b/c/bar
```
When the next layer is created, the original `a/b` directory is deleted and recreated with `a/b/c/foo`:
```
a/
a/.wh..wh..opq
a/b/
a/b/c/
a/b/c/foo
```
When processing the second layer, `a/.wh..wh..opq` is applied first, before creating the new version of `a/b`, regardless of the ordering in which the whiteout file was encountered.
For example, the following layer is equivalent to the layer above:
```
a/
a/b/
a/b/c/
a/b/c/foo
a/.wh..wh..opq
```
Implementations SHOULD generate layers such that the whiteout files appear before sibling directory entries.
### Opaque Whiteout
* In addition to expressing that a single entry should be removed from a lower layer, layers may remove all of the children using an opaque whiteout entry.
* An opaque whiteout entry is a file with the name `.wh..wh..opq` indicating that all siblings are hidden in the lower layer.
Let's take the following base layer as an example:
```
etc/
my-app-config
bin/
my-app-binary
my-app-tools
tools/
my-app-tool-one
```
If all children of `bin/` are removed, the next layer would have the following:
```
bin/
.wh..wh..opq
```
This is called _opaque whiteout_ format.
An _opaque whiteout_ file hides _all_ children of the `bin/` including sub-directories and all descendants.
Using _explicit whiteout_ files, this would be equivalent to the following:
```
bin/
.wh.my-app-binary
.wh.my-app-tools
.wh.tools
```
In this case, a unique whiteout file is generated for each entry.
If there were more children of `bin/` in the base layer, there would be an entry for each.
Note that this opaque file will apply to _all_ children, including sub-directories, other resources and all descendants.
Implementations SHOULD generate layers using _explicit whiteout_ files, but MUST accept both.
Any given image is likely to be composed of several of these Image Filesystem Changeset tar archives.
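For illustration only, the following sketch classifies changeset entry names according to the whiteout rules above; `applyEntry` is a hypothetical helper, and the actual extraction and removal logic is omitted:
```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

const (
	whiteoutPrefix = ".wh."
	opaqueWhiteout = ".wh..wh..opq"
)

// applyEntry decides what a changeset entry means for the filesystem being
// built, per the whiteout rules above, returning the affected path and an
// action description.
func applyEntry(name string) (path, action string) {
	dir, base := filepath.Split(filepath.Clean(name))
	switch {
	case base == opaqueWhiteout:
		return filepath.Clean(dir), "remove all children from lower layers"
	case strings.HasPrefix(base, whiteoutPrefix):
		return filepath.Join(dir, strings.TrimPrefix(base, whiteoutPrefix)), "remove from lower layers"
	default:
		return filepath.Clean(name), "extract (replace existing entry)"
	}
}

func main() {
	for _, name := range []string{
		"./etc/my-app.d/default.cfg",
		"./etc/.wh.my-app-config",
		"a/.wh..wh..opq",
	} {
		p, a := applyEntry(name)
		fmt.Printf("%-28s -> %-22s %s\n", name, p, a)
	}
}
```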
# Non-Distributable Layers
Due to legal requirements, certain layers may not be regularly distributable.
Such "non-distributable" layers are typically downloaded directly from a distributor but never uploaded.
Non-distributable layers SHOULD be tagged with an alternative mediatype of `application/vnd.oci.image.layer.nondistributable.v1.tar`.
Implementations SHOULD NOT upload layers tagged with this media type; however, such a media type SHOULD NOT affect whether an implementation downloads the layer.
[Descriptors](descriptor.md) referencing non-distributable layers MAY include `urls` for downloading these layers directly; however, the presence of the `urls` field SHOULD NOT be used to determine whether or not a layer is non-distributable.
[libarchive-tar]: https://github.com/libarchive/libarchive/wiki/ManPageTar5#POSIX_ustar_Archives
[gnu-tar-standard]: http://www.gnu.org/software/tar/manual/html_node/Standard.html
[rfc1952_2]: https://tools.ietf.org/html/rfc1952
[tar-archive]: https://en.wikipedia.org/wiki/Tar_(computing)


@ -1,106 +0,0 @@
# OCI Image Manifest Specification
There are three main goals of the Image Manifest Specification.
The first goal is content-addressable images, by supporting an image model where the image's configuration can be hashed to generate a unique ID for the image and its components.
The second goal is to allow multi-architecture images, through a "fat manifest" which references image manifests for platform-specific versions of an image.
In OCI, this is codified in an [image index](image-index.md).
The third goal is to be translatable to the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec).
This section defines the `application/vnd.oci.image.manifest.v1+json` [media type](media-types.md).
For the media type(s) that this is compatible with see the [matrix](media-types.md#compatibility-matrix).
# Image Manifest
Unlike the [image index](image-index.md), which contains information about a set of images that can span a variety of architectures and operating systems, an image manifest provides a configuration and set of layers for a single container image for a specific architecture and operating system.
## *Image Manifest* Property Descriptions
- **`schemaVersion`** *int*
This REQUIRED property specifies the image manifest schema version.
For this version of the specification, this MUST be `2` to ensure backward compatibility with older versions of Docker. The value of this field will not change. This field MAY be removed in a future version of the specification.
- **`mediaType`** *string*
This property is *reserved* for use, to [maintain compatibility](media-types.md#compatibility-matrix).
When used, this field contains the media type of this document, which differs from the [descriptor](descriptor.md#properties) use of `mediaType`.
- **`config`** *[descriptor](descriptor.md)*
This REQUIRED property references a configuration object for a container, by digest.
Beyond the [descriptor requirements](descriptor.md#properties), the value has the following additional restrictions:
- **`mediaType`** *string*
This [descriptor property](descriptor.md#properties) has additional restrictions for `config`.
Implementations MUST support at least the following media types:
- [`application/vnd.oci.image.config.v1+json`](config.md)
Manifests concerned with portability SHOULD use one of the above media types.
- **`layers`** *array of objects*
Each item in the array MUST be a [descriptor](descriptor.md).
The array MUST have the base layer at index 0.
Subsequent layers MUST then follow in stack order (i.e. from `layers[0]` to `layers[len(layers)-1]`).
The final filesystem layout MUST match the result of [applying](layer.md#applying-changesets) the layers to an empty directory.
The [ownership, mode, and other attributes](layer.md#file-attributes) of the initial empty directory are unspecified.
Beyond the [descriptor requirements](descriptor.md#properties), the value has the following additional restrictions:
- **`mediaType`** *string*
This [descriptor property](descriptor.md#properties) has additional restrictions for `layers[]`.
Implementations MUST support at least the following media types:
- [`application/vnd.oci.image.layer.v1.tar`](layer.md)
- [`application/vnd.oci.image.layer.v1.tar+gzip`](layer.md#gzip-media-types)
- [`application/vnd.oci.image.layer.nondistributable.v1.tar`](layer.md#non-distributable-layers)
- [`application/vnd.oci.image.layer.nondistributable.v1.tar+gzip`](layer.md#gzip-media-types)
Manifests concerned with portability SHOULD use one of the above media types.
Entries in this field will frequently use the `+gzip` types.
- **`annotations`** *string-string map*
This OPTIONAL property contains arbitrary metadata for the image manifest.
This OPTIONAL property MUST use the [annotation rules](annotations.md#rules).
See [Pre-Defined Annotation Keys](annotations.md#pre-defined-annotation-keys).
## Example Image Manifest
*Example showing an image manifest:*
```json,title=Manifest&mediatype=application/vnd.oci.image.manifest.v1%2Bjson
{
"schemaVersion": 2,
"config": {
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 7023,
"digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
},
"layers": [
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 32654,
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
},
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 16724,
"digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b"
},
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 73109,
"digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736"
}
],
"annotations": {
"com.example.key1": "value1",
"com.example.key2": "value2"
}
}
```
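As a non-normative sketch, the manifest above (abbreviated here to a single layer) can be decoded with the Go types from `github.com/opencontainers/image-spec/specs-go/v1`, the same package used by the compatibility tests later in this diff; layers print in application order, base layer first:
```go
package main

import (
	"encoding/json"
	"fmt"

	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	raw := []byte(`{
	  "schemaVersion": 2,
	  "config": {
	    "mediaType": "application/vnd.oci.image.config.v1+json",
	    "size": 7023,
	    "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
	  },
	  "layers": [
	    {
	      "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
	      "size": 32654,
	      "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
	    }
	  ]
	}`)

	var m v1.Manifest
	if err := json.Unmarshal(raw, &m); err != nil {
		panic(err)
	}
	fmt.Println("config:", m.Config.Digest)
	// Layers are listed base-first; apply in index order.
	for i, l := range m.Layers {
		fmt.Printf("layer %d: %s (%d bytes, %s)\n", i, l.Digest, l.Size, l.MediaType)
	}
}
```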


@ -1,68 +0,0 @@
# OCI Image Media Types
The following media types identify the formats described here and their referenced resources:
- `application/vnd.oci.descriptor.v1+json`: [Content Descriptor](descriptor.md)
- `application/vnd.oci.layout.header.v1+json`: [OCI Layout](image-layout.md#oci-layout-file)
- `application/vnd.oci.image.index.v1+json`: [Image Index](image-index.md)
- `application/vnd.oci.image.manifest.v1+json`: [Image manifest](manifest.md#image-manifest)
- `application/vnd.oci.image.config.v1+json`: [Image config](config.md)
- `application/vnd.oci.image.layer.v1.tar`: ["Layer", as a tar archive](layer.md)
- `application/vnd.oci.image.layer.v1.tar+gzip`: ["Layer", as a tar archive](layer.md#gzip-media-types) compressed with [gzip][rfc1952]
- `application/vnd.oci.image.layer.nondistributable.v1.tar`: ["Layer", as a tar archive with distribution restrictions](layer.md#non-distributable-layers)
- `application/vnd.oci.image.layer.nondistributable.v1.tar+gzip`: ["Layer", as a tar archive with distribution restrictions](layer.md#gzip-media-types) compressed with [gzip][rfc1952]
## Media Type Conflicts
[Blob](image-layout.md) retrieval methods MAY return media type metadata.
For example, an HTTP response might return a manifest with the Content-Type header set to `application/vnd.oci.image.manifest.v1+json`.
Implementations MAY also have expectations for the blob's media type and digest (e.g. from a [descriptor](descriptor.md) referencing the blob).
* Implementations that do not have an expected media type for the blob SHOULD respect the returned media type.
* Implementations that have an expected media type which matches the returned media type SHOULD respect the matched media type.
* Implementations that have an expected media type which does not match the returned media type SHOULD:
* Respect the expected media type if the blob matches the expected digest.
Implementations MAY warn about the media type mismatch.
* Return an error if the blob does not match the expected digest (as [recommended for descriptors](descriptor.md#properties)).
* Return an error if they do not have an expected digest.
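Taken together, these rules could be implemented as in the following non-normative sketch; `resolveMediaType` and its parameters are illustrative names:
```go
package main

import "fmt"

// resolveMediaType applies the rules above. expected may be empty when the
// implementation has no expectation; haveDigest reports whether there is an
// expected digest, and digestOK whether the blob matched it.
func resolveMediaType(expected, returned string, haveDigest, digestOK bool) (string, error) {
	switch {
	case expected == "":
		return returned, nil // no expectation: respect the returned type
	case expected == returned:
		return expected, nil
	case !haveDigest:
		return "", fmt.Errorf("media type mismatch and no expected digest")
	case !digestOK:
		return "", fmt.Errorf("media type mismatch and digest mismatch")
	default:
		// Blob matches the expected digest: respect the expected media
		// type, optionally warning about the mismatch.
		return expected, nil
	}
}

func main() {
	mt, err := resolveMediaType(
		"application/vnd.oci.image.manifest.v1+json",
		"application/octet-stream",
		true, true)
	fmt.Println(mt, err)
}
```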
## Compatibility Matrix
The OCI Image Specification strives to be backwards and forwards compatible when possible.
Breaking compatibility with existing systems creates a burden on users whether they are build systems, distribution systems, container engines, etc.
This section shows where the OCI Image Specification is compatible with formats external to the OCI Image and different versions of this specification.
### application/vnd.oci.image.index.v1+json
**Similar/related schema**
- [application/vnd.docker.distribution.manifest.list.v2+json](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md#manifest-list) - mediaType is different
### application/vnd.oci.image.manifest.v1+json
**Similar/related schema**
- [application/vnd.docker.distribution.manifest.v2+json](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md#image-manifest-field-descriptions)
### application/vnd.oci.image.layer.v1.tar+gzip
**Interchangeable and fully compatible mime-types**
- [application/vnd.docker.image.rootfs.diff.tar.gzip](https://github.com/docker/docker/blob/master/image/spec/v1.md#creating-an-image-filesystem-changeset)
### application/vnd.oci.image.config.v1+json
**Similar/related schema**
- [application/vnd.docker.container.image.v1+json](https://github.com/docker/docker/blob/master/image/spec/v1.md#image-json-description)
## Relations
The following figure shows how the above media types reference each other:
![](img/media-types.png)
[Descriptors](descriptor.md) are used for all references.
The image-index, being a "fat manifest", references a list of image manifests per target platform. An image manifest references exactly one target configuration and possibly many layers.
[rfc1952]: https://tools.ietf.org/html/rfc1952


@ -1,7 +0,0 @@
# Project docs
## Release Process
* `git tag` the prior commit (preferably signed tag)
* Make a release on [github.com/opencontainers/image-spec](https://github.com/opencontainers/image-spec/releases) for the version. Attach the produced docs.


@ -1,223 +0,0 @@
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema_test
import (
_ "crypto/sha256"
"strings"
"testing"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/schema"
"github.com/opencontainers/image-spec/specs-go/v1"
)
var compatMap = map[string]string{
"application/vnd.docker.distribution.manifest.list.v2+json": v1.MediaTypeImageIndex,
"application/vnd.docker.distribution.manifest.v2+json": v1.MediaTypeImageManifest,
"application/vnd.docker.image.rootfs.diff.tar.gzip": v1.MediaTypeImageLayerGzip,
"application/vnd.docker.container.image.v1+json": v1.MediaTypeImageConfig,
}
// convertFormats converts Docker v2.2 image format JSON documents to OCI
// format by simply replacing instances of the strings found in the compatMap
// found in the input string.
func convertFormats(input string) string {
out := input
for k, v := range compatMap {
out = strings.Replace(out, k, v, -1)
}
return out
}
func TestBackwardsCompatibilityImageIndex(t *testing.T) {
for i, tt := range []struct {
imageIndex string
digest digest.Digest
fail bool
}{
{
digest: "sha256:4ffd0883f25635999f04ea543240a27c9a4341979ff7d46a9774f71512eebb1f",
imageIndex: `{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
"manifests": [
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 2094,
"digest": "sha256:7820f9a86d4ad15a2c4f0c0e5479298df2aa7c2f6871288e2ef8546f3e7b6783",
"platform": {
"architecture": "ppc64le",
"os": "linux"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 1922,
"digest": "sha256:ae1b0e06e8ade3a11267564a26e750585ba2259c0ecab59ab165ad1af41d1bdd",
"platform": {
"architecture": "amd64",
"os": "linux"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 2084,
"digest": "sha256:e4c0df75810b953d6717b8f8f28298d73870e8aa2a0d5e77b8391f16fdfbbbe2",
"platform": {
"architecture": "s390x",
"os": "linux"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 2084,
"digest": "sha256:07ebe243465ef4a667b78154ae6c3ea46fdb1582936aac3ac899ea311a701b40",
"platform": {
"architecture": "arm",
"os": "linux",
"variant": "v7"
}
},
{
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"size": 2090,
"digest": "sha256:fb2fc0707b86dafa9959fe3d29e66af8787aee4d9a23581714be65db4265ad8a",
"platform": {
"architecture": "arm64",
"os": "linux",
"variant": "v8"
}
}
]
}`,
fail: false,
},
} {
got := digest.FromString(tt.imageIndex)
if tt.digest != got {
t.Errorf("test %d: expected digest %s but got %s", i, tt.digest, got)
}
imageIndex := convertFormats(tt.imageIndex)
r := strings.NewReader(imageIndex)
err := schema.ValidatorMediaTypeImageIndex.Validate(r)
if got := err != nil; tt.fail != got {
t.Errorf("test %d: expected validation failure %t but got %t, err %v", i, tt.fail, got, err)
}
}
}
func TestBackwardsCompatibilityManifest(t *testing.T) {
for i, tt := range []struct {
manifest string
digest digest.Digest
fail bool
}{
// manifest pulled from docker hub using hash value
//
// curl -L -H "Authorization: Bearer ..." -H \
// "Accept: application/vnd.docker.distribution.manifest.v2+json" \
// https://registry-1.docker.io/v2/library/docker/manifests/sha256:888206c77cd2811ec47e752ba291e5b7734e3ef137dfd222daadaca39a9f17bc
{
digest: "sha256:888206c77cd2811ec47e752ba291e5b7734e3ef137dfd222daadaca39a9f17bc",
manifest: `{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/octet-stream",
"size": 3210,
"digest": "sha256:5359a4f250650c20227055957e353e8f8a74152f35fe36f00b6b1f9fc19c8861"
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 2310272,
"digest": "sha256:fae91920dcd4542f97c9350b3157139a5d901362c2abec284de5ebd1b45b4957"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 913022,
"digest": "sha256:f384f6ab36adad485192f09379c0b58dc612a3cde82c551e082a7c29a87c95da"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 9861668,
"digest": "sha256:ed0d2dd5e1a0e5e650a330a864c8a122e9aa91fa6ba9ac6f0bd1882e59df55e7"
},
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 465,
"digest": "sha256:ec4d00b58417c45f7ddcfde7bcad8c9d62a7d6d5d17cdc1f7d79bcb2e22c1491"
}
]
}`,
fail: false,
},
} {
got := digest.FromString(tt.manifest)
if tt.digest != got {
t.Errorf("test %d: expected digest %s but got %s", i, tt.digest, got)
}
manifest := convertFormats(tt.manifest)
r := strings.NewReader(manifest)
err := schema.ValidatorMediaTypeManifest.Validate(r)
if got := err != nil; tt.fail != got {
t.Errorf("test %d: expected validation failure %t but got %t, err %v", i, tt.fail, got, err)
}
}
}
func TestBackwardsCompatibilityConfig(t *testing.T) {
for i, tt := range []struct {
config string
digest digest.Digest
fail bool
}{
// config pulled from docker hub blob store
//
// $ TOKEN=$(curl https://auth.docker.io/token\?service\=registry.docker.io\&scope\=repository:library/docker:pull | jq -r .token)
// $ CONFIG_DIGEST=$(curl -H "Authorization: Bearer ${TOKEN}" -H 'Accept: application/vnd.docker.distribution.manifest.v2+json' https://index.docker.io/v2/library/docker/manifests/1.12.1 | jq -r .config.digest)
// $ curl -LH "Authorization: Bearer ${TOKEN}" https://index.docker.io/v2/library/docker/blobs/${CONFIG_DIGEST}
{
digest: "sha256:a059ea7356d5b5a9e0f6352bfa463e7bd4721c2ade3ef168603826e0de6fe54b",
config: `{"architecture":"amd64","config":{"Hostname":"09713501c176","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","DOCKER_BUCKET=get.docker.com","DOCKER_VERSION=1.12.1","DOCKER_SHA256=05ceec7fd937e1416e5dce12b0b6e1c655907d349d52574319a1e875077ccb79"],"Cmd":["sh"],"Image":"sha256:32e2e3ccf2a4fbaa75b078bf539cd5ea2e374a4242665a5ec3f3c01e7a3eefb8","Volumes":null,"WorkingDir":"","Entrypoint":["docker-entrypoint.sh"],"OnBuild":[],"Labels":{}},"container":"15a30be053fb3069a7879b4ea537e84689d8e8e8ba94dc4dd499271506803ba1","container_config":{"Hostname":"09713501c176","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","DOCKER_BUCKET=get.docker.com","DOCKER_VERSION=1.12.1","DOCKER_SHA256=05ceec7fd937e1416e5dce12b0b6e1c655907d349d52574319a1e875077ccb79"],"Cmd":["/bin/sh","-c","#(nop) ","CMD [\"sh\"]"],"Image":"sha256:32e2e3ccf2a4fbaa75b078bf539cd5ea2e374a4242665a5ec3f3c01e7a3eefb8","Volumes":null,"WorkingDir":"","Entrypoint":["docker-entrypoint.sh"],"OnBuild":[],"Labels":{}},"created":"2016-10-10T23:04:00.821781828Z","docker_version":"1.12.1","history":[{"created":"2016-09-23T16:29:57.276868291Z","created_by":"/bin/sh -c #(nop) ADD file:d6ee3ba7a4d59b161917082cc7242c660c61bb3f3cc1549c7e2dfff2b0de7104 in / "},{"created":"2016-09-23T16:36:54.024611637Z","created_by":"/bin/sh -c apk add --no-cache \t\tca-certificates \t\tcurl \t\topenssl"},{"created":"2016-09-23T16:36:54.365914519Z","created_by":"/bin/sh -c #(nop) ENV DOCKER_BUCKET=get.docker.com","empty_layer":true},{"created":"2016-09-23T16:36:54.662005049Z","created_by":"/bin/sh -c #(nop) ENV DOCKER_VERSION=1.12.1","empty_layer":true},{"created":"2016-09-23T16:36:54.946033025Z","created_by":"/bin/sh -c #(nop) ENV DOCKER_SHA256=05ceec7fd937e1416e5dce12b0b6e1c655907d349d52574319a1e875077ccb79","empty_layer":true},{"created":"2016-09-23T16:36:58.535084011Z","created_by":"/bin/sh -c set -x \t\u0026\u0026 curl -fSL \"https://${DOCKER_BUCKET}/builds/Linux/x86_64/docker-${DOCKER_VERSION}.tgz\" -o docker.tgz \t\u0026\u0026 echo \"${DOCKER_SHA256} *docker.tgz\" | sha256sum -c - \t\u0026\u0026 tar -xzvf docker.tgz \t\u0026\u0026 mv docker/* /usr/local/bin/ \t\u0026\u0026 rmdir docker \t\u0026\u0026 rm docker.tgz \t\u0026\u0026 docker -v"},{"created":"2016-10-10T23:04:00.334158993Z","created_by":"/bin/sh -c #(nop) COPY file:399605dc1850a60a586b5494ab538bad495fd6f94eabca0c5f8a26468ce6030f in /usr/local/bin/ "},{"created":"2016-10-10T23:04:00.577900192Z","created_by":"/bin/sh -c #(nop) ENTRYPOINT [\"docker-entrypoint.sh\"]","empty_layer":true},{"created":"2016-10-10T23:04:00.821781828Z","created_by":"/bin/sh -c #(nop) CMD [\"sh\"]","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:9007f5987db353ec398a223bc5a135c5a9601798ba20a1abba537ea2f8ac765f","sha256:1b06990ff0df8dad281fad7e6e4c5e91f32f8f8c095d6c74cf1e90a6f4407e28","sha256:9d12251ce74aac7619a83641ab72431a8d82e58bcd8a262c2bb0cdb280f1f3b5","sha256:17a7f292c2427adfc75c3a789bab8efec925dc38c5437bf83d2f528013ab80e2"]}}`,
fail: false,
},
{
// fedora:23 from docker hub
// both Entrypoint and Cmd can be nullable
digest: "sha256:a20665eb1fe2912accb3d5dadaed360430df0d1aa46874875886947d61d3d4ee",
config: `{"architecture":"amd64","author":"Patrick Uiterwijk \u003cpatrick@puiterwijk.org\u003e","config":{"Hostname":"8dfe43d80430","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"sha256:6986ae504bbf843512d680cc959484452034965db15f75ee8bdd1b107f61500b","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"container":"6249cd2c4b1d6b1bf05903364cbcb95781508994d6407c1564d494e748ea1b41","container_config":{"Hostname":"8dfe43d80430","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","#(nop) ADD file:293a6e463aa402bb8f80eb5cfc937f375cedc6843abaeb9eccfe3923bb3fc80b in /"],"Image":"sha256:6986ae504bbf843512d680cc959484452034965db15f75ee8bdd1b107f61500b","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"created":"2016-06-10T18:44:31.784795904Z","docker_version":"1.10.3","history":[{"created":"2016-06-10T18:44:03.360264073Z","author":"Patrick Uiterwijk \u003cpatrick@puiterwijk.org\u003e","created_by":"/bin/sh -c #(nop) MAINTAINER Patrick Uiterwijk \u003cpatrick@puiterwijk.org\u003e","empty_layer":true},{"created":"2016-06-10T18:44:31.784795904Z","author":"Patrick Uiterwijk \u003cpatrick@puiterwijk.org\u003e","created_by":"/bin/sh -c #(nop) ADD file:293a6e463aa402bb8f80eb5cfc937f375cedc6843abaeb9eccfe3923bb3fc80b in /"}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:d43f38155a799dc53d8fbb9f3bc11f51805f4027cd5c3d10b9823201cd5b9400"]}}`,
fail: false,
},
} {
got := digest.FromString(tt.config)
if tt.digest != got {
t.Errorf("test %d: expected digest %s but got %s", i, tt.digest, got)
}
config := convertFormats(tt.config)
r := strings.NewReader(config)
err := schema.ValidatorMediaTypeImageConfig.Validate(r)
if got := err != nil; tt.fail != got {
t.Errorf("test %d: expected validation failure %t but got %t, err %v", i, tt.fail, got, err)
}
}
}


@ -1,140 +0,0 @@
{
"description": "OpenContainer Config Specification",
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "https://opencontainers.org/schema/image/config",
"type": "object",
"properties": {
"created": {
"type": "string",
"format": "date-time"
},
"author": {
"type": "string"
},
"architecture": {
"type": "string"
},
"os": {
"type": "string"
},
"config": {
"type": "object",
"properties": {
"User": {
"type": "string"
},
"ExposedPorts": {
"$ref": "defs.json#/definitions/mapStringObject"
},
"Env": {
"type": "array",
"items": {
"type": "string"
}
},
"Entrypoint": {
"oneOf": [
{
"type": "array",
"items": {
"type": "string"
}
},
{
"type": "null"
}
]
},
"Cmd": {
"oneOf": [
{
"type": "array",
"items": {
"type": "string"
}
},
{
"type": "null"
}
]
},
"Volumes": {
"oneOf": [
{
"$ref": "defs.json#/definitions/mapStringObject"
},
{
"type": "null"
}
]
},
"WorkingDir": {
"type": "string"
},
"Labels": {
"oneOf": [
{
"$ref": "defs.json#/definitions/mapStringString"
},
{
"type": "null"
}
]
},
"StopSignal": {
"type": "string"
}
}
},
"rootfs": {
"type": "object",
"properties": {
"diff_ids": {
"type": "array",
"items": {
"type": "string"
}
},
"type": {
"type": "string",
"enum": [
"layers"
]
}
},
"required": [
"diff_ids",
"type"
]
},
"history": {
"type": "array",
"items": {
"type": "object",
"properties": {
"created": {
"type": "string",
"format": "date-time"
},
"author": {
"type": "string"
},
"created_by": {
"type": "string"
},
"comment": {
"type": "string"
},
"empty_layer": {
"type": "boolean"
}
}
}
}
},
"required": [
"architecture",
"os",
"rootfs"
]
}


@ -1,221 +0,0 @@
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema_test
import (
"strings"
"testing"
"github.com/opencontainers/image-spec/schema"
)
func TestConfig(t *testing.T) {
for i, tt := range []struct {
config string
fail bool
}{
// expected failure: field "os" has numeric value, must be string
{
config: `
{
"architecture": "amd64",
"os": 123,
"rootfs": {
"diff_ids": [
"sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
],
"type": "layers"
}
}
`,
fail: true,
},
// expected failure: field "config.User" has numeric value, must be string
{
config: `
{
"created": "2015-10-31T22:22:56.015925234Z",
"author": "Alyssa P. Hacker <alyspdev@example.com>",
"architecture": "amd64",
"os": "linux",
"config": {
"User": 1234
},
"rootfs": {
"diff_ids": [
"sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
],
"type": "layers"
}
}
`,
fail: true,
},
// expected failure: history has string value, must be an array
{
config: `
{
"history": "should be an array",
"architecture": "amd64",
"os": 123,
"rootfs": {
"diff_ids": [
"sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
],
"type": "layers"
}
}
`,
fail: true,
},
// expected failure: Env contains a numeric value, items must be strings
{
config: `
{
"architecture": "amd64",
"os": 123,
"config": {
"Env": [
7353
]
},
"rootfs": {
"diff_ids": [
"sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
],
"type": "layers"
}
}
`,
fail: true,
},
// expected failure: config.Volumes has string array, must be an object (string set)
{
config: `
{
"architecture": "amd64",
"os": 123,
"config": {
"Volumes": [
"/var/job-result-data",
"/var/log/my-app-logs"
]
},
"rootfs": {
"diff_ids": [
"sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
],
"type": "layers"
}
}
`,
fail: true,
},
// expected failure: invalid JSON
{
config: `invalid JSON`,
fail: true,
},
// valid config with optional fields
{
config: `
{
"created": "2015-10-31T22:22:56.015925234Z",
"author": "Alyssa P. Hacker <alyspdev@example.com>",
"architecture": "amd64",
"os": "linux",
"config": {
"User": "1:1",
"ExposedPorts": {
"8080/tcp": {}
},
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"FOO=docker_is_a_really",
"BAR=great_tool_you_know"
],
"Entrypoint": [
"/bin/sh"
],
"Cmd": [
"--foreground",
"--config",
"/etc/my-app.d/default.cfg"
],
"Volumes": {
"/var/job-result-data": {},
"/var/log/my-app-logs": {}
},
"StopSignal": "SIGKILL",
"WorkingDir": "/home/alice",
"Labels": {
"com.example.project.git.url": "https://example.com/project.git",
"com.example.project.git.commit": "45a939b2999782a3f005621a8d0f29aa387e1d6b"
}
},
"rootfs": {
"diff_ids": [
"sha256:9d3dd9504c685a304985025df4ed0283e47ac9ffa9bd0326fddf4d59513f0827",
"sha256:2b689805fbd00b2db1df73fae47562faac1a626d5f61744bfe29946ecff5d73d"
],
"type": "layers"
},
"history": [
{
"created": "2015-10-31T22:22:54.690851953Z",
"created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
},
{
"created": "2015-10-31T22:22:55.613815829Z",
"created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
"empty_layer": true
}
]
}
`,
fail: false,
},
// valid config with only required fields
{
config: `
{
"architecture": "amd64",
"os": "linux",
"rootfs": {
"diff_ids": [
"sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
],
"type": "layers"
}
}
`,
fail: false,
},
} {
r := strings.NewReader(tt.config)
err := schema.ValidatorMediaTypeImageConfig.Validate(r)
if got := err != nil; tt.fail != got {
t.Errorf("test %d: expected validation failure %t but got %t, err %v", i, tt.fail, got, err)
}
}
}

View file

@ -1,33 +0,0 @@
{
"description": "OpenContainer Content Descriptor Specification",
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "https://opencontainers.org/schema/descriptor",
"type": "object",
"properties": {
"mediaType": {
"description": "the mediatype of the referenced object",
"$ref": "defs-descriptor.json#/definitions/mediaType"
},
"size": {
"description": "the size in bytes of the referenced object",
"$ref": "defs.json#/definitions/int64"
},
"digest": {
"description": "the cryptographic checksum digest of the object, in the pattern '<algorithm>:<encoded>'",
"$ref": "defs-descriptor.json#/definitions/digest"
},
"urls": {
"description": "a list of urls from which this object may be downloaded",
"$ref": "defs-descriptor.json#/definitions/urls"
},
"annotations": {
"id": "https://opencontainers.org/schema/image/descriptor/annotations",
"$ref": "defs-descriptor.json#/definitions/annotations"
}
},
"required": [
"mediaType",
"size",
"digest"
]
}

View file

@ -1,27 +0,0 @@
{
"description": "Definitions particular to OpenContainer Descriptor Specification",
"definitions": {
"mediaType": {
"id": "https://opencontainers.org/schema/image/descriptor/mediaType",
"type": "string",
"pattern": "^[A-Za-z0-9][A-Za-z0-9!#$&-^_.+]{0,126}/[A-Za-z0-9][A-Za-z0-9!#$&-^_.+]{0,126}$"
},
"digest": {
"description": "the cryptographic checksum digest of the object, in the pattern '<algorithm>:<encoded>'",
"type": "string",
"pattern": "^[a-z0-9]+(?:[+._-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$"
},
"urls": {
"description": "a list of urls from which this object may be downloaded",
"type": "array",
"items": {
"type": "string",
"format": "uri"
}
},
"annotations": {
"id": "https://opencontainers.org/schema/image/descriptor/annotations",
"$ref": "defs.json#/definitions/mapStringString"
}
}
}
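
The digest pattern above is the load-bearing part of this file. A quick sketch of how it behaves on a few candidates; the pattern string is copied verbatim from this schema, and the sample digests are taken from the tests in this commit:

```
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Pattern copied verbatim from defs-descriptor.json.
	digestRe := regexp.MustCompile(`^[a-z0-9]+(?:[+._-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$`)
	for _, d := range []string{
		"sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270", // match
		"sha256+b64u:LCa0a2j_xo_5m0U8HTBBNBNCLXBkg7-g-YpeiGJm564",                 // match: "+" separator in algorithm
		"SHA256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270", // no match: uppercase algorithm
		"sha256+foo+-b:c86f7763",                                                  // no match: repeated separators
	} {
		fmt.Println(digestRe.MatchString(d), d)
	}
}
```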

View file

@ -1,91 +0,0 @@
{
"description": "Definitions used throughout the OpenContainer Specification",
"definitions": {
"int8": {
"type": "integer",
"minimum": -128,
"maximum": 127
},
"int16": {
"type": "integer",
"minimum": -32768,
"maximum": 32767
},
"int32": {
"type": "integer",
"minimum": -2147483648,
"maximum": 2147483647
},
"int64": {
"type": "integer",
"minimum": -9223372036854776000,
"maximum": 9223372036854776000
},
"uint8": {
"type": "integer",
"minimum": 0,
"maximum": 255
},
"uint16": {
"type": "integer",
"minimum": 0,
"maximum": 65535
},
"uint32": {
"type": "integer",
"minimum": 0,
"maximum": 4294967295
},
"uint64": {
"type": "integer",
"minimum": 0,
"maximum": 18446744073709552000
},
"uint16Pointer": {
"oneOf": [
{
"$ref": "#/definitions/uint16"
},
{
"type": "null"
}
]
},
"uint64Pointer": {
"oneOf": [
{
"$ref": "#/definitions/uint64"
},
{
"type": "null"
}
]
},
"stringPointer": {
"oneOf": [
{
"type": "string"
},
{
"type": "null"
}
]
},
"mapStringString": {
"type": "object",
"patternProperties": {
".{1,}": {
"type": "string"
}
}
},
"mapStringObject": {
"type": "object",
"patternProperties": {
".{1,}": {
"type": "object"
}
}
}
}
}
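
One detail worth noting: 2^63 - 1 is 9223372036854775807, yet the int64 and uint64 bounds above end in ...776000 and ...552000. That is consistent with the bounds having passed through an IEEE-754 double at some point (JSON numbers typically are doubles), which cannot represent MaxInt64 exactly; this is an inference, not something the file documents. A small sketch of the rounding:

```
package main

import (
	"fmt"
	"math"
	"strconv"
)

func main() {
	// MaxInt64 rounds to the nearest float64, which is exactly 2^63;
	// its shortest round-trip decimal form is the constant in defs.json.
	f := float64(math.MaxInt64)
	fmt.Println(strconv.FormatFloat(f, 'f', -1, 64)) // 9223372036854776000
}
```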

View file

@ -1,303 +0,0 @@
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema_test
import (
"strings"
"testing"
"github.com/opencontainers/image-spec/schema"
)
func TestDescriptor(t *testing.T) {
for i, tt := range []struct {
descriptor string
fail bool
}{
// valid descriptor
{
descriptor: `
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7682,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270"
}
`,
fail: false,
},
// expected failure: mediaType missing
{
descriptor: `
{
"size": 7682,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270"
}
`,
fail: true,
},
// expected failure: mediaType does not match pattern (no subtype)
{
descriptor: `
{
"mediaType": "application",
"size": 7682,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270"
}
`,
fail: true,
},
// expected failure: mediaType does not match pattern (invalid first type character)
{
descriptor: `
{
"mediaType": ".foo/bar",
"size": 7682,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270"
}
`,
fail: true,
},
// expected failure: mediaType does not match pattern (invalid first subtype character)
{
descriptor: `
{
"mediaType": "foo/.bar",
"size": 7682,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270"
}
`,
fail: true,
},
// expected success: mediaType has type and subtype as long as possible
{
descriptor: `
{
"mediaType": "1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567/1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567",
"size": 7682,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270"
}
`,
fail: false,
},
// expected failure: mediaType does not match pattern (type too long)
{
descriptor: `
{
"mediaType": "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678/bar",
"size": 7682,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270"
}
`,
fail: true,
},
// expected failure: mediaType does not match pattern (subtype too long)
{
descriptor: `
{
"mediaType": "foo/12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678",
"size": 7682,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270"
}
`,
fail: true,
},
// expected failure: size missing
{
descriptor: `
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270"
}
`,
fail: true,
},
// expected failure: size is a string, expected integer
{
descriptor: `
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": "7682",
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270"
}
`,
fail: true,
},
// expected failure: digest missing
{
descriptor: `
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7682
}
`,
fail: true,
},
// expected failure: digest does not match pattern (no algorithm)
{
descriptor: `
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7682,
"digest": ":5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270"
}
`,
fail: true,
},
// expected failure: digest does not match pattern (no hash)
{
descriptor: `
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7682,
"digest": "sha256"
}
`,
fail: true,
},
// expected failure: digest does not match pattern (invalid algorithm characters)
{
descriptor: `
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7682,
"digest": "SHA256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270"
}
`,
fail: true,
},
// expected failure: digest does not match pattern (characters need to be lowercase for sha256)
{
descriptor: `
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7682,
"digest": "sha256:5B0BCABD1ED22E9FB1310CF6C2DEC7CDEF19F0AD69EFA1F392E94A4333501270"
}
`,
fail: true,
},
// expected success: valid URL entry
{
descriptor: `
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7682,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
"urls": [
"https://example.com/foo"
]
}
`,
fail: false,
},
// expected failure: urls does not match format (invalid url characters)
{
descriptor: `
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7682,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
"urls": [
"value"
]
}
`,
fail: true,
},
{
descriptor: `{
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 1470,
"digest": "sha256+b64:c86f7763873b6c0aae22d963bab59b4f5debbed6685761b5951584f6efb0633b"
}`,
},
{
descriptor: `{
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 1470,
"digest": "sha256+foo-bar:c86f7763873b6c0aae22d963bab59b4f5debbed6685761b5951584f6efb0633b"
}`,
},
{
descriptor: `
{
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 1470,
"digest": "sha256.foo-bar:c86f7763873b6c0aae22d963bab59b4f5debbed6685761b5951584f6efb0633b"
}`,
},
{
descriptor: `{
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 1470,
"digest": "multihash+base58:QmRZxt2b1FVZPNqd8hsiykDL3TdBDeTSPX9Kv46HmX4Gx8"
}`,
},
{
// fail: repeated separators in algorithm
descriptor: `{
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 1470,
"digest": "sha256+foo+-b:c86f7763873b6c0aae22d963bab59b4f5debbed6685761b5951584f6efb0633b"
}`,
fail: true,
},
{
descriptor: `{
"digest": "sha256+b64u:LCa0a2j_xo_5m0U8HTBBNBNCLXBkg7-g-YpeiGJm564",
"size": 1000000,
"mediaType": "application/vnd.oci.image.config.v1+json"
}`,
},
{
// test for those who cannot use modulo arithmetic to recover padding.
descriptor: `{
"digest": "sha256+b64u.unknownlength:LCa0a2j_xo_5m0U8HTBBNBNCLXBkg7-g-YpeiGJm564=",
"size": 1000000,
"mediaType": "application/vnd.oci.image.config.v1+json"
}`,
},
} {
r := strings.NewReader(tt.descriptor)
err := schema.ValidatorMediaTypeDescriptor.Validate(r)
if got := err != nil; tt.fail != got {
t.Errorf("test %d: expected validation failure %t but got %t, err %v", i, tt.fail, got, err)
}
}
}

View file

@ -1,16 +0,0 @@
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package schema defines the OCI image media types, schema definitions and validation functions.
package schema

View file

@ -1,44 +0,0 @@
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema
import (
"encoding/json"
"io"
"go4.org/errorutil"
)
// A SyntaxError is a description of a JSON syntax error
// including line, column and offset in the JSON file.
type SyntaxError struct {
msg string
Line, Col int
Offset int64
}
func (e *SyntaxError) Error() string { return e.msg }
// WrapSyntaxError checks whether the given error is a *json.SyntaxError
// and converts it into a *schema.SyntaxError containing line/col information using the given reader.
// If the given error is not a *json.SyntaxError it is returned unchanged.
func WrapSyntaxError(r io.Reader, err error) error {
if serr, ok := err.(*json.SyntaxError); ok {
line, col, _ := errorutil.HighlightBytePosition(r, serr.Offset)
return &SyntaxError{serr.Error(), line, col, serr.Offset}
}
return err
}
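
A usage sketch for the wrapper above, as a hypothetical external caller (the import path matches this repository; the malformed JSON is just an illustration):

```
package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	"github.com/opencontainers/image-spec/schema"
)

func main() {
	raw := []byte("{\n  \"bad\": \n}")
	var v interface{}
	if err := json.Unmarshal(raw, &v); err != nil {
		// Re-read the same bytes so the wrapper can locate the offset.
		err = schema.WrapSyntaxError(bytes.NewReader(raw), err)
		if serr, ok := err.(*schema.SyntaxError); ok {
			fmt.Printf("syntax error at line %d, col %d (offset %d): %v\n",
				serr.Line, serr.Col, serr.Offset, serr)
		}
	}
}
```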

View file

@ -1,321 +0,0 @@
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema
import (
"bytes"
"compress/gzip"
"encoding/base64"
"io/ioutil"
"net/http"
"os"
"path"
"sync"
"time"
)
type _escLocalFS struct{}
var _escLocal _escLocalFS
type _escStaticFS struct{}
var _escStatic _escStaticFS
type _escDirectory struct {
fs http.FileSystem
name string
}
type _escFile struct {
compressed string
size int64
modtime int64
local string
isDir bool
once sync.Once
data []byte
name string
}
func (_escLocalFS) Open(name string) (http.File, error) {
f, present := _escData[path.Clean(name)]
if !present {
return nil, os.ErrNotExist
}
return os.Open(f.local)
}
func (_escStaticFS) prepare(name string) (*_escFile, error) {
f, present := _escData[path.Clean(name)]
if !present {
return nil, os.ErrNotExist
}
var err error
f.once.Do(func() {
f.name = path.Base(name)
if f.size == 0 {
return
}
var gr *gzip.Reader
b64 := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(f.compressed))
gr, err = gzip.NewReader(b64)
if err != nil {
return
}
f.data, err = ioutil.ReadAll(gr)
})
if err != nil {
return nil, err
}
return f, nil
}
func (fs _escStaticFS) Open(name string) (http.File, error) {
f, err := fs.prepare(name)
if err != nil {
return nil, err
}
return f.File()
}
func (dir _escDirectory) Open(name string) (http.File, error) {
return dir.fs.Open(dir.name + name)
}
func (f *_escFile) File() (http.File, error) {
type httpFile struct {
*bytes.Reader
*_escFile
}
return &httpFile{
Reader: bytes.NewReader(f.data),
_escFile: f,
}, nil
}
func (f *_escFile) Close() error {
return nil
}
func (f *_escFile) Readdir(count int) ([]os.FileInfo, error) {
return nil, nil
}
func (f *_escFile) Stat() (os.FileInfo, error) {
return f, nil
}
func (f *_escFile) Name() string {
return f.name
}
func (f *_escFile) Size() int64 {
return f.size
}
func (f *_escFile) Mode() os.FileMode {
return 0
}
func (f *_escFile) ModTime() time.Time {
return time.Unix(f.modtime, 0)
}
func (f *_escFile) IsDir() bool {
return f.isDir
}
func (f *_escFile) Sys() interface{} {
return f
}
// _escFS returns an http.FileSystem for the embedded assets. If useLocal is true,
// the local filesystem's contents are used instead.
func _escFS(useLocal bool) http.FileSystem {
if useLocal {
return _escLocal
}
return _escStatic
}
// _escDir returns an http.FileSystem for the embedded assets on a given prefix dir.
// If useLocal is true, the local filesystem's contents are used instead.
func _escDir(useLocal bool, name string) http.FileSystem {
if useLocal {
return _escDirectory{fs: _escLocal, name: name}
}
return _escDirectory{fs: _escStatic, name: name}
}
// _escFSByte returns the named file from the embedded assets. If useLocal is
// true, the local filesystem's contents are used instead.
func _escFSByte(useLocal bool, name string) ([]byte, error) {
if useLocal {
f, err := _escLocal.Open(name)
if err != nil {
return nil, err
}
b, err := ioutil.ReadAll(f)
f.Close()
return b, err
}
f, err := _escStatic.prepare(name)
if err != nil {
return nil, err
}
return f.data, nil
}
// _escFSMustByte is the same as _escFSByte, but panics if name is not present.
func _escFSMustByte(useLocal bool, name string) []byte {
b, err := _escFSByte(useLocal, name)
if err != nil {
panic(err)
}
return b
}
// _escFSString is the string version of _escFSByte.
func _escFSString(useLocal bool, name string) (string, error) {
b, err := _escFSByte(useLocal, name)
return string(b), err
}
// _escFSMustString is the string version of _escFSMustByte.
func _escFSMustString(useLocal bool, name string) string {
return string(_escFSMustByte(useLocal, name))
}
var _escData = map[string]*_escFile{
"/config-schema.json": {
local: "config-schema.json",
size: 2771,
modtime: 1489087148,
compressed: `
H4sIAAAAAAAA/+RWQW/bPAy9+1cYbo9t/R2+U67dbgMyINh2KIZAsemEnSVqFD3MGPLfB8vJZtmym3XI
aScDFB/f4xMl60eSplkJrmC0gmSyVZqtLZhHMqLQAKePZCrcpxsLBVZYKJ9118FuXXEArTrIQcSu8vzZ
kbnvow/E+7xkVcn9f//nfeymx2F5hrhVnpMFU5zZnIf12TlqtYe88Pw9UloLHZZ2z1BIH7NMFlgQXLZK
u3bSNCsYlED5KzCAOmE0fTkfr4i1km6lVAL3ghoyv3bsUzLVyIF4oVSYzcUBBQppGC7FkLs08+RFJHvg
iI9HXPHxDw44iMwwDlh9ztvvlhyU74nFjfG3DJU3ECr30I3ATV5ChQa7UXG5VnbjK697jfH65tucLMWs
2uxuuIQCeixjoZE0Pc6QCreW0MiYmwysu56eAoKQblHigswXpIZyR5IXVZimrsNKwzqfoxY86vKf7f0j
1Y2GyThf2P9rp/7aXX0i/oJm/wZfdc7fqR3U17ZkE9n4a1qyEbIb3BtVX2xJMvyer18mkip6WV96/ZZY
VVssJwZf/6475S91H9CCafRkx7NatcAuizuejFgzhq8Nsv8PP0U8GKtLhhXPnh/QCXEbMz00K2LU3PbM
b1D07fCyW0vviMlWxN4UcYpZ/Enjdtf+RQ3SGiZ/vj8oANpKu/UTMV9kR1SDMjPzGZ6y5MQwnZvwWfX7
2RSey6SbnWPyMwAA//9KY9sL0woAAA==
`,
},
"/content-descriptor.json": {
local: "content-descriptor.json",
size: 1085,
modtime: 1494380450,
compressed: `
H4sIAAAAAAAA/5yTwW7UMBCG73mKUVqpl27NoeIQVb3AnQPcEAevPY6nbGwznlW1oL47mniXJoAo3Vsy
+r+Zz8n4RwfQe6yOqQjl1A/QfyiY3uUklhIy6BMmgffHUGb4WNBRIGdn4lpbXFYXcbKKR5EyGPNQc9q0
6k3m0Xi2QTZvbk2rXTSO/AmpgzG5YHKnyXXGWtr4X9MbJ4eCSubtAzpptcK5IAth7QfQgwH0E3qyn1q4
lf48r0SEOadNIQfQAmNAxuTQw2LGjF8yBuU8hrp5FrvRE18Yj4ESae9qnqfP7FNr0Vf6/pKPRoASbA+C
9ZVOfxGhJG9v1xKeRqzygobjQ5E8si2RHLiI7mvdT9DYk1ZzuVZdfS1WBDnB1Z3djZlJ4nQ/3OmP9ejv
r875jkfXlf+ed/Uf9hZ21BQ1CIHzBI+RXASJVI/OMNkDbBF8fky7bD36c+xmk5WbTSnLfDtWiv+77DTZ
ERcrb5b9zhBc4s2zO7r2jN/2xKhin3+/McttXS9NB/Cle+p+BgAA///HjexwPQQAAA==
`,
},
"/defs-descriptor.json": {
local: "defs-descriptor.json",
size: 922,
modtime: 1494380450,
compressed: `
H4sIAAAAAAAA/6STX2/TMBTF3/spLl7FgDZN4QFp0Ria2DsP42lTV93ZN/Ed8R/ZrqYy9bsjJ1naFYFA
PCSyj67Pub8b52kCIBRFGdgndlZUIK6oZst5F8FjSCw3LQZIDr56sl+cTciWAlwNx1yAa0+Sa5bYecx7
09FFVJBzAIQhxfht62mUAASrnKpT8rEqS+fJyueMuHChKaPUZLBkgw2Vakwt927zZ6/Ue4uYAttmr3tM
iUKHd3d7Wdxg8WNZnK32y1cn09fF3XoxWz0t5+8/fNyVf1c2FV3Erk8SihuK6ZDuaLhJE8iw9ck1Ab1m
CVKT/B43Bvqz4GrIRe7+gWSaA9tuOwDA6Tm2jQuctLmozvOoFKmL03+cwMA1e/O5up0t1sVqVN6+q/L6
srhZFmef1sVqdkS4CW38Ax9Cyz1ELoQ6OAOPmqWGpDkOVGBwC/cEyj3a1qEi9Wv/GAJu9zInMoe5vycF
ELULBvNXEJvAYtB3LzDQWpfw5fX8n7t46Dc2PQ1UZz9FdVw8RGdPyoPfojTor7ve+/cw50l+dpOfAQAA
//8aH/C2mgMAAA==
`,
},
"/defs.json": {
local: "defs.json",
size: 1670,
modtime: 1489087148,
compressed: `
H4sIAAAAAAAA/7STza6bMBCF9zzFyO2S9oJtbGDb7hMpy6oLSiaJq2AjY6RWEe9e8RNChFuJKneRgGc8
3zmeMbcAgByxKa2qnTKa5EC+4klp1a8aaBs8grtY054vpnXgLgi7GvUXo12hNFo41FiqkyqLoTwceTOA
5NBLABClXTqvAIj7XWOvprTDM9qhckhUSquqrUgOn2KaPsLFrykcUzkEu3Amx2IrmlEpfPA+vsIzuhVP
Yy55ygT3aczJlZDgW4UyShmTNGIiTbiUIooij6Jn15N0+x/T8enQJFlxN8/GBxZJwtbozXPxoTnNeCYk
zdb8zePw8eOUcyE5jySTUZYk1Nf8WOxNz7VLQaNxdyI5fJsCMKeG9EeLfZZ8eFt8cG9Ty+eNXeivvp9G
t9frYvf09t3Ti1c6FPy1DhtnlT5vd3jXGOtf66kq6sOAHf99V8n8+Imle9ykunAOrd5bU6N1CptFEQD5
fIvD7in0ryMEy+fK1G6UfmdTE+tvpoL+1wV/AgAA//96IpqyhgYAAA==
`,
},
"/image-index-schema.json": {
local: "image-index-schema.json",
size: 3009,
modtime: 1495059260,
compressed: `
H4sIAAAAAAAA/6yWv27bMBDGdz/FQQmQJYmKIuhgBFnaJVOHBl2KDAx5ki61SPVIJ3ELvXtBMrJlUXZt
1Zt95H33+07892cGkCm0kqlxZHQ2h+xrg/qz0U6QRob7WpQI91rhG3xrUFJBUoSplz733MoKa+HzKuea
eZ4/W6OvYvTacJkrFoW7+nCTx9hZzCPVpdh5npsGtexK2pAWZ+fky+fky8dEt2rQp5qnZ5Quxho2DbIj
tNkcvCWALOZ/R7bRVgynbh8qslAQLhTYaA8tuAohVIZQGaIYvEQ1EBaEBtIOS+SAEJQneMq3MddSncuk
Rk2a6mWdzeHjJibeulgItXEkq4WmAq2zffudsmAWqx67w7o/72g7XbEv7+01G+jxr/Y+wvhrSYy+1o91
1MOjIvHg0y77YUu/BxFFJVqXrUOPPfGRhZHIbw+kC8SvhTDbewBThMXBWCCjlqggsRREzhkLn62wsFdq
3ZNrvzvOcoUFafIVbL4h6Sm0qelDOP1EIA1PK4d2EusIIGn36WY33Hv/D8GTvGqcKVk0FUmQFcqfdllD
VOhwI+Olt+H/NsI5ZA0Xt2JRGiZX1XfzW78WFaq7i+l9H66boa8lL4arJnUlYEER3U+Hgk0NrxXJCpw/
V6IXqMUKnhCUedULIxSq6dSBaidzsxCuMFyn3Mdt5rXOgHPnNoY9WzmMCZYVOZRuyTjIA8jMlqetPQx7
93GqnY5Pdp/vhe61wzomXWaDCe2YzVPiGXsaqOuX5JY8Bdxa9jSQBQr/HU7dwo3uHszty7JfNrk2DzYJ
0P7T9otgEjo9XA/p4d7/7c4jRGhtXHjgjZx+x3V5c5DlfdXJZ19fZDbmpfvVbj2Dxh1Neq2N2fgfAx40
YKZnZzb2Muw96WYAj7N29jcAAP//8RsPuMELAAA=
`,
},
"/image-layout-schema.json": {
local: "image-layout-schema.json",
size: 439,
modtime: 1489792109,
compressed: `
H4sIAAAAAAAA/2yPQUvEMBCF7/0VQ/Sg4DYVPOW6pwVhD4IX8VDTaTvLNonJVFik/12SaRXRU5g38+W9
91kBqA6TjRSYvFMG1DGg23vHLTmMcJjaAeGxvfiZ4cmOOLXqLlPXSQYDamQORutT8m4nau3joLvY9rxr
HrRoV8JRtyHJaO0DOruZpYLJtaZsrM/FWEi+BMysfzuhXbUQfcDIhEkZyG2yQyYl8TPGJLVk97fth1yA
74FHhOP+8LvyDbmy8JZ2EgZ6OuNtsS8fbrESR3LDj45unpSBl3UGUPd1UzdqnV/Lu1QAS2kS8X2miN03
8l+PKnNL9RUAAP//k31n5bcBAAA=
`,
},
"/image-manifest-schema.json": {
local: "image-manifest-schema.json",
size: 921,
modtime: 1489087148,
compressed: `
H4sIAAAAAAAA/5ySMW8iMRCF+/0VI0MJ+O501bZXUZxSJEoTpXB2x7uDWNsZmygo4r9HtnHAkCKifTvv
zTdv/dEAiB59x+QCWSNaEHcOzT9rgiKDDOtJDQj/lSGNPsC9w440dSpNL6J97rsRJxWtYwiulXLjrVlm
dWV5kD0rHZa//sqszbKP+mLxrZTWoenKVp9seVpSJJDTkSB7w95hdNuXDXZHzbF1yIHQixbiYQAiRzwi
+3xclq9vfhjJgybc9uDzheghjAhpOZTlkPPgLQeC8qAMkAk4ICeKFH7bZbKG/Uort16tmcjQtJtEC39O
mnovWpIO+YvorNE0nDcwZ9QxNqKhCcvSiOVV/H+ism/VHtmf2wuVYlb7imkdcIqjv099HJVi/ul2gENF
oYyxIb28CuXGus/TFpet9Kj9JdRM9qjJULJU9qawJlLB+Lojxoj19N07rP9JXXED8Nwcms8AAAD//7u3
Dj+ZAwAA
`,
},
"/": {
isDir: true,
local: "",
},
}

View file

@ -1,21 +0,0 @@
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema
// Generates an embedded http.FileSystem for all schema files
// using esc (https://github.com/mjibson/esc).
// This should generally be invoked with `make schema-fs`
//go:generate esc -private -pkg=schema -include=.*\.json$ .

View file

@ -1,90 +0,0 @@
{
"description": "OpenContainer Image Index Specification",
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "https://opencontainers.org/schema/image/index",
"type": "object",
"properties": {
"schemaVersion": {
"description": "This field specifies the image index schema version as an integer",
"id": "https://opencontainers.org/schema/image/index/schemaVersion",
"type": "integer",
"minimum": 2,
"maximum": 2
},
"manifests": {
"type": "array",
"items": {
"id": "https://opencontainers.org/schema/image/manifestDescriptor",
"type": "object",
"required": [
"mediaType",
"size",
"digest"
],
"properties": {
"mediaType": {
"description": "the mediatype of the referenced object",
"$ref": "defs-descriptor.json#/definitions/mediaType"
},
"size": {
"description": "the size in bytes of the referenced object",
"$ref": "defs.json#/definitions/int64"
},
"digest": {
"description": "the cryptographic checksum digest of the object, in the pattern '<algorithm>:<encoded>'",
"$ref": "defs-descriptor.json#/definitions/digest"
},
"urls": {
"description": "a list of urls from which this object may be downloaded",
"$ref": "defs-descriptor.json#/definitions/urls"
},
"platform": {
"id": "https://opencontainers.org/schema/image/platform",
"type": "object",
"required": [
"architecture",
"os"
],
"properties": {
"architecture": {
"id": "https://opencontainers.org/schema/image/platform/architecture",
"type": "string"
},
"os": {
"id": "https://opencontainers.org/schema/image/platform/os",
"type": "string"
},
"os.version": {
"id": "https://opencontainers.org/schema/image/platform/os.version",
"type": "string"
},
"os.features": {
"id": "https://opencontainers.org/schema/image/platform/os.features",
"type": "array",
"items": {
"type": "string"
}
},
"variant": {
"type": "string"
}
}
}
},
"annotations": {
"id": "https://opencontainers.org/schema/image/descriptor/annotations",
"$ref": "defs-descriptor.json#/definitions/annotations"
}
}
}
},
"annotations": {
"id": "https://opencontainers.org/schema/image/index/annotations",
"$ref": "defs-descriptor.json#/definitions/annotations"
}
},
"required": [
"schemaVersion",
"manifests"
]
}

View file

@ -1,18 +0,0 @@
{
"description": "OpenContainer Image Layout Schema",
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "https://opencontainers.org/schema/image/layout",
"type": "object",
"properties": {
"imageLayoutVersion": {
"description": "version of the OCI Image Layout (in the oci-layout file)",
"type": "string",
"enum": [
"1.0.0"
]
}
},
"required": [
"imageLayoutVersion"
]
}

View file

@ -1,34 +0,0 @@
{
"description": "OpenContainer Image Manifest Specification",
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "https://opencontainers.org/schema/image/manifest",
"type": "object",
"properties": {
"schemaVersion": {
"description": "This field specifies the image manifest schema version as an integer",
"id": "https://opencontainers.org/schema/image/manifest/schemaVersion",
"type": "integer",
"minimum": 2,
"maximum": 2
},
"config": {
"$ref": "content-descriptor.json"
},
"layers": {
"type": "array",
"minItems": 1,
"items": {
"$ref": "content-descriptor.json"
}
},
"annotations": {
"id": "https://opencontainers.org/schema/image/manifest/annotations",
"$ref": "defs-descriptor.json#/definitions/annotations"
}
},
"required": [
"schemaVersion",
"config",
"layers"
]
}

View file

@ -1,232 +0,0 @@
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema_test
import (
"strings"
"testing"
"github.com/opencontainers/image-spec/schema"
)
func TestImageIndex(t *testing.T) {
for i, tt := range []struct {
imageIndex string
fail bool
}{
// expected failure: mediaType does not match pattern
{
imageIndex: `
{
"schemaVersion": 2,
"manifests": [
{
"mediaType": "invalid",
"size": 7143,
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
"platform": {
"architecture": "ppc64le",
"os": "linux"
}
}
]
}
`,
fail: true,
},
// expected failure: manifest.size is string, expected integer
{
imageIndex: `
{
"schemaVersion": 2,
"manifests": [
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": "7682",
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
"platform": {
"architecture": "amd64",
"os": "linux"
}
}
]
}
`,
fail: true,
},
// expected failure: manifest.digest is missing, expected required
{
imageIndex: `
{
"schemaVersion": 2,
"manifests": [
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7682,
"platform": {
"architecture": "amd64",
"os": "linux"
}
}
]
}
`,
fail: true,
},
// expected failure: in the optional field platform platform.architecture is missing, expected required
{
imageIndex: `
{
"schemaVersion": 2,
"manifests": [
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7682,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
"platform": {
"os": "linux",
}
}
]
}
`,
fail: true,
},
// expected failure: invalid referenced manifest media type
{
imageIndex: `
{
"schemaVersion": 2,
"manifests": [
{
"mediaType": "invalid",
"size": 7682,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
"platform": {
"architecture": "amd64",
"os": "linux"
}
}
]
}
`,
fail: true,
},
// expected failure: empty referenced manifest media type
{
imageIndex: `
{
"schemaVersion": 2,
"manifests": [
{
"mediaType": "",
"size": 7682,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
"platform": {
"architecture": "amd64",
"os": "linux"
}
}
]
}
`,
fail: true,
},
// valid image index, with optional fields
{
imageIndex: `
{
"schemaVersion": 2,
"manifests": [
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7143,
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
"platform": {
"architecture": "ppc64le",
"os": "linux"
}
},
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7682,
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
"platform": {
"architecture": "amd64",
"os": "linux"
}
}
],
"annotations": {
"com.example.key1": "value1",
"com.example.key2": "value2"
}
}
`,
fail: false,
},
// valid image index, with required fields only
{
imageIndex: `
{
"schemaVersion": 2,
"manifests": [
{
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"size": 7143,
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
}
]
}
`,
fail: false,
},
// valid image index, with customized media type of referenced manifest
{
imageIndex: `
{
"schemaVersion": 2,
"manifests": [
{
"mediaType": "application/customized.manifest+json",
"size": 7143,
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
"platform": {
"architecture": "ppc64le",
"os": "linux"
}
}
]
}
`,
fail: false,
},
} {
r := strings.NewReader(tt.imageIndex)
err := schema.ValidatorMediaTypeImageIndex.Validate(r)
if got := err != nil; tt.fail != got {
t.Errorf("test %d: expected validation failure %t but got %t, err %v", i, tt.fail, got, err)
}
}
}

View file

@ -1,239 +0,0 @@
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema_test
import (
"strings"
"testing"
"github.com/opencontainers/image-spec/schema"
)
func TestManifest(t *testing.T) {
for i, tt := range []struct {
manifest string
fail bool
}{
// expected failure: mediaType does not match pattern
{
manifest: `
{
"schemaVersion": 2,
"config": {
"mediaType": "invalid",
"size": 1470,
"digest": "sha256:c86f7763873b6c0aae22d963bab59b4f5debbed6685761b5951584f6efb0633b"
},
"layers": [
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 148,
"digest": "sha256:c57089565e894899735d458f0fd4bb17a0f1e0df8d72da392b85c9b35ee777cd"
}
]
}
`,
fail: true,
},
// expected failure: config.size is a string, expected integer
{
manifest: `
{
"schemaVersion": 2,
"config": {
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": "1470",
"digest": "sha256:c86f7763873b6c0aae22d963bab59b4f5debbed6685761b5951584f6efb0633b"
},
"layers": [
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 148,
"digest": "sha256:c57089565e894899735d458f0fd4bb17a0f1e0df8d72da392b85c9b35ee777cd"
}
]
}
`,
fail: true,
},
// expected failure: layers.size is string, expected integer
{
manifest: `
{
"schemaVersion": 2,
"config": {
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 1470,
"digest": "sha256:c86f7763873b6c0aae22d963bab59b4f5debbed6685761b5951584f6efb0633b"
},
"layers": [
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": "675598",
"digest": "sha256:c86f7763873b6c0aae22d963bab59b4f5debbed6685761b5951584f6efb0633b"
}
]
}
`,
fail: true,
},
// valid manifest with optional fields
{
manifest: `
{
"schemaVersion": 2,
"config": {
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 1470,
"digest": "sha256:c86f7763873b6c0aae22d963bab59b4f5debbed6685761b5951584f6efb0633b"
},
"layers": [
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 675598,
"digest": "sha256:9d3dd9504c685a304985025df4ed0283e47ac9ffa9bd0326fddf4d59513f0827"
},
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 156,
"digest": "sha256:2b689805fbd00b2db1df73fae47562faac1a626d5f61744bfe29946ecff5d73d"
},
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 148,
"digest": "sha256:c57089565e894899735d458f0fd4bb17a0f1e0df8d72da392b85c9b35ee777cd"
}
],
"annotations": {
"key1": "value1",
"key2": "value2"
}
}
`,
fail: false,
},
// valid manifest with only required fields
{
manifest: `
{
"schemaVersion": 2,
"config": {
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 1470,
"digest": "sha256:c86f7763873b6c0aae22d963bab59b4f5debbed6685761b5951584f6efb0633b"
},
"layers": [
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 675598,
"digest": "sha256:9d3dd9504c685a304985025df4ed0283e47ac9ffa9bd0326fddf4d59513f0827"
},
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 156,
"digest": "sha256:2b689805fbd00b2db1df73fae47562faac1a626d5f61744bfe29946ecff5d73d"
},
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 148,
"digest": "sha256:c57089565e894899735d458f0fd4bb17a0f1e0df8d72da392b85c9b35ee777cd"
}
]
}
`,
fail: false,
},
// expected failure: empty layer, expected at least one
{
manifest: `
{
"schemaVersion": 2,
"config": {
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 1470,
"digest": "sha256:c86f7763873b6c0aae22d963bab59b4f5debbed6685761b5951584f6efb0633b"
},
"layers": []
}
`,
fail: true,
},
// expected pass: test bounds of algorithm field in digest.
{
manifest: `
{
"schemaVersion": 2,
"config": {
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 1470,
"digest": "sha256+b64:c86f7763873b6c0aae22d963bab59b4f5debbed6685761b5951584f6efb0633b"
},
"layers": [
{
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 1470,
"digest": "sha256+foo-bar:c86f7763873b6c0aae22d963bab59b4f5debbed6685761b5951584f6efb0633b"
},
{
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 1470,
"digest": "sha256.foo-bar:c86f7763873b6c0aae22d963bab59b4f5debbed6685761b5951584f6efb0633b"
},
{
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 1470,
"digest": "multihash+base58:QmRZxt2b1FVZPNqd8hsiykDL3TdBDeTSPX9Kv46HmX4Gx8"
}
]
}
`,
},
// expected failure: push bounds of algorithm field in digest too far.
{
manifest: `
{
"schemaVersion": 2,
"config": {
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 1470,
"digest": "sha256+b64:c86f7763873b6c0aae22d963bab59b4f5debbed6685761b5951584f6efb0633b"
},
"layers": [
{
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 1470,
"digest": "sha256+foo+-b:c86f7763873b6c0aae22d963bab59b4f5debbed6685761b5951584f6efb0633b"
}
]
}
`,
fail: true,
},
} {
r := strings.NewReader(tt.manifest)
err := schema.ValidatorMediaTypeManifest.Validate(r)
if got := err != nil; tt.fail != got {
t.Errorf("test %d: expected validation failure %t but got %t, err %v", i, tt.fail, got, err)
}
}
}

View file

@ -1,52 +0,0 @@
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema
import (
"net/http"
"github.com/opencontainers/image-spec/specs-go/v1"
)
// Media types for the OCI image formats
const (
ValidatorMediaTypeDescriptor Validator = v1.MediaTypeDescriptor
ValidatorMediaTypeLayoutHeader Validator = v1.MediaTypeLayoutHeader
ValidatorMediaTypeManifest Validator = v1.MediaTypeImageManifest
ValidatorMediaTypeImageIndex Validator = v1.MediaTypeImageIndex
ValidatorMediaTypeImageConfig Validator = v1.MediaTypeImageConfig
ValidatorMediaTypeImageLayer unimplemented = v1.MediaTypeImageLayer
)
var (
// fs stores the embedded http.FileSystem
// having the OCI JSON schema files in root "/".
fs = _escFS(false)
// specs maps OCI schema media types to schema files.
specs = map[Validator]string{
ValidatorMediaTypeDescriptor: "content-descriptor.json",
ValidatorMediaTypeLayoutHeader: "image-layout-schema.json",
ValidatorMediaTypeManifest: "image-manifest-schema.json",
ValidatorMediaTypeImageIndex: "image-index-schema.json",
ValidatorMediaTypeImageConfig: "config-schema.json",
}
)
// FileSystem returns an in-memory filesystem including the schema files.
// The schema files are located at the root directory.
func FileSystem() http.FileSystem {
return fs
}
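
A short sketch of reading one embedded schema through the accessor above; the file name follows the specs map in this file:

```
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/opencontainers/image-spec/schema"
)

func main() {
	f, err := schema.FileSystem().Open("/config-schema.json")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	b, err := ioutil.ReadAll(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("config-schema.json: %d bytes\n", len(b))
}
```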

View file

@ -1,191 +0,0 @@
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema_test
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
"strings"
"testing"
"github.com/opencontainers/image-spec/schema"
"github.com/pkg/errors"
"github.com/russross/blackfriday"
)
var (
errFormatInvalid = errors.New("format: invalid")
)
func TestValidateDescriptor(t *testing.T) {
validate(t, "../descriptor.md")
}
func TestValidateManifest(t *testing.T) {
validate(t, "../manifest.md")
}
func TestValidateImageIndex(t *testing.T) {
validate(t, "../image-index.md")
}
func TestValidateImageLayout(t *testing.T) {
validate(t, "../image-layout.md")
}
func TestValidateConfig(t *testing.T) {
validate(t, "../config.md")
}
// TODO(sur): include examples from all specification files
func validate(t *testing.T, name string) {
m, err := os.Open(name)
if err != nil {
t.Fatal(err)
}
defer m.Close()
examples, err := extractExamples(m)
if err != nil {
t.Fatal(err)
}
for _, example := range examples {
if example.Err == errFormatInvalid && example.Mediatype == "" { // ignore
continue
}
if example.Err != nil {
printFields(t, "error", example.Mediatype, example.Title, example.Err)
t.Error(example.Err)
continue
}
err = schema.Validator(example.Mediatype).Validate(strings.NewReader(example.Body))
if err == nil {
printFields(t, "ok", example.Mediatype, example.Title)
t.Log(example.Body, "---")
continue
}
var errs []error
if verr, ok := errors.Cause(err).(schema.ValidationError); ok {
errs = verr.Errs
} else {
printFields(t, "error", example.Mediatype, example.Title, err)
t.Error(err)
t.Log(example.Body, "---")
continue
}
for _, err := range errs {
// TODO(stevvooe): This is nearly useless without file, line no.
printFields(t, "invalid", example.Mediatype, example.Title)
t.Error(err)
fmt.Println(example.Body, "---")
continue
}
}
}
// renderer allows one to intercept fenced blocks in markdown documents.
type renderer struct {
blackfriday.Renderer
fn func(text []byte, lang string)
}
func (r *renderer) BlockCode(out *bytes.Buffer, text []byte, lang string) {
r.fn(text, lang)
r.Renderer.BlockCode(out, text, lang)
}
type example struct {
Lang string // gets raw "lang" field
Title string
Mediatype string
Body string
Err error
// TODO(stevvooe): Figure out how to keep track of revision, file, line so
// that we can trace back verification output.
}
// parseExample treats the field as a syntax,attribute tuple separated by a comma.
// Attributes are encoded as a url values.
//
// An example of this is `json,title=Foo%20Bar&mediatype=application/json. We
// get that the "lang" is json, the title is "Foo Bar" and the mediatype is
// "application/json".
//
// This preserves syntax highlighting and lets us tag examples with further
// metadata.
func parseExample(lang, body string) (e example) {
e.Lang = lang
e.Body = body
parts := strings.SplitN(lang, ",", 2)
if len(parts) < 2 {
e.Err = errFormatInvalid
return
}
m, err := url.ParseQuery(parts[1])
if err != nil {
e.Err = err
return
}
e.Mediatype = m.Get("mediatype")
e.Title = m.Get("title")
return
}
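
The attribute half of that tuple is plain URL query decoding. As a standalone sketch using only the standard library:

```
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// The part after the first comma in the fence info string.
	m, err := url.ParseQuery("title=Foo%20Bar&mediatype=application/json")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Get("title"))     // Foo Bar
	fmt.Println(m.Get("mediatype")) // application/json
}
```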
func extractExamples(rd io.Reader) ([]example, error) {
p, err := ioutil.ReadAll(rd)
if err != nil {
return nil, err
}
var examples []example
renderer := &renderer{
Renderer: blackfriday.HtmlRenderer(0, "test test", ""),
fn: func(text []byte, lang string) {
examples = append(examples, parseExample(lang, string(text)))
},
}
// just pass over the markdown and ignore the rendered result. We just want
// the side-effect of calling back for each code block.
// TODO(stevvooe): Consider just parsing these with a scanner. It will be
// faster and we can retain file, line no.
blackfriday.MarkdownOptions(p, renderer, blackfriday.Options{
Extensions: blackfriday.EXTENSION_FENCED_CODE,
})
return examples, nil
}
// printFields prints each value tab separated.
func printFields(t *testing.T, vs ...interface{}) {
var ss []string
for _, f := range vs {
ss = append(ss, fmt.Sprint(f))
}
t.Log(strings.Join(ss, "\t"))
}

View file

@ -1,160 +0,0 @@
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"regexp"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/xeipuuv/gojsonschema"
)
// Validator wraps a media type string identifier
// and implements validation against a JSON schema.
type Validator string
type validateDescendantsFunc func(r io.Reader) error
var mapValidateDescendants = map[Validator]validateDescendantsFunc{
ValidatorMediaTypeManifest: validateManifestDescendants,
ValidatorMediaTypeDescriptor: validateDescriptorDescendants,
}
// ValidationError contains all the errors that happened during validation.
type ValidationError struct {
Errs []error
}
func (e ValidationError) Error() string {
return fmt.Sprintf("%v", e.Errs)
}
// Validate validates the given reader against the schema of the wrapped media type.
func (v Validator) Validate(src io.Reader) error {
buf, err := ioutil.ReadAll(src)
if err != nil {
return errors.Wrap(err, "unable to read the document file")
}
if f, ok := mapValidateDescendants[v]; ok {
if f == nil {
return fmt.Errorf("internal error: mapValidateDescendents[%q] is nil", v)
}
err = f(bytes.NewReader(buf))
if err != nil {
return err
}
}
sl := gojsonschema.NewReferenceLoaderFileSystem("file:///"+specs[v], fs)
ml := gojsonschema.NewStringLoader(string(buf))
result, err := gojsonschema.Validate(sl, ml)
if err != nil {
return errors.Wrapf(
WrapSyntaxError(bytes.NewReader(buf), err),
"schema %s: unable to validate", v)
}
if result.Valid() {
return nil
}
errs := make([]error, 0, len(result.Errors()))
for _, desc := range result.Errors() {
errs = append(errs, fmt.Errorf("%s", desc))
}
return ValidationError{
Errs: errs,
}
}
type unimplemented string
func (v unimplemented) Validate(src io.Reader) error {
return fmt.Errorf("%s: unimplemented", v)
}
func validateManifestDescendants(r io.Reader) error {
header := v1.Manifest{}
buf, err := ioutil.ReadAll(r)
if err != nil {
return errors.Wrapf(err, "error reading the io stream")
}
err = json.Unmarshal(buf, &header)
if err != nil {
return errors.Wrap(err, "manifest format mismatch")
}
if header.Config.MediaType != string(v1.MediaTypeImageConfig) {
fmt.Printf("warning: config %s has an unknown media type: %s\n", header.Config.Digest, header.Config.MediaType)
}
for _, layer := range header.Layers {
if layer.MediaType != string(v1.MediaTypeImageLayer) &&
layer.MediaType != string(v1.MediaTypeImageLayerGzip) &&
layer.MediaType != string(v1.MediaTypeImageLayerNonDistributable) &&
layer.MediaType != string(v1.MediaTypeImageLayerNonDistributableGzip) {
fmt.Printf("warning: layer %s has an unknown media type: %s\n", layer.Digest, layer.MediaType)
}
}
return nil
}
var (
sha256EncodedRegexp = regexp.MustCompile(`^[a-f0-9]{64}$`)
sha512EncodedRegexp = regexp.MustCompile(`^[a-f0-9]{128}$`)
)
func validateDescriptorDescendants(r io.Reader) error {
header := v1.Descriptor{}
buf, err := ioutil.ReadAll(r)
if err != nil {
return errors.Wrapf(err, "error reading the io stream")
}
err = json.Unmarshal(buf, &header)
if err != nil {
return errors.Wrap(err, "descriptor format mismatch")
}
if err := header.Digest.Validate(); err != nil {
// we ignore unsupported algorithms
fmt.Printf("warning: unsupported digest: %q: %v\n", header.Digest, err)
return nil
}
switch header.Digest.Algorithm() {
case digest.SHA256:
if !sha256EncodedRegexp.MatchString(header.Digest.Hex()) {
return errors.Errorf("unexpected sha256 digest: %q", header.Digest)
}
case digest.SHA512:
if !sha512EncodedRegexp.MatchString(header.Digest.Hex()) {
return errors.Errorf("unexpected sha512 digest: %q", header.Digest)
}
}
return nil
}
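
End to end, the exported surface of this file reduces to one call per media type. A hedged sketch of validating a manifest; the manifest body is borrowed from the tests above:

```
package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/image-spec/schema"
)

func main() {
	manifest := `{
	  "schemaVersion": 2,
	  "config": {
	    "mediaType": "application/vnd.oci.image.config.v1+json",
	    "size": 1470,
	    "digest": "sha256:c86f7763873b6c0aae22d963bab59b4f5debbed6685761b5951584f6efb0633b"
	  },
	  "layers": [
	    {
	      "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
	      "size": 148,
	      "digest": "sha256:c57089565e894899735d458f0fd4bb17a0f1e0df8d72da392b85c9b35ee777cd"
	    }
	  ]
	}`
	if err := schema.ValidatorMediaTypeManifest.Validate(strings.NewReader(manifest)); err != nil {
		fmt.Println("invalid manifest:", err)
		return
	}
	fmt.Println("manifest is valid")
}
```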

View file

@ -1,67 +0,0 @@
# Open Container Initiative
## Image Format Specification
This specification defines an OCI Image, consisting of a [manifest](manifest.md), an [image index](image-index.md) (optional), a set of [filesystem layers](layer.md), and a [configuration](config.md).
The goal of this specification is to enable the creation of interoperable tools for building, transporting, and preparing a container image to run.
### Table of Contents
- [Introduction](spec.md)
- [Notational Conventions](#notational-conventions)
- [Overview](#overview)
- [Understanding the Specification](#understanding-the-specification)
- [Media Types](media-types.md)
- [Content Descriptors](descriptor.md)
- [Image Layout](image-layout.md)
- [Image Manifest](manifest.md)
- [Image Index](image-index.md)
- [Filesystem Layers](layer.md)
- [Image Configuration](config.md)
- [Annotations](annotations.md)
- [Considerations](considerations.md)
- [Extensibility](considerations.md#extensibility)
- [Canonicalization](considerations.md#canonicalization)
## Notational Conventions
The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "NOT RECOMMENDED", "MAY", and "OPTIONAL" are to be interpreted as described in [RFC 2119](http://tools.ietf.org/html/rfc2119) (Bradner, S., "Key words for use in RFCs to Indicate Requirement Levels", BCP 14, RFC 2119, March 1997).
The key words "unspecified", "undefined", and "implementation-defined" are to be interpreted as described in the [rationale for the C99 standard][c99-unspecified].
An implementation is not compliant if it fails to satisfy one or more of the MUST, MUST NOT, REQUIRED, SHALL, or SHALL NOT requirements for the protocols it implements.
An implementation is compliant if it satisfies all the MUST, MUST NOT, REQUIRED, SHALL, and SHALL NOT requirements for the protocols it implements.
## Overview
At a high level the image manifest contains metadata about the contents and dependencies of the image including the content-addressable identity of one or more [filesystem layer changeset](layer.md) archives that will be unpacked to make up the final runnable filesystem.
The image configuration includes information such as application arguments, environment variables, etc.
The image index is a higher-level manifest which points to a list of manifests and descriptors.
Typically, these manifests may provide different implementations of the image, possibly varying by platform or other attributes.
![](img/build-diagram.png)
Once built, the OCI Image can then be discovered by name, downloaded, verified by hash, trusted through a signature, and unpacked into an [OCI Runtime Bundle](https://github.com/opencontainers/runtime-spec/blob/master/bundle.md).
![](img/run-diagram.png)
### Understanding the Specification
The [OCI Image Media Types](media-types.md) document is a starting point to understanding the overall structure of the specification.
The high-level components of the spec include:
* [Image Manifest](manifest.md) - a document describing the components that make up a container image
* [Image Index](image-index.md) - an annotated index of image manifests
* [Image Layout](image-layout.md) - a filesystem layout representing the contents of an image
* [Filesystem Layer](layer.md) - a changeset that describes a container's filesystem
* [Image Configuration](config.md) - a document determining layer ordering and configuration of the image suitable for translation into a [runtime bundle][runtime-spec]
* [Descriptor](descriptor.md) - a reference that describes the type, metadata and content address of referenced content
Future versions of this specification may include the following OPTIONAL features:
* Signatures that are based on signing image content address
* Naming that is federated based on DNS and can be delegated
[c99-unspecified]: http://www.open-std.org/jtc1/sc22/wg14/www/C99RationaleV5.10.pdf#page=18
[runtime-spec]: https://github.com/opencontainers/runtime-spec

View file

@ -1,6 +0,0 @@
vendor/pkg
/runc
contrib/cmd/recvtty/recvtty
Godeps/_workspace/src/github.com/opencontainers/runc
man/man8
release

View file

@ -1,10 +0,0 @@
approve_by_comment: true
approve_regex: ^LGTM
reject_regex: ^Rejected
reset_on_push: true
author_approval: ignored
reviewers:
teams:
- runc-maintainers
name: default
required: 2

View file

@ -1,26 +0,0 @@
language: go
go:
- 1.6.x
- 1.7.x
# - master
# `make ci` uses Docker.
sudo: required
services:
- docker
env:
global:
- BUILDTAGS="seccomp apparmor selinux ambient"
before_install:
- sudo apt-get -qq update
- sudo apt-get install -y libseccomp-dev libapparmor-dev
- go get -u github.com/golang/lint/golint
- go get -u github.com/vbatts/git-validation
- go get -u github.com/mvdan/sh/cmd/shfmt
script:
- git-validation -run DCO,short-subject -v -range ${TRAVIS_COMMIT_RANGE}
- make BUILDTAGS="${BUILDTAGS}"
- make BUILDTAGS="${BUILDTAGS}" clean validate test

View file

@ -1,124 +0,0 @@
## Contribution Guidelines
### Security issues
If you are reporting a security issue, do not create an issue or file a pull
request on GitHub. Instead, disclose the issue responsibly by sending an email
to security@opencontainers.org (which is inhabited only by the maintainers of
the various OCI projects).
### Pull requests are always welcome
We are always thrilled to receive pull requests, and do our best to
process them as fast as possible. Not sure if that typo is worth a pull
request? Do it! We will appreciate it.
If your pull request is not accepted on the first try, don't be
discouraged! If there's a problem with the implementation, hopefully you
received feedback on what to improve.
We're trying very hard to keep runc lean and focused. We don't want it
to do everything for everybody. This means that we might decide against
incorporating a new feature. However, there might be a way to implement
that feature *on top of* runc.
### Conventions
Fork the repo and make changes on your fork in a feature branch:
- If it's a bugfix branch, name it XXX-something where XXX is the number of the
issue
- If it's a feature branch, create an enhancement issue to announce your
intentions, and name it XXX-something where XXX is the number of the issue.
Submit unit tests for your changes. Go has a great test framework built in; use
it! Take a look at existing tests for inspiration. Run the full test suite on
your branch before submitting a pull request.
Update the documentation when creating or modifying features. Test
your documentation changes for clarity, concision, and correctness, as
well as a clean documentation build. See ``docs/README.md`` for more
information on building the docs and how docs get released.
Write clean code. Universally formatted code promotes ease of writing, reading,
and maintenance. Always run `gofmt -s -w file.go` on each changed file before
committing your changes. Most editors have plugins that do this automatically.
Pull request descriptions should be as clear as possible and include a
reference to all the issues that they address.
Pull requests must not contain commits from other users or branches.
Commit messages must start with a capitalized and short summary (max. 50
chars) written in the imperative, followed by an optional, more detailed
explanatory text which is separated from the summary by an empty line.
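For instance, a commit message following these rules might look like this (the summary and body are purely illustrative):
```
Fix TTY resize race in console handling

Resizing the console while the container was starting could leave the
terminal in an inconsistent state. Serialize resize operations to avoid
this.

Closes #XXX
```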
Code review comments may be added to your pull request. Discuss, then make the
suggested modifications and push additional commits to your feature branch. Be
sure to post a comment after pushing. The new commits will show up in the pull
request automatically, but the reviewers will not be notified unless you
comment.
Before the pull request is merged, make sure that you squash your commits into
logical units of work using `git rebase -i` and `git push -f`. After every
commit the test suite should be passing. Include documentation changes in the
same commit so that a revert would remove all traces of the feature or fix.
Commits that fix or close an issue should include a reference like `Closes #XXX`
or `Fixes #XXX`, which will automatically close the issue when merged.
### Sign your work
The sign-off is a simple line at the end of the explanation for the
patch, which certifies that you wrote it or otherwise have the right to
pass it on as an open-source patch. The rules are pretty simple: if you
can certify the below (from
[developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe@gmail.com>
using your real name (sorry, no pseudonyms or anonymous contributions.)
You can add the sign off when creating the git commit via `git commit -s`.

View file

@ -1,60 +0,0 @@
FROM golang:1.7.4
# libseccomp in jessie is not _quite_ new enough -- need backports version
RUN echo 'deb http://httpredir.debian.org/debian jessie-backports main' > /etc/apt/sources.list.d/backports.list
RUN apt-get update && apt-get install -y \
build-essential \
curl \
gawk \
iptables \
jq \
pkg-config \
libaio-dev \
libcap-dev \
libprotobuf-dev \
libprotobuf-c0-dev \
libseccomp2/jessie-backports \
libseccomp-dev/jessie-backports \
protobuf-c-compiler \
protobuf-compiler \
python-minimal \
--no-install-recommends \
&& apt-get clean
# install bats
RUN cd /tmp \
&& git clone https://github.com/sstephenson/bats.git \
&& cd bats \
&& git reset --hard 03608115df2071fff4eaaff1605768c275e5f81f \
&& ./install.sh /usr/local \
&& rm -rf /tmp/bats
# install criu
ENV CRIU_VERSION 1.7
RUN mkdir -p /usr/src/criu \
&& curl -sSL https://github.com/xemul/criu/archive/v${CRIU_VERSION}.tar.gz | tar -v -C /usr/src/criu/ -xz --strip-components=1 \
&& cd /usr/src/criu \
&& make install-criu \
&& rm -rf /usr/src/criu
# install shfmt
RUN mkdir -p /go/src/github.com/mvdan \
&& cd /go/src/github.com/mvdan \
&& git clone https://github.com/mvdan/sh \
&& cd sh \
&& git checkout -f v0.4.0 \
&& go install ./cmd/shfmt \
&& rm -rf /go/src/github.com/mvdan
# setup a playground for us to spawn containers in
ENV ROOTFS /busybox
RUN mkdir -p ${ROOTFS} \
&& curl -o- -sSL 'https://github.com/docker-library/busybox/raw/a0558a9006ce0dd6f6ec5d56cfd3f32ebeeb815f/glibc/busybox.tar.xz' | tar xfJC - ${ROOTFS}
COPY script/tmpmount /
WORKDIR /go/src/github.com/opencontainers/runc
ENTRYPOINT ["/tmpmount"]
ADD . /go/src/github.com/opencontainers/runc

View file

@ -1,87 +0,0 @@
{
"ImportPath": "github.com/opencontainers/runc",
"GoVersion": "go1.5.3",
"GodepVersion": "v75",
"Deps": [
{
"ImportPath": "github.com/Sirupsen/logrus",
"Comment": "v0.7.3-2-g26709e2",
"Rev": "26709e2714106fb8ad40b773b711ebce25b78914"
},
{
"ImportPath": "github.com/urfave/cli",
"Comment": "v1.18.0-67-gd53eb99",
"Rev": "d53eb991652b1d438abdd34ce4bfa3ef1539108e"
},
{
"ImportPath": "github.com/coreos/go-systemd/activation",
"Comment": "v14",
"Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6"
},
{
"ImportPath": "github.com/coreos/go-systemd/dbus",
"Comment": "v14",
"Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6"
},
{
"ImportPath": "github.com/coreos/go-systemd/util",
"Comment": "v14",
"Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6"
},
{
"ImportPath": "github.com/docker/docker/pkg/mount",
"Comment": "v1.4.1-4831-g0f5c9d3",
"Rev": "0f5c9d301b9b1cca66b3ea0f9dec3b5317d3686d"
},
{
"ImportPath": "github.com/docker/docker/pkg/symlink",
"Comment": "v1.4.1-4831-g0f5c9d3",
"Rev": "0f5c9d301b9b1cca66b3ea0f9dec3b5317d3686d"
},
{
"ImportPath": "github.com/docker/docker/pkg/term",
"Comment": "v1.4.1-4831-g0f5c9d3",
"Rev": "0f5c9d301b9b1cca66b3ea0f9dec3b5317d3686d"
},
{
"ImportPath": "github.com/docker/go-units",
"Comment": "v0.1.0",
"Rev": "9b001659dd36225e356b4467c465d732e745f53d"
},
{
"ImportPath": "github.com/godbus/dbus",
"Comment": "v3",
"Rev": "c7fdd8b5cd55e87b4e1f4e372cdb1db61dd6c66f"
},
{
"ImportPath": "github.com/golang/protobuf/proto",
"Rev": "f7137ae6b19afbfd61a94b746fda3b3fe0491874"
},
{
"ImportPath": "github.com/opencontainers/runtime-spec/specs-go",
"Comment": "v1.0.0-rc3",
"Rev": "794ca7ac88234607f9d2c76da8a6e9bbbade8cb9"
},
{
"ImportPath": "github.com/seccomp/libseccomp-golang",
"Rev": "32f571b70023028bd57d9288c20efbcb237f3ce0"
},
{
"ImportPath": "github.com/syndtr/gocapability/capability",
"Rev": "e7cb7fa329f456b3855136a2642b197bad7366ba"
},
{
"ImportPath": "github.com/vishvananda/netlink",
"Rev": "1e2e08e8a2dcdacaae3f14ac44c5cfa31361f270"
},
{
"ImportPath": "github.com/mrunalp/fileutils",
"Rev": "ed869b029674c0e9ce4c0dfa781405c2d9946d08"
},
{
"ImportPath": "github.com/coreos/pkg/dlopen",
"Comment": "v3",
"Rev": "3ac0863d7acf3bc44daf49afef8919af12f704ef"
}
]
}

View file

@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

View file

@ -1,2 +0,0 @@
/pkg
/bin

View file

@ -1,8 +0,0 @@
language: go
go:
- 1.2
- 1.3
- 1.4
- tip
install:
- go get -t ./...

View file

@ -1,7 +0,0 @@
# 0.7.3
formatter/\*: allow configuration of timestamp layout
# 0.7.2
formatter/text: Add configuration option for time format (#158)

View file

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Simon Eskildsen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View file

@ -1,349 +0,0 @@
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc]
Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
many large deployments. The core API is unlikely to change much but please
version control your Logrus to make sure you aren't fetching latest `master` on
every build.**
Nicely color-coded in development (when a TTY is attached, otherwise just
plain text):
![Colored](http://i.imgur.com/PY7qMwd.png)
With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
or Splunk:
```json
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
{"level":"warning","msg":"The group's number increased tremendously!",
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```
With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not
attached, the output is compatible with the
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
```text
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
exit status 1
```
#### Example
The simplest way to use Logrus is the package-level exported logger:
```go
package main
import (
log "github.com/Sirupsen/logrus"
)
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
}).Info("A walrus appears")
}
```
Note that it's completely API-compatible with the stdlib logger, so you can
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
and you'll now have the flexibility of Logrus. You can customize it all you
want:
```go
package main
import (
"os"
log "github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/airbrake"
)
func init() {
// Log as JSON instead of the default ASCII formatter.
log.SetFormatter(&log.JSONFormatter{})
// Use the Airbrake hook to report errors that have Error severity or above to
// an exception tracker. You can create custom hooks, see the Hooks section.
log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
// Output to stderr instead of stdout, could also be a file.
log.SetOutput(os.Stderr)
// Only log the warning severity or above.
log.SetLevel(log.WarnLevel)
}
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(log.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(log.Fields{
"omg": true,
"number": 100,
}).Fatal("The ice breaks!")
// A common pattern is to re-use fields between logging statements by re-using
// the logrus.Entry returned from WithFields()
contextLogger := log.WithFields(log.Fields{
"common": "this is a common field",
"other": "I also should be logged always",
})
contextLogger.Info("I'll be logged with common and other field")
contextLogger.Info("Me too")
}
```
For more advanced usage such as logging to multiple locations from the same
application, you can also create an instance of the `logrus` Logger:
```go
package main
import (
"github.com/Sirupsen/logrus"
)
// Create a new instance of the logger. You can have any number of instances.
var log = logrus.New()
func main() {
// The API for setting attributes is a little different than the package level
// exported logger. See Godoc.
log.Out = os.Stderr
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
}
```
#### Fields
Logrus encourages careful, structured logging through logging fields instead of
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
to send event %s to topic %s with key %d")`, you should log the much more
discoverable:
```go
log.WithFields(log.Fields{
"event": event,
"topic": topic,
"key": key,
}).Fatal("Failed to send event")
```
We've found this API forces you to think about logging in a way that produces
much more useful logging messages. We've been in countless situations where just
a single added field to a log statement that was already there would've saved us
hours. The `WithFields` call is optional.
In general, with Logrus, using any of the `printf`-family functions should be
seen as a hint that you should add a field. You can, however, still use the
`printf`-family functions with Logrus.
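As a rough sketch of that hint in practice (assuming `event` and `topic` string variables, and the package-level `log` alias used in the examples above):
```go
// Instead of interpolating values into the message...
log.Errorf("Failed to send event %s to topic %s", event, topic)

// ...promote them to fields so they stay machine-parseable:
log.WithFields(log.Fields{
    "event": event,
    "topic": topic,
}).Error("Failed to send event")
```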
#### Hooks
You can add hooks for logging levels. For example, to send errors to an
exception tracking service on `Error`, `Fatal` and `Panic`, send info to
StatsD, or log to multiple places simultaneously, e.g. syslog.
Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
`init`:
```go
import (
log "github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/airbrake"
"github.com/Sirupsen/logrus/hooks/syslog"
"log/syslog"
)
func init() {
log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
log.Error("Unable to connect to local syslog daemon")
} else {
log.AddHook(hook)
}
}
```
| Hook | Description |
| ----- | ----------- |
| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. |
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) |
#### Level logging
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
```go
log.Debug("Useful debugging information.")
log.Info("Something noteworthy happened!")
log.Warn("You should probably take a look at this.")
log.Error("Something failed but I'm not quitting.")
// Calls os.Exit(1) after logging
log.Fatal("Bye.")
// Calls panic() after logging
log.Panic("I'm bailing.")
```
You can set the logging level on a `Logger`, then it will only log entries with
that severity or anything above it:
```go
// Will log anything that is info or above (warn, error, fatal, panic). Default.
log.SetLevel(log.InfoLevel)
```
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
environment if your application has that.
#### Entries
Besides the fields added with `WithField` or `WithFields` some fields are
automatically added to all logging events:
1. `time`. The timestamp when the entry was created.
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
the `WithFields` call. E.g. `Failed to send event.`
3. `level`. The logging level. E.g. `info`.
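For illustration, a single call carries all three automatic fields alongside your own (the timestamp is elided, and the exact output depends on the formatter):
```go
log.WithField("animal", "walrus").Info("A walrus appears")
// With the JSONFormatter this yields something like:
// {"animal":"walrus","level":"info","msg":"A walrus appears","time":"..."}
```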
#### Environments
Logrus has no notion of environment.
If you wish for hooks and formatters to only be used in specific environments,
you should handle that yourself. For example, if your application has a global
variable `Environment`, which is a string representation of the environment,
you could do:
```go
import (
log "github.com/Sirupsen/logrus"
)
func init() {
// do something here to set environment depending on an environment variable
// or command-line flag
if Environment == "production" {
log.SetFormatter(&log.JSONFormatter{})
} else {
// The TextFormatter is default, you don't actually have to do this.
log.SetFormatter(&log.TextFormatter{})
}
}
```
This configuration is how `logrus` was intended to be used, but JSON in
production is mostly only useful if you do log aggregation with tools like
Splunk or Logstash.
#### Formatters
The built-in logging formatters are:
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
without colors.
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
field to `true`. To force no colored output even if there is a TTY, set the
`DisableColors` field to `true`.
* `logrus.JSONFormatter`. Logs fields as JSON.
* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net).
```go
logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: "application_name"})
```
Third party logging formatters:
* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
`Fields` type (`map[string]interface{}`) with all your fields as well as the
default ones (see Entries section above):
```go
type MyJSONFormatter struct {
}
log.SetFormatter(new(MyJSONFormatter))
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
// Note this doesn't include Time, Level and Message which are available on
// the Entry. Consult `godoc` on information about those fields or read the
// source of the official loggers.
serialized, err := json.Marshal(entry.Data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}
```
#### Logger as an `io.Writer`
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
```go
w := logger.Writer()
defer w.Close()
srv := http.Server{
// create a stdlib log.Logger that writes to
// logrus.Logger.
ErrorLog: log.New(w, "", 0),
}
```
Each line written to that writer will be printed the usual way, using formatters
and hooks. The level for those entries is `info`.
#### Rotation
Log rotation is not provided with Logrus. Log rotation should be done by an
external program (like `logrotate(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.
[godoc]: https://godoc.org/github.com/Sirupsen/logrus

View file

@ -1,252 +0,0 @@
package logrus
import (
"bytes"
"fmt"
"io"
"os"
"time"
)
// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
// passed around as much as you wish to avoid field duplication.
type Entry struct {
Logger *Logger
// Contains all the fields set by the user.
Data Fields
// Time at which the log entry was created
Time time.Time
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
Level Level
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
Message string
}
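// NewEntry returns a new Entry attached to the given Logger.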
func NewEntry(logger *Logger) *Entry {
return &Entry{
Logger: logger,
// Default is three fields, give a little extra room
Data: make(Fields, 5),
}
}
// Returns a reader for the entry, which is a proxy to the formatter.
func (entry *Entry) Reader() (*bytes.Buffer, error) {
serialized, err := entry.Logger.Formatter.Format(entry)
return bytes.NewBuffer(serialized), err
}
// Returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
reader, err := entry.Reader()
if err != nil {
return "", err
}
return reader.String(), err
}
// Add a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
return entry.WithFields(Fields{key: value})
}
// Add a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry {
data := Fields{}
for k, v := range entry.Data {
data[k] = v
}
for k, v := range fields {
data[k] = v
}
return &Entry{Logger: entry.Logger, Data: data}
}
func (entry *Entry) log(level Level, msg string) {
entry.Time = time.Now()
entry.Level = level
entry.Message = msg
if err := entry.Logger.Hooks.Fire(level, entry); err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
entry.Logger.mu.Unlock()
}
reader, err := entry.Reader()
if err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
entry.Logger.mu.Unlock()
}
entry.Logger.mu.Lock()
defer entry.Logger.mu.Unlock()
_, err = io.Copy(entry.Logger.Out, reader)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
// To avoid Entry#log() returning a value that only would make sense for
// panic() to use in Entry#Panic(), we avoid the allocation by checking
// directly here.
if level <= PanicLevel {
panic(entry)
}
}
func (entry *Entry) Debug(args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.log(DebugLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Print(args ...interface{}) {
entry.Info(args...)
}
func (entry *Entry) Info(args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.log(InfoLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warn(args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.log(WarnLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warning(args ...interface{}) {
entry.Warn(args...)
}
func (entry *Entry) Error(args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.log(ErrorLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Fatal(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.log(FatalLevel, fmt.Sprint(args...))
}
os.Exit(1)
}
func (entry *Entry) Panic(args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.log(PanicLevel, fmt.Sprint(args...))
}
panic(fmt.Sprint(args...))
}
// Entry Printf family functions
func (entry *Entry) Debugf(format string, args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.Debug(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Infof(format string, args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.Info(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Printf(format string, args ...interface{}) {
entry.Infof(format, args...)
}
func (entry *Entry) Warnf(format string, args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.Warn(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Warningf(format string, args ...interface{}) {
entry.Warnf(format, args...)
}
func (entry *Entry) Errorf(format string, args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.Error(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.Panic(fmt.Sprintf(format, args...))
}
}
// Entry Println family functions
func (entry *Entry) Debugln(args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.Debug(entry.sprintlnn(args...))
}
}
func (entry *Entry) Infoln(args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.Info(entry.sprintlnn(args...))
}
}
func (entry *Entry) Println(args ...interface{}) {
entry.Infoln(args...)
}
func (entry *Entry) Warnln(args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.Warn(entry.sprintlnn(args...))
}
}
func (entry *Entry) Warningln(args ...interface{}) {
entry.Warnln(args...)
}
func (entry *Entry) Errorln(args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.Error(entry.sprintlnn(args...))
}
}
func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...))
}
}
func (entry *Entry) Panicln(args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.Panic(entry.sprintlnn(args...))
}
}
// Sprintlnn => Sprint no newline. This is to get the behavior of
// fmt.Sprintln, where spaces are always added between operands, regardless of
// their type. Instead of vendoring the Sprintln implementation to spare a
// string allocation, we do the simplest thing.
func (entry *Entry) sprintlnn(args ...interface{}) string {
msg := fmt.Sprintln(args...)
return msg[:len(msg)-1]
}

View file

@ -1,50 +0,0 @@
package main
import (
"github.com/Sirupsen/logrus"
)
var log = logrus.New()
func init() {
log.Formatter = new(logrus.JSONFormatter)
log.Formatter = new(logrus.TextFormatter) // default
log.Level = logrus.DebugLevel
}
func main() {
defer func() {
err := recover()
if err != nil {
log.WithFields(logrus.Fields{
"omg": true,
"err": err,
"number": 100,
}).Fatal("The ice breaks!")
}
}()
log.WithFields(logrus.Fields{
"animal": "walrus",
"number": 8,
}).Debug("Started observing beach")
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(logrus.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(logrus.Fields{
"temperature": -4,
}).Debug("Temperature changes")
log.WithFields(logrus.Fields{
"animal": "orca",
"size": 9009,
}).Panic("It's over 9000!")
}

View file

@ -1,30 +0,0 @@
package main
import (
"github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/airbrake"
)
var log = logrus.New()
func init() {
log.Formatter = new(logrus.TextFormatter) // default
log.Hooks.Add(airbrake.NewHook("https://example.com", "xyz", "development"))
}
func main() {
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(logrus.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(logrus.Fields{
"omg": true,
"number": 100,
}).Fatal("The ice breaks!")
}

View file

@ -1,188 +0,0 @@
package logrus
import (
"io"
)
var (
// std is the name of the standard logger in stdlib `log`
std = New()
)
func StandardLogger() *Logger {
return std
}
// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
std.mu.Lock()
defer std.mu.Unlock()
std.Out = out
}
// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
std.mu.Lock()
defer std.mu.Unlock()
std.Formatter = formatter
}
// SetLevel sets the standard logger level.
func SetLevel(level Level) {
std.mu.Lock()
defer std.mu.Unlock()
std.Level = level
}
// GetLevel returns the standard logger level.
func GetLevel() Level {
std.mu.Lock()
defer std.mu.Unlock()
return std.Level
}
// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
std.mu.Lock()
defer std.mu.Unlock()
std.Hooks.Add(hook)
}
// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithField(key string, value interface{}) *Entry {
return std.WithField(key, value)
}
// WithFields creates an entry from the standard logger and adds multiple
// fields to it. This is simply a helper for `WithField`, invoking it
// once for each field.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithFields(fields Fields) *Entry {
return std.WithFields(fields)
}
// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
std.Debug(args...)
}
// Print logs a message at level Info on the standard logger.
func Print(args ...interface{}) {
std.Print(args...)
}
// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
std.Info(args...)
}
// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
std.Warn(args...)
}
// Warning logs a message at level Warn on the standard logger.
func Warning(args ...interface{}) {
std.Warning(args...)
}
// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
std.Error(args...)
}
// Panic logs a message at level Panic on the standard logger.
func Panic(args ...interface{}) {
std.Panic(args...)
}
// Fatal logs a message at level Fatal on the standard logger.
func Fatal(args ...interface{}) {
std.Fatal(args...)
}
// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
std.Debugf(format, args...)
}
// Printf logs a message at level Info on the standard logger.
func Printf(format string, args ...interface{}) {
std.Printf(format, args...)
}
// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
std.Infof(format, args...)
}
// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
std.Warnf(format, args...)
}
// Warningf logs a message at level Warn on the standard logger.
func Warningf(format string, args ...interface{}) {
std.Warningf(format, args...)
}
// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
std.Errorf(format, args...)
}
// Panicf logs a message at level Panic on the standard logger.
func Panicf(format string, args ...interface{}) {
std.Panicf(format, args...)
}
// Fatalf logs a message at level Fatal on the standard logger.
func Fatalf(format string, args ...interface{}) {
std.Fatalf(format, args...)
}
// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
std.Debugln(args...)
}
// Println logs a message at level Info on the standard logger.
func Println(args ...interface{}) {
std.Println(args...)
}
// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
std.Infoln(args...)
}
// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
std.Warnln(args...)
}
// Warningln logs a message at level Warn on the standard logger.
func Warningln(args ...interface{}) {
std.Warningln(args...)
}
// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
std.Errorln(args...)
}
// Panicln logs a message at level Panic on the standard logger.
func Panicln(args ...interface{}) {
std.Panicln(args...)
}
// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
std.Fatalln(args...)
}

View file

@ -1,48 +0,0 @@
package logrus
import "time"
const DefaultTimestampFormat = time.RFC3339
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
// * `entry.Data["time"]`. The timestamp.
// * `entry.Data["level"]`. The level the entry was logged at.
//
// Any additional fields added with `WithField` or `WithFields` are also in
// `entry.Data`. Format is expected to return an array of bytes which are then
// logged to `logger.Out`.
type Formatter interface {
Format(*Entry) ([]byte, error)
}
// This is to not silently overwrite `time`, `msg` and `level` fields when
// dumping them. If this code wasn't there, doing:
//
// logrus.WithField("level", 1).Info("hello")
//
// would just silently drop the user-provided level. Instead, with this code,
// it'll be logged as:
//
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
_, ok := data["time"]
if ok {
data["fields.time"] = data["time"]
}
_, ok = data["msg"]
if ok {
data["fields.msg"] = data["msg"]
}
_, ok = data["level"]
if ok {
data["fields.level"] = data["level"]
}
}

View file

@ -1,56 +0,0 @@
package logstash
import (
"encoding/json"
"fmt"
"github.com/Sirupsen/logrus"
)
// Formatter generates json in logstash format.
// Logstash site: http://logstash.net/
type LogstashFormatter struct {
Type string // if not empty use for logstash type field.
// TimestampFormat sets the format used for timestamps.
TimestampFormat string
}
func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) {
entry.Data["@version"] = 1
if f.TimestampFormat == "" {
f.TimestampFormat = logrus.DefaultTimestampFormat
}
entry.Data["@timestamp"] = entry.Time.Format(f.TimestampFormat)
// set message field
v, ok := entry.Data["message"]
if ok {
entry.Data["fields.message"] = v
}
entry.Data["message"] = entry.Message
// set level field
v, ok = entry.Data["level"]
if ok {
entry.Data["fields.level"] = v
}
entry.Data["level"] = entry.Level.String()
// set type field
if f.Type != "" {
v, ok = entry.Data["type"]
if ok {
entry.Data["fields.type"] = v
}
entry.Data["type"] = f.Type
}
serialized, err := json.Marshal(entry.Data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}

View file

@ -1,34 +0,0 @@
package logrus
// A hook to be fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that hooks are not
// fired in a goroutine or through a channel with workers; if your hook's work
// is blocking and you don't want the logging calls for the levels returned
// from `Levels()` to block, you should handle that concurrency yourself.
type Hook interface {
Levels() []Level
Fire(*Entry) error
}
// Internal type for storing the hooks on a logger instance.
type levelHooks map[Level][]Hook
// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
func (hooks levelHooks) Add(hook Hook) {
for _, level := range hook.Levels() {
hooks[level] = append(hooks[level], hook)
}
}
// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
func (hooks levelHooks) Fire(level Level, entry *Entry) error {
for _, hook := range hooks[level] {
if err := hook.Fire(entry); err != nil {
return err
}
}
return nil
}

View file

@ -1,54 +0,0 @@
package airbrake
import (
"errors"
"fmt"
"github.com/Sirupsen/logrus"
"github.com/tobi/airbrake-go"
)
// AirbrakeHook to send exceptions to an exception-tracking service compatible
// with the Airbrake API.
type airbrakeHook struct {
APIKey string
Endpoint string
Environment string
}
func NewHook(endpoint, apiKey, env string) *airbrakeHook {
return &airbrakeHook{
APIKey: apiKey,
Endpoint: endpoint,
Environment: env,
}
}
func (hook *airbrakeHook) Fire(entry *logrus.Entry) error {
airbrake.ApiKey = hook.APIKey
airbrake.Endpoint = hook.Endpoint
airbrake.Environment = hook.Environment
var notifyErr error
err, ok := entry.Data["error"].(error)
if ok {
notifyErr = err
} else {
notifyErr = errors.New(entry.Message)
}
airErr := airbrake.Notify(notifyErr)
if airErr != nil {
return fmt.Errorf("Failed to send error to Airbrake: %s", airErr)
}
return nil
}
func (hook *airbrakeHook) Levels() []logrus.Level {
return []logrus.Level{
logrus.ErrorLevel,
logrus.FatalLevel,
logrus.PanicLevel,
}
}

View file

@ -1,68 +0,0 @@
package logrus_bugsnag
import (
"errors"
"github.com/Sirupsen/logrus"
"github.com/bugsnag/bugsnag-go"
)
type bugsnagHook struct{}
// ErrBugsnagUnconfigured is returned if NewBugsnagHook is called before
// bugsnag.Configure. Bugsnag must be configured before the hook.
var ErrBugsnagUnconfigured = errors.New("bugsnag must be configured before installing this logrus hook")
// ErrBugsnagSendFailed indicates that the hook failed to submit an error to
// bugsnag. The error was successfully generated, but `bugsnag.Notify()`
// failed.
type ErrBugsnagSendFailed struct {
err error
}
func (e ErrBugsnagSendFailed) Error() string {
return "failed to send error to Bugsnag: " + e.err.Error()
}
// NewBugsnagHook initializes a logrus hook which sends exceptions to an
// exception-tracking service compatible with the Bugsnag API. Before using
// this hook, you must call bugsnag.Configure(). The returned object should be
// registered with a log via `AddHook()`
//
// Entries that trigger an Error, Fatal or Panic should now include an "error"
// field to send to Bugsnag.
func NewBugsnagHook() (*bugsnagHook, error) {
if bugsnag.Config.APIKey == "" {
return nil, ErrBugsnagUnconfigured
}
return &bugsnagHook{}, nil
}
// Fire forwards an error to Bugsnag. Given a logrus.Entry, it extracts the
// "error" field (or the Message if the error isn't present) and sends it off.
func (hook *bugsnagHook) Fire(entry *logrus.Entry) error {
var notifyErr error
err, ok := entry.Data["error"].(error)
if ok {
notifyErr = err
} else {
notifyErr = errors.New(entry.Message)
}
bugsnagErr := bugsnag.Notify(notifyErr)
if bugsnagErr != nil {
return ErrBugsnagSendFailed{bugsnagErr}
}
return nil
}
// Levels enumerates the log levels on which the error should be forwarded to
// bugsnag: everything at or above the "Error" level.
func (hook *bugsnagHook) Levels() []logrus.Level {
return []logrus.Level{
logrus.ErrorLevel,
logrus.FatalLevel,
logrus.PanicLevel,
}
}

View file

@ -1,28 +0,0 @@
# Papertrail Hook for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:" />
[Papertrail](https://papertrailapp.com) provides hosted log management. Once stored in Papertrail, you can [group](http://help.papertrailapp.com/kb/how-it-works/groups/) your logs on various dimensions, [search](http://help.papertrailapp.com/kb/how-it-works/search-syntax) them, and trigger [alerts](http://help.papertrailapp.com/kb/how-it-works/alerts).
In most deployments, you'll want to send logs to Papertrail via their [remote_syslog](http://help.papertrailapp.com/kb/configuration/configuring-centralized-logging-from-text-log-files-in-unix/) daemon, which requires no application-specific configuration. This hook is intended for relatively low-volume logging, likely in managed cloud hosting deployments where installing `remote_syslog` is not possible.
## Usage
You can find your Papertrail UDP port on your [Papertrail account page](https://papertrailapp.com/account/destinations). Substitute it below for `YOUR_PAPERTRAIL_UDP_PORT`.
For `YOUR_APP_NAME`, substitute a short string that will readily identify your application or service in the logs.
```go
import (
"log/syslog"
"github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/papertrail"
)
func main() {
log := logrus.New()
hook, err := logrus_papertrail.NewPapertrailHook("logs.papertrailapp.com", YOUR_PAPERTRAIL_UDP_PORT, YOUR_APP_NAME)
if err == nil {
log.Hooks.Add(hook)
}
}
```

View file

@ -1,55 +0,0 @@
package logrus_papertrail
import (
"fmt"
"net"
"os"
"time"
"github.com/Sirupsen/logrus"
)
const (
format = "Jan 2 15:04:05"
)
// PapertrailHook to send logs to a logging service compatible with the Papertrail API.
type PapertrailHook struct {
Host string
Port int
AppName string
UDPConn net.Conn
}
// NewPapertrailHook creates a hook to be added to an instance of logger.
func NewPapertrailHook(host string, port int, appName string) (*PapertrailHook, error) {
conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", host, port))
return &PapertrailHook{host, port, appName, conn}, err
}
// Fire is called when a log event is fired.
func (hook *PapertrailHook) Fire(entry *logrus.Entry) error {
date := time.Now().Format(format)
msg, _ := entry.String()
payload := fmt.Sprintf("<22> %s %s: %s", date, hook.AppName, msg)
bytesWritten, err := hook.UDPConn.Write([]byte(payload))
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to send log line to Papertrail via UDP. Wrote %d bytes before error: %v", bytesWritten, err)
return err
}
return nil
}
// Levels returns the available logging levels.
func (hook *PapertrailHook) Levels() []logrus.Level {
return []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
logrus.WarnLevel,
logrus.InfoLevel,
logrus.DebugLevel,
}
}

View file

@ -1,61 +0,0 @@
# Sentry Hook for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:" />
[Sentry](https://getsentry.com) provides both self-hosted and hosted
solutions for exception tracking.
Both client and server are
[open source](https://github.com/getsentry/sentry).
## Usage
Every sentry application defined on the server gets a different
[DSN](https://www.getsentry.com/docs/). In the example below replace
`YOUR_DSN` with the one created for your application.
```go
import (
"github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/sentry"
)
func main() {
log := logrus.New()
hook, err := logrus_sentry.NewSentryHook(YOUR_DSN, []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
})
if err == nil {
log.Hooks.Add(hook)
}
}
```
## Special fields
Some logrus fields have a special meaning in this hook: server_name and
logger. When logs are sent to Sentry, these fields are treated differently.
- server_name (also known as hostname) is the name of the server which
is logging the event (hostname.example.com)
- logger is the part of the application which is logging the event.
In go this usually means setting it to the name of the package.
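Continuing the example above, here is a sketch of how these special fields ride along on an ordinary `WithFields` call (the values are illustrative):
```go
log.WithFields(logrus.Fields{
    "server_name": "hostname.example.com", // becomes the event's hostname
    "logger":      "mypackage",            // becomes the event's logger name
    "user_id":     42,                     // any other field ends up in Extra
}).Error("Something failed")
```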
## Timeout
`Timeout` is the time the sentry hook will wait for a response
from the sentry server.
If this time elapses with no response from
the server, an error will be returned.
If `Timeout` is set to 0, the SentryHook will not wait for a reply
and will assume a correct delivery.
The SentryHook has a default timeout of `100 milliseconds` when created
with a call to `NewSentryHook`. This can be changed by assigning a value to the `Timeout` field:
```go
hook, _ := logrus_sentry.NewSentryHook(...)
hook.Timeout = 20*time.Second
```

View file

@ -1,100 +0,0 @@
package logrus_sentry
import (
"fmt"
"time"
"github.com/Sirupsen/logrus"
"github.com/getsentry/raven-go"
)
var (
severityMap = map[logrus.Level]raven.Severity{
logrus.DebugLevel: raven.DEBUG,
logrus.InfoLevel: raven.INFO,
logrus.WarnLevel: raven.WARNING,
logrus.ErrorLevel: raven.ERROR,
logrus.FatalLevel: raven.FATAL,
logrus.PanicLevel: raven.FATAL,
}
)
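// getAndDel returns the string value stored under key and deletes it from
// the fields, reporting whether a string value was present.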
func getAndDel(d logrus.Fields, key string) (string, bool) {
var (
ok bool
v interface{}
val string
)
if v, ok = d[key]; !ok {
return "", false
}
if val, ok = v.(string); !ok {
return "", false
}
delete(d, key)
return val, true
}
// SentryHook delivers logs to a sentry server.
type SentryHook struct {
// Timeout sets the time to wait for a delivery error from the sentry server.
// If this is set to zero the server will not wait for any response and will
// consider the message correctly sent
Timeout time.Duration
client *raven.Client
levels []logrus.Level
}
// NewSentryHook creates a hook to be added to an instance of logger
// and initializes the raven client.
// This method sets the timeout to 100 milliseconds.
func NewSentryHook(DSN string, levels []logrus.Level) (*SentryHook, error) {
client, err := raven.NewClient(DSN, nil)
if err != nil {
return nil, err
}
return &SentryHook{100 * time.Millisecond, client, levels}, nil
}
// Fire is called when an event should be sent to sentry. Special fields that
// sentry uses to give more information to the server are extracted from
// entry.Data (if they are found). These fields are: logger and server_name.
func (hook *SentryHook) Fire(entry *logrus.Entry) error {
packet := &raven.Packet{
Message: entry.Message,
Timestamp: raven.Timestamp(entry.Time),
Level: severityMap[entry.Level],
Platform: "go",
}
d := entry.Data
if logger, ok := getAndDel(d, "logger"); ok {
packet.Logger = logger
}
if serverName, ok := getAndDel(d, "server_name"); ok {
packet.ServerName = serverName
}
packet.Extra = map[string]interface{}(d)
_, errCh := hook.client.Capture(packet, nil)
timeout := hook.Timeout
if timeout != 0 {
timeoutCh := time.After(timeout)
select {
case err := <-errCh:
return err
case <-timeoutCh:
return fmt.Errorf("no response from sentry server in %s", timeout)
}
}
return nil
}
// Levels returns the available logging levels.
func (hook *SentryHook) Levels() []logrus.Level {
return hook.levels
}

View file

@ -1,20 +0,0 @@
# Syslog Hooks for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>
## Usage
```go
import (
"log/syslog"
"github.com/Sirupsen/logrus"
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
)
func main() {
log := logrus.New()
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err == nil {
log.Hooks.Add(hook)
}
}
```

View file

@ -1,59 +0,0 @@
package logrus_syslog
import (
"fmt"
"github.com/Sirupsen/logrus"
"log/syslog"
"os"
)
// SyslogHook to send logs via syslog.
type SyslogHook struct {
Writer *syslog.Writer
SyslogNetwork string
SyslogRaddr string
}
// Creates a hook to be added to an instance of logger. This is called with
// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
// `if err == nil { log.Hooks.Add(hook) }`
func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
w, err := syslog.Dial(network, raddr, priority, tag)
return &SyslogHook{w, network, raddr}, err
}
func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
line, err := entry.String()
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
return err
}
switch entry.Level {
case logrus.PanicLevel:
return hook.Writer.Crit(line)
case logrus.FatalLevel:
return hook.Writer.Crit(line)
case logrus.ErrorLevel:
return hook.Writer.Err(line)
case logrus.WarnLevel:
return hook.Writer.Warning(line)
case logrus.InfoLevel:
return hook.Writer.Info(line)
case logrus.DebugLevel:
return hook.Writer.Debug(line)
default:
return nil
}
}
func (hook *SyslogHook) Levels() []logrus.Level {
return []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
logrus.WarnLevel,
logrus.InfoLevel,
logrus.DebugLevel,
}
}

View file

@ -1,40 +0,0 @@
package logrus
import (
"encoding/json"
"fmt"
)
type JSONFormatter struct {
// TimestampFormat sets the format used for marshaling timestamps.
TimestampFormat string
}
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data := make(Fields, len(entry.Data)+3)
for k, v := range entry.Data {
switch v := v.(type) {
case error:
// Otherwise errors are ignored by `encoding/json`
// https://github.com/Sirupsen/logrus/issues/137
data[k] = v.Error()
default:
data[k] = v
}
}
prefixFieldClashes(data)
if f.TimestampFormat == "" {
f.TimestampFormat = DefaultTimestampFormat
}
data["time"] = entry.Time.Format(f.TimestampFormat)
data["msg"] = entry.Message
data["level"] = entry.Level.String()
serialized, err := json.Marshal(data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}

View file

@ -1,203 +0,0 @@
package logrus
import (
"io"
"os"
"sync"
)
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
// file, or leave it default which is `os.Stdout`. You can also set this to
// something more adventurous, such as logging to Kafka.
Out io.Writer
// Hooks for the logger instance. These allow firing events based on logging
// levels and log entries. For example, to send errors to an error tracking
// service, log to StatsD or dump the core on fatal errors.
Hooks levelHooks
// All log entries pass through the formatter before being logged to Out. The
// included formatters are `TextFormatter` and `JSONFormatter`, of which
// TextFormatter is the default. In development (when a TTY is attached) it
// logs with colors, but when logging to a file it won't. You can easily
// implement your own formatter that implements the `Formatter` interface; see
// the `README` or the included formatters for examples.
Formatter Formatter
// The logging level the logger should log at. This is typically (and defaults
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
// logged. `logrus.Debug` is useful in a debug or verbose environment.
Level Level
// Used to sync writing to the log.
mu sync.Mutex
}
// Creates a new logger. Configuration should be set by changing `Formatter`,
// `Out` and `Hooks` directly on the default logger instance. You can also just
// instantiate your own:
//
// var log = &Logger{
// Out: os.Stderr,
// Formatter: new(JSONFormatter),
// Hooks: make(levelHooks),
// Level: logrus.DebugLevel,
// }
//
// It's recommended to make this a global instance called `log`.
func New() *Logger {
return &Logger{
Out: os.Stdout,
Formatter: new(TextFormatter),
Hooks: make(levelHooks),
Level: InfoLevel,
}
}
// Adds a field to the log entry. Note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic; it only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
return NewEntry(logger).WithField(key, value)
}
// Adds a struct of fields to the log entry. All it does is call `WithField` for
// each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry {
return NewEntry(logger).WithFields(fields)
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debugf(format, args...)
}
}
func (logger *Logger) Infof(format string, args ...interface{}) {
if logger.Level >= InfoLevel {
NewEntry(logger).Infof(format, args...)
}
}
func (logger *Logger) Printf(format string, args ...interface{}) {
NewEntry(logger).Printf(format, args...)
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnf(format, args...)
}
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnf(format, args...)
}
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
if logger.Level >= ErrorLevel {
NewEntry(logger).Errorf(format, args...)
}
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatalf(format, args...)
}
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
if logger.Level >= PanicLevel {
NewEntry(logger).Panicf(format, args...)
}
}
func (logger *Logger) Debug(args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debug(args...)
}
}
func (logger *Logger) Info(args ...interface{}) {
if logger.Level >= InfoLevel {
NewEntry(logger).Info(args...)
}
}
func (logger *Logger) Print(args ...interface{}) {
NewEntry(logger).Info(args...)
}
func (logger *Logger) Warn(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warn(args...)
}
}
func (logger *Logger) Warning(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warn(args...)
}
}
func (logger *Logger) Error(args ...interface{}) {
if logger.Level >= ErrorLevel {
NewEntry(logger).Error(args...)
}
}
func (logger *Logger) Fatal(args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatal(args...)
}
}
func (logger *Logger) Panic(args ...interface{}) {
if logger.Level >= PanicLevel {
NewEntry(logger).Panic(args...)
}
}
func (logger *Logger) Debugln(args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debugln(args...)
}
}
func (logger *Logger) Infoln(args ...interface{}) {
if logger.Level >= InfoLevel {
NewEntry(logger).Infoln(args...)
}
}
func (logger *Logger) Println(args ...interface{}) {
NewEntry(logger).Println(args...)
}
func (logger *Logger) Warnln(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnln(args...)
}
}
func (logger *Logger) Warningln(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnln(args...)
}
}
func (logger *Logger) Errorln(args ...interface{}) {
if logger.Level >= ErrorLevel {
NewEntry(logger).Errorln(args...)
}
}
func (logger *Logger) Fatalln(args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatalln(args...)
}
}
func (logger *Logger) Panicln(args ...interface{}) {
if logger.Level >= PanicLevel {
NewEntry(logger).Panicln(args...)
}
}

View file

@ -1,94 +0,0 @@
package logrus
import (
"fmt"
"log"
)
// Fields type, used to pass to `WithFields`.
type Fields map[string]interface{}
// Level type
type Level uint8
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
switch level {
case DebugLevel:
return "debug"
case InfoLevel:
return "info"
case WarnLevel:
return "warning"
case ErrorLevel:
return "error"
case FatalLevel:
return "fatal"
case PanicLevel:
return "panic"
}
return "unknown"
}
// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
switch lvl {
case "panic":
return PanicLevel, nil
case "fatal":
return FatalLevel, nil
case "error":
return ErrorLevel, nil
case "warn", "warning":
return WarnLevel, nil
case "info":
return InfoLevel, nil
case "debug":
return DebugLevel, nil
}
var l Level
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}
// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
// PanicLevel level, highest level of severity. Logs and then calls panic with the
// message passed to Debug, Info, ...
PanicLevel Level = iota
// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
// logging level is set to Panic.
FatalLevel
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
// Commonly used for hooks to send errors to an error tracking service.
ErrorLevel
// WarnLevel level. Non-critical entries that deserve eyes.
WarnLevel
// InfoLevel level. General operational entries about what's going on inside the
// application.
InfoLevel
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
DebugLevel
)
// Won't compile if StdLogger can't be realized by a log.Logger
var _ StdLogger = &log.Logger{}
// StdLogger is what your logrus-enabled library should take, that way
// it'll accept a stdlib logger and a logrus logger. There's no standard
// interface, this is the closest we get, unfortunately.
type StdLogger interface {
Print(...interface{})
Printf(string, ...interface{})
Println(...interface{})
Fatal(...interface{})
Fatalf(string, ...interface{})
Fatalln(...interface{})
Panic(...interface{})
Panicf(string, ...interface{})
Panicln(...interface{})
}

View file

@ -1,12 +0,0 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package logrus
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
type Termios syscall.Termios

View file

@ -1,20 +0,0 @@
/*
Go 1.2 does not define syscall.Termios for FreeBSD, so the struct layout is
declared manually below. Once the definition lands upstream (expected in Go
1.3), this file can be merged with terminal_darwin.
*/
package logrus
import (
"syscall"
)
const ioctlReadTermios = syscall.TIOCGETA
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]uint8
Ispeed uint32
Ospeed uint32
}

View file

@ -1,12 +0,0 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package logrus
import "syscall"
const ioctlReadTermios = syscall.TCGETS
type Termios syscall.Termios

View file

@ -1,21 +0,0 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin freebsd openbsd

package logrus
import (
"syscall"
"unsafe"
)
// IsTerminal returns true if stdout is attached to a terminal.
func IsTerminal() bool {
fd := syscall.Stdout
var termios Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
}
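// Hedged sketch, not part of the vendored file: IsTerminal (defined above)
// is what the text formatter consults before emitting color codes. Package
// name and import path are assumptions.
package main

import (
	"fmt"

	"github.com/Sirupsen/logrus"
)

func main() {
	// True when stdout is an interactive TTY; false under a pipe or file
	// redirect, in which case ANSI escape codes should be avoided.
	if logrus.IsTerminal() {
		fmt.Println("stdout is a terminal: colors are safe")
	} else {
		fmt.Println("stdout is redirected: prefer plain output")
	}
}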

View file

@ -1,7 +0,0 @@
package logrus
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
type Termios syscall.Termios

View file

@ -1,27 +0,0 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows

package logrus
import (
"syscall"
"unsafe"
)
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
var (
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)
// IsTerminal returns true if stdout is attached to a console.
func IsTerminal() bool {
fd := syscall.Stdout
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
}
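// Hedged, Windows-only sketch, not part of the vendored file: the same
// GetConsoleMode probe used above, reproduced standalone. The call succeeds
// only when the handle refers to a real console. Package name is an
// assumption.
package main

import (
	"fmt"
	"syscall"
	"unsafe"
)

func main() {
	kernel32 := syscall.NewLazyDLL("kernel32.dll")
	getConsoleMode := kernel32.NewProc("GetConsoleMode")
	var mode uint32
	r, _, _ := getConsoleMode.Call(uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&mode)))
	fmt.Println("stdout is a console:", r != 0)
}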

View file

@ -1,149 +0,0 @@
package logrus
import (
"bytes"
"fmt"
"sort"
"strings"
"time"
)
const (
nocolor = 0
red = 31
green = 32
yellow = 33
blue = 34
gray = 37
)
var (
baseTimestamp time.Time
isTerminal bool
)
func init() {
baseTimestamp = time.Now()
isTerminal = IsTerminal()
}
func miniTS() int {
return int(time.Since(baseTimestamp) / time.Second)
}
type TextFormatter struct {
// Set to true to bypass checking for a TTY before outputting colors.
ForceColors bool
// Force disabling colors.
DisableColors bool
	// Disable timestamp logging. Useful when the output is redirected to a
	// logging system that already adds its own timestamps.
DisableTimestamp bool
// Enable logging the full timestamp when a TTY is attached instead of just
// the time passed since beginning of execution.
FullTimestamp bool
// TimestampFormat to use for display when a full timestamp is printed
TimestampFormat string
// The fields are sorted by default for a consistent output. For applications
// that log extremely frequently and don't use the JSON formatter this may not
// be desired.
DisableSorting bool
}
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
	keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
}
if !f.DisableSorting {
sort.Strings(keys)
}
b := &bytes.Buffer{}
prefixFieldClashes(entry.Data)
isColored := (f.ForceColors || isTerminal) && !f.DisableColors
if f.TimestampFormat == "" {
f.TimestampFormat = DefaultTimestampFormat
}
if isColored {
f.printColored(b, entry, keys)
} else {
if !f.DisableTimestamp {
f.appendKeyValue(b, "time", entry.Time.Format(f.TimestampFormat))
}
f.appendKeyValue(b, "level", entry.Level.String())
f.appendKeyValue(b, "msg", entry.Message)
for _, key := range keys {
f.appendKeyValue(b, key, entry.Data[key])
}
}
b.WriteByte('\n')
return b.Bytes(), nil
}
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
var levelColor int
switch entry.Level {
case DebugLevel:
levelColor = gray
case WarnLevel:
levelColor = yellow
case ErrorLevel, FatalLevel, PanicLevel:
levelColor = red
default:
levelColor = blue
}
levelText := strings.ToUpper(entry.Level.String())[0:4]
if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
} else {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message)
}
for _, k := range keys {
v := entry.Data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
}
}
// needsQuoting reports whether text contains any character outside the safe
// set [a-zA-Z0-9.-]. The original version returned the inverted result and
// relied on its callers inverting it back; the double negation is removed
// here with identical behavior.
func needsQuoting(text string) bool {
	for _, ch := range text {
		if !((ch >= 'a' && ch <= 'z') ||
			(ch >= 'A' && ch <= 'Z') ||
			(ch >= '0' && ch <= '9') ||
			ch == '-' || ch == '.') {
			return true
		}
	}
	return false
}

func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) {
	switch v := value.(type) {
	case string:
		if needsQuoting(v) {
			fmt.Fprintf(b, "%v=%q ", key, v)
		} else {
			fmt.Fprintf(b, "%v=%s ", key, v)
		}
	case error:
		if needsQuoting(v.Error()) {
			fmt.Fprintf(b, "%v=%q ", key, v)
		} else {
			fmt.Fprintf(b, "%v=%s ", key, v)
		}
	default:
		fmt.Fprintf(b, "%v=%v ", key, value)
	}
}
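// Hedged configuration sketch, not part of the vendored file: wires up a
// TextFormatter using the options documented above. Package name, import
// path, and the chosen field values are illustrative assumptions.
package main

import (
	"os"
	"time"

	"github.com/Sirupsen/logrus"
)

func main() {
	logger := logrus.New()
	logger.Out = os.Stdout
	logger.Formatter = &logrus.TextFormatter{
		FullTimestamp:   true,         // full timestamp instead of seconds-since-start
		TimestampFormat: time.RFC3339, // passed through to entry.Time.Format
		DisableColors:   true,         // force the plain key=value branch
	}
	logger.WithFields(logrus.Fields{"animal": "walrus", "size": 10}).
		Info("a group of walrus emerges from the ocean")
}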

View file

@ -1,31 +0,0 @@
package logrus
import (
"bufio"
"io"
"runtime"
)
// Writer returns the write end of a pipe; each line written to it is
// re-emitted through the logger via Print. A finalizer closes the pipe if
// the caller forgets to.
func (logger *Logger) Writer() *io.PipeWriter {
reader, writer := io.Pipe()
go logger.writerScanner(reader)
runtime.SetFinalizer(writer, writerFinalizer)
return writer
}
func (logger *Logger) writerScanner(reader *io.PipeReader) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
logger.Print(scanner.Text())
}
if err := scanner.Err(); err != nil {
logger.Errorf("Error while reading from Writer: %s", err)
}
reader.Close()
}
func writerFinalizer(writer *io.PipeWriter) {
writer.Close()
}
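// Hedged sketch, not part of the vendored file: the classic use of Writer()
// is to funnel stdlib log output through logrus, since writerScanner above
// re-emits each line via Print. Package name and import path are assumptions.
package main

import (
	"log"

	"github.com/Sirupsen/logrus"
)

func main() {
	logger := logrus.New()
	w := logger.Writer()
	defer w.Close() // close the pipe explicitly rather than relying on the finalizer

	log.SetOutput(w)
	log.Println("this line reappears as a logrus entry")
}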

View file

@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,52 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package activation implements primitives for systemd socket activation.
package activation
import (
"os"
"strconv"
"syscall"
)
// based on: https://gist.github.com/alberts/4640792
const (
listenFdsStart = 3
)
// Files returns the *os.File set for the descriptors passed in by systemd
// socket activation (fd 3 onward), or nil if the process was not activated
// for this PID. When unsetEnv is true, LISTEN_PID and LISTEN_FDS are cleared
// so child processes do not inherit them.
func Files(unsetEnv bool) []*os.File {
if unsetEnv {
defer os.Unsetenv("LISTEN_PID")
defer os.Unsetenv("LISTEN_FDS")
}
pid, err := strconv.Atoi(os.Getenv("LISTEN_PID"))
if err != nil || pid != os.Getpid() {
return nil
}
nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS"))
if err != nil || nfds == 0 {
return nil
}
files := make([]*os.File, 0, nfds)
for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ {
syscall.CloseOnExec(fd)
files = append(files, os.NewFile(uintptr(fd), "LISTEN_FD_"+strconv.Itoa(fd)))
}
return files
}
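// Hedged sketch, not part of the vendored file: a minimal socket-activated
// server built on Files. The import path is assumed to be the upstream
// github.com/coreos/go-systemd/activation package.
package main

import (
	"log"
	"net"

	"github.com/coreos/go-systemd/activation"
)

func main() {
	files := activation.Files(true) // also scrubs LISTEN_PID/LISTEN_FDS
	if len(files) == 0 {
		log.Fatal("not socket-activated: LISTEN_FDS missing or PID mismatch")
	}
	ln, err := net.FileListener(files[0])
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	log.Printf("accepting connections on %s", ln.Addr())
}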

Some files were not shown because too many files have changed in this diff.