Switch to github.com/golang/dep for vendoring
Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
This commit is contained in:
parent
d6ab91be27
commit
8e5b17cf13
15431 changed files with 3971413 additions and 8881 deletions
120
vendor/github.com/containers/storage/hack/.vendor-helpers.sh
generated
vendored
Executable file
120
vendor/github.com/containers/storage/hack/.vendor-helpers.sh
generated
vendored
Executable file
|
@ -0,0 +1,120 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
PROJECT=github.com/containers/storage
|
||||
|
||||
# Downloads dependencies into vendor/ directory
|
||||
mkdir -p vendor
|
||||
|
||||
if ! go list github.com/containers/storage/storage &> /dev/null; then
|
||||
rm -rf .gopath
|
||||
mkdir -p .gopath/src/github.com/containers
|
||||
ln -sf ../../../.. .gopath/src/${PROJECT}
|
||||
export GOPATH="${PWD}/.gopath:${PWD}/vendor"
|
||||
fi
|
||||
export GOPATH="$GOPATH:${PWD}/vendor"
|
||||
|
||||
find='find'
|
||||
if [ "$(go env GOHOSTOS)" = 'windows' ]; then
|
||||
find='/usr/bin/find'
|
||||
fi
|
||||
|
||||
clone() {
|
||||
local vcs="$1"
|
||||
local pkg="$2"
|
||||
local rev="$3"
|
||||
local url="$4"
|
||||
|
||||
: ${url:=https://$pkg}
|
||||
local target="vendor/src/$pkg"
|
||||
|
||||
echo -n "$pkg @ $rev: "
|
||||
|
||||
if [ -d "$target" ]; then
|
||||
echo -n 'rm old, '
|
||||
rm -rf "$target"
|
||||
fi
|
||||
|
||||
echo -n 'clone, '
|
||||
case "$vcs" in
|
||||
git)
|
||||
git clone --quiet --no-checkout "$url" "$target"
|
||||
( cd "$target" && git checkout --quiet "$rev" && git reset --quiet --hard "$rev" )
|
||||
;;
|
||||
hg)
|
||||
hg clone --quiet --updaterev "$rev" "$url" "$target"
|
||||
;;
|
||||
esac
|
||||
|
||||
echo -n 'rm VCS, '
|
||||
( cd "$target" && rm -rf .{git,hg} )
|
||||
|
||||
echo -n 'rm vendor, '
|
||||
( cd "$target" && rm -rf vendor Godeps/_workspace )
|
||||
|
||||
echo done
|
||||
}
|
||||
|
||||
clean() {
|
||||
local packages=(
|
||||
"${PROJECT}/cmd/oci-storage"
|
||||
)
|
||||
local storagePlatforms=( ${STORAGE_OSARCH:="linux/amd64 linux/i386 linux/arm freebsd/amd64 freebsd/386 freebsd/arm windows/amd64"} )
|
||||
|
||||
local buildTagCombos=(
|
||||
''
|
||||
'experimental'
|
||||
)
|
||||
|
||||
echo
|
||||
|
||||
echo -n 'collecting import graph, '
|
||||
local IFS=$'\n'
|
||||
local imports=( $(
|
||||
for platform in "${storagePlatforms[@]}"; do
|
||||
export GOOS="${platform%/*}";
|
||||
export GOARCH="${platform##*/}";
|
||||
for buildTags in "${buildTagCombos[@]}"; do
|
||||
go list -e -tags "$buildTags" -f '{{join .Deps "\n"}}' "${packages[@]}"
|
||||
go list -e -tags "$buildTags" -f '{{join .TestImports "\n"}}' "${packages[@]}"
|
||||
done
|
||||
done | grep -vE "^${PROJECT}/" | sort -u
|
||||
) )
|
||||
imports=( $(go list -e -f '{{if not .Standard}}{{.ImportPath}}{{end}}' "${imports[@]}") )
|
||||
unset IFS
|
||||
|
||||
echo -n 'pruning unused packages, '
|
||||
findArgs=-false
|
||||
|
||||
for import in "${imports[@]}"; do
|
||||
[ "${#findArgs[@]}" -eq 0 ] || findArgs+=( -or )
|
||||
findArgs+=( -path "vendor/src/$import" )
|
||||
done
|
||||
|
||||
local IFS=$'\n'
|
||||
local prune=( $($find vendor -depth -type d -not '(' "${findArgs[@]}" ')') )
|
||||
unset IFS
|
||||
for dir in "${prune[@]}"; do
|
||||
$find "$dir" -maxdepth 1 -not -type d -not -name 'LICENSE*' -not -name 'COPYING*' -exec rm -v -f '{}' ';'
|
||||
rmdir "$dir" 2>/dev/null || true
|
||||
done
|
||||
|
||||
echo -n 'pruning unused files, '
|
||||
$find vendor -type f -name '*_test.go' -exec rm -v '{}' ';'
|
||||
$find vendor -type f -name 'Vagrantfile' -exec rm -v '{}' ';'
|
||||
|
||||
# These are the files that are left over after fix_rewritten_imports is run.
|
||||
echo -n 'pruning .orig files, '
|
||||
$find vendor -type f -name '*.orig' -exec rm -v '{}' ';'
|
||||
|
||||
echo done
|
||||
}
|
||||
|
||||
# Fix up hard-coded imports that refer to Godeps paths so they'll work with our vendoring
|
||||
fix_rewritten_imports () {
|
||||
local pkg="$1"
|
||||
local remove="${pkg}/Godeps/_workspace/src/"
|
||||
local target="vendor/src/$pkg"
|
||||
|
||||
echo "$pkg: fixing rewritten imports"
|
||||
$find "$target" -name \*.go -exec sed -i'.orig' -e "s|\"${remove}|\"|g" {} \;
|
||||
}
|
35
vendor/github.com/containers/storage/hack/Jenkins/W2L/postbuild.sh
generated
vendored
Normal file
35
vendor/github.com/containers/storage/hack/Jenkins/W2L/postbuild.sh
generated
vendored
Normal file
|
@ -0,0 +1,35 @@
|
|||
set +x
|
||||
set +e
|
||||
|
||||
echo ""
|
||||
echo ""
|
||||
echo "---"
|
||||
echo "Now starting POST-BUILD steps"
|
||||
echo "---"
|
||||
echo ""
|
||||
|
||||
echo INFO: Pointing to $DOCKER_HOST
|
||||
|
||||
if [ ! $(docker ps -aq | wc -l) -eq 0 ]; then
|
||||
echo INFO: Removing containers...
|
||||
! docker rm -vf $(docker ps -aq)
|
||||
fi
|
||||
|
||||
# Remove all images which don't have docker or debian in the name
|
||||
if [ ! $(docker images | sed -n '1!p' | grep -v 'docker' | grep -v 'debian' | awk '{ print $3 }' | wc -l) -eq 0 ]; then
|
||||
echo INFO: Removing images...
|
||||
! docker rmi -f $(docker images | sed -n '1!p' | grep -v 'docker' | grep -v 'debian' | awk '{ print $3 }')
|
||||
fi
|
||||
|
||||
# Kill off any instances of git, go and docker, just in case
|
||||
! taskkill -F -IM git.exe -T >& /dev/null
|
||||
! taskkill -F -IM go.exe -T >& /dev/null
|
||||
! taskkill -F -IM docker.exe -T >& /dev/null
|
||||
|
||||
# Remove everything
|
||||
! cd /c/jenkins/gopath/src/github.com/docker/docker
|
||||
! rm -rfd * >& /dev/null
|
||||
! rm -rfd .* >& /dev/null
|
||||
|
||||
echo INFO: Cleanup complete
|
||||
exit 0
|
309
vendor/github.com/containers/storage/hack/Jenkins/W2L/setup.sh
generated
vendored
Normal file
309
vendor/github.com/containers/storage/hack/Jenkins/W2L/setup.sh
generated
vendored
Normal file
|
@ -0,0 +1,309 @@
|
|||
# Jenkins CI script for Windows to Linux CI.
|
||||
# Heavily modified by John Howard (@jhowardmsft) December 2015 to try to make it more reliable.
|
||||
set +xe
|
||||
SCRIPT_VER="Wed Apr 20 18:30:19 UTC 2016"
|
||||
|
||||
# TODO to make (even) more resilient:
|
||||
# - Wait for daemon to be running before executing docker commands
|
||||
# - Check if jq is installed
|
||||
# - Make sure bash is v4.3 or later. Can't do until all Azure nodes on the latest version
|
||||
# - Make sure we are not running as local system. Can't do until all Azure nodes are updated.
|
||||
# - Error if docker versions are not equal. Can't do until all Azure nodes are updated
|
||||
# - Error if go versions are not equal. Can't do until all Azure nodes are updated.
|
||||
# - Error if running 32-bit posix tools. Probably can take from bash --version and check contains "x86_64"
|
||||
# - Warn if the CI directory cannot be deleted afterwards. Otherwise turdlets are left behind
|
||||
# - Use %systemdrive% ($SYSTEMDRIVE) rather than hard code to c: for TEMP
|
||||
# - Consider cross builing the Windows binary and copy across. That's a bit of a heavy lift. Only reason
|
||||
# for doing that is that it mirrors the actual release process for docker.exe which is cross-built.
|
||||
# However, should absolutely not be a problem if built natively, so nit-picking.
|
||||
# - Tidy up of images and containers. Either here, or in the teardown script.
|
||||
|
||||
ec=0
|
||||
uniques=1
|
||||
echo INFO: Started at `date`. Script version $SCRIPT_VER
|
||||
|
||||
|
||||
# !README!
|
||||
# There are two daemons running on the remote Linux host:
|
||||
# - outer: specified by DOCKER_HOST, this is the daemon that will build and run the inner docker daemon
|
||||
# from the sources matching the PR.
|
||||
# - inner: runs on the host network, on a port number similar to that of DOCKER_HOST but the last two digits are inverted
|
||||
# (2357 if DOCKER_HOST had port 2375; and 2367 if DOCKER_HOST had port 2376).
|
||||
# The windows integration tests are run against this inner daemon.
|
||||
|
||||
# get the ip, inner and outer ports.
|
||||
ip="${DOCKER_HOST#*://}"
|
||||
port_outer="${ip#*:}"
|
||||
# inner port is like outer port with last two digits inverted.
|
||||
port_inner=$(echo "$port_outer" | sed -E 's/(.)(.)$/\2\1/')
|
||||
ip="${ip%%:*}"
|
||||
|
||||
echo "INFO: IP=$ip PORT_OUTER=$port_outer PORT_INNER=$port_inner"
|
||||
|
||||
# If TLS is enabled
|
||||
if [ -n "$DOCKER_TLS_VERIFY" ]; then
|
||||
protocol=https
|
||||
if [ -z "$DOCKER_MACHINE_NAME" ]; then
|
||||
ec=1
|
||||
echo "ERROR: DOCKER_MACHINE_NAME is undefined"
|
||||
fi
|
||||
certs=$(echo ~/.docker/machine/machines/$DOCKER_MACHINE_NAME)
|
||||
curlopts="--cacert $certs/ca.pem --cert $certs/cert.pem --key $certs/key.pem"
|
||||
run_extra_args="-v tlscerts:/etc/docker"
|
||||
daemon_extra_args="--tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem"
|
||||
else
|
||||
protocol=http
|
||||
fi
|
||||
|
||||
# Save for use by make.sh and scripts it invokes
|
||||
export MAIN_DOCKER_HOST="tcp://$ip:$port_inner"
|
||||
|
||||
# Verify we can get the remote node to respond to _ping
|
||||
if [ $ec -eq 0 ]; then
|
||||
reply=`curl -s $curlopts $protocol://$ip:$port_outer/_ping`
|
||||
if [ "$reply" != "OK" ]; then
|
||||
ec=1
|
||||
echo "ERROR: Failed to get an 'OK' response from the docker daemon on the Linux node"
|
||||
echo " at $ip:$port_outer when called with an http request for '_ping'. This implies that"
|
||||
echo " either the daemon has crashed/is not running, or the Linux node is unavailable."
|
||||
echo
|
||||
echo " A regular ping to the remote Linux node is below. It should reply. If not, the"
|
||||
echo " machine cannot be reached at all and may have crashed. If it does reply, it is"
|
||||
echo " likely a case of the Linux daemon not running or having crashed, which requires"
|
||||
echo " further investigation."
|
||||
echo
|
||||
echo " Try re-running this CI job, or ask on #docker-dev or #docker-maintainers"
|
||||
echo " for someone to perform further diagnostics, or take this node out of rotation."
|
||||
echo
|
||||
ping $ip
|
||||
else
|
||||
echo "INFO: The Linux nodes outer daemon replied to a ping. Good!"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Get the version from the remote node. Note this may fail if jq is not installed.
|
||||
# That's probably worth checking to make sure, just in case.
|
||||
if [ $ec -eq 0 ]; then
|
||||
remoteVersion=`curl -s $curlopts $protocol://$ip:$port_outer/version | jq -c '.Version'`
|
||||
echo "INFO: Remote daemon is running docker version $remoteVersion"
|
||||
fi
|
||||
|
||||
# Compare versions. We should really fail if result is no 1. Output at end of script.
|
||||
if [ $ec -eq 0 ]; then
|
||||
uniques=`docker version | grep Version | /usr/bin/sort -u | wc -l`
|
||||
fi
|
||||
|
||||
# Make sure we are in repo
|
||||
if [ $ec -eq 0 ]; then
|
||||
if [ ! -d hack ]; then
|
||||
echo "ERROR: Are you sure this is being launched from a the root of docker repository?"
|
||||
echo " If this is a Windows CI machine, it should be c:\jenkins\gopath\src\github.com\docker\docker."
|
||||
echo " Current directory is `pwd`"
|
||||
ec=1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Are we in split binary mode?
|
||||
if [ `grep DOCKER_CLIENTONLY Makefile | wc -l` -gt 0 ]; then
|
||||
splitBinary=0
|
||||
echo "INFO: Running in single binary mode"
|
||||
else
|
||||
splitBinary=1
|
||||
echo "INFO: Running in split binary mode"
|
||||
fi
|
||||
|
||||
|
||||
# Get the commit has and verify we have something
|
||||
if [ $ec -eq 0 ]; then
|
||||
export COMMITHASH=$(git rev-parse --short HEAD)
|
||||
echo INFO: Commmit hash is $COMMITHASH
|
||||
if [ -z $COMMITHASH ]; then
|
||||
echo "ERROR: Failed to get commit hash. Are you sure this is a docker repository?"
|
||||
ec=1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Redirect to a temporary location. Check is here for local runs from Jenkins machines just in case not
|
||||
# in the right directory where the repo is cloned. We also redirect TEMP to not use the environment
|
||||
# TEMP as when running as a standard user (not local system), it otherwise exposes a bug in posix tar which
|
||||
# will cause CI to fail from Windows to Linux. Obviously it's not best practice to ever run as local system...
|
||||
if [ $ec -eq 0 ]; then
|
||||
export TEMP=/c/CI/CI-$COMMITHASH
|
||||
export TMP=$TEMP
|
||||
/usr/bin/mkdir -p $TEMP # Make sure Linux mkdir for -p
|
||||
fi
|
||||
|
||||
# Tidy up time
|
||||
if [ $ec -eq 0 ]; then
|
||||
echo INFO: Deleting pre-existing containers and images...
|
||||
|
||||
# Force remove all containers based on a previously built image with this commit
|
||||
! docker rm -f $(docker ps -aq --filter "ancestor=docker:$COMMITHASH") &>/dev/null
|
||||
|
||||
# Force remove any container with this commithash as a name
|
||||
! docker rm -f $(docker ps -aq --filter "name=docker-$COMMITHASH") &>/dev/null
|
||||
|
||||
# This SHOULD never happen, but just in case, also blow away any containers
|
||||
# that might be around.
|
||||
! if [ ! $(docker ps -aq | wc -l) -eq 0 ]; then
|
||||
echo WARN: There were some leftover containers. Cleaning them up.
|
||||
! docker rm -f $(docker ps -aq)
|
||||
fi
|
||||
|
||||
# Force remove the image if it exists
|
||||
! docker rmi -f "docker-$COMMITHASH" &>/dev/null
|
||||
fi
|
||||
|
||||
# Provide the docker version for debugging purposes. If these fail, game over.
|
||||
# as the Linux box isn't responding for some reason.
|
||||
if [ $ec -eq 0 ]; then
|
||||
echo INFO: Docker version and info of the outer daemon on the Linux node
|
||||
echo
|
||||
docker version
|
||||
ec=$?
|
||||
if [ 0 -ne $ec ]; then
|
||||
echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?"
|
||||
fi
|
||||
echo
|
||||
fi
|
||||
|
||||
# Same as above, but docker info
|
||||
if [ $ec -eq 0 ]; then
|
||||
echo
|
||||
docker info
|
||||
ec=$?
|
||||
if [ 0 -ne $ec ]; then
|
||||
echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?"
|
||||
fi
|
||||
echo
|
||||
fi
|
||||
|
||||
# build the daemon image
|
||||
if [ $ec -eq 0 ]; then
|
||||
echo "INFO: Running docker build on Linux host at $DOCKER_HOST"
|
||||
if [ $splitBinary -eq 0 ]; then
|
||||
set -x
|
||||
docker build --rm --force-rm --build-arg APT_MIRROR=cdn-fastly.deb.debian.org -t "docker:$COMMITHASH" .
|
||||
cat <<EOF | docker build --rm --force-rm -t "docker:$COMMITHASH" -
|
||||
FROM docker:$COMMITHASH
|
||||
RUN hack/make.sh binary
|
||||
RUN cp bundles/latest/binary/docker /bin/docker
|
||||
CMD docker daemon -D -H tcp://0.0.0.0:$port_inner $daemon_extra_args
|
||||
EOF
|
||||
else
|
||||
set -x
|
||||
docker build --rm --force-rm --build-arg APT_MIRROR=cdn-fastly.deb.debian.org -t "docker:$COMMITHASH" .
|
||||
cat <<EOF | docker build --rm --force-rm -t "docker:$COMMITHASH" -
|
||||
FROM docker:$COMMITHASH
|
||||
RUN hack/make.sh binary
|
||||
RUN cp bundles/latest/binary-daemon/dockerd /bin/dockerd
|
||||
CMD dockerd -D -H tcp://0.0.0.0:$port_inner $daemon_extra_args
|
||||
EOF
|
||||
|
||||
fi
|
||||
ec=$?
|
||||
set +x
|
||||
if [ 0 -ne $ec ]; then
|
||||
echo "ERROR: docker build failed"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Start the docker-in-docker daemon from the image we just built
|
||||
if [ $ec -eq 0 ]; then
|
||||
echo "INFO: Starting build of a Linux daemon to test against, and starting it..."
|
||||
set -x
|
||||
# aufs in aufs is faster than vfs in aufs
|
||||
docker run -d $run_extra_args -e DOCKER_GRAPHDRIVER=aufs --pid host --privileged --name "docker-$COMMITHASH" --net host "docker:$COMMITHASH"
|
||||
ec=$?
|
||||
set +x
|
||||
if [ 0 -ne $ec ]; then
|
||||
echo "ERROR: Failed to compile and start the linux daemon"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Build locally.
|
||||
if [ $ec -eq 0 ]; then
|
||||
echo "INFO: Starting local build of Windows binary..."
|
||||
set -x
|
||||
export TIMEOUT="120m"
|
||||
export DOCKER_HOST="tcp://$ip:$port_inner"
|
||||
# This can be removed
|
||||
export DOCKER_TEST_HOST="tcp://$ip:$port_inner"
|
||||
unset DOCKER_CLIENTONLY
|
||||
export DOCKER_REMOTE_DAEMON=1
|
||||
hack/make.sh binary
|
||||
ec=$?
|
||||
set +x
|
||||
if [ 0 -ne $ec ]; then
|
||||
echo "ERROR: Build of binary on Windows failed"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Make a local copy of the built binary and ensure that is first in our path
|
||||
if [ $ec -eq 0 ]; then
|
||||
VERSION=$(< ./VERSION)
|
||||
if [ $splitBinary -eq 0 ]; then
|
||||
cp bundles/$VERSION/binary/docker.exe $TEMP
|
||||
else
|
||||
cp bundles/$VERSION/binary-client/docker.exe $TEMP
|
||||
fi
|
||||
ec=$?
|
||||
if [ 0 -ne $ec ]; then
|
||||
echo "ERROR: Failed to copy built binary to $TEMP"
|
||||
fi
|
||||
export PATH=$TEMP:$PATH
|
||||
fi
|
||||
|
||||
# Run the integration tests
|
||||
if [ $ec -eq 0 ]; then
|
||||
echo "INFO: Running Integration tests..."
|
||||
set -x
|
||||
export DOCKER_TEST_TLS_VERIFY="$DOCKER_TLS_VERIFY"
|
||||
export DOCKER_TEST_CERT_PATH="$DOCKER_CERT_PATH"
|
||||
#export TESTFLAGS='-check.vv'
|
||||
hack/make.sh test-integration-cli
|
||||
ec=$?
|
||||
set +x
|
||||
if [ 0 -ne $ec ]; then
|
||||
echo "ERROR: CLI test failed."
|
||||
# Next line is useful, but very long winded if included
|
||||
docker -H=$MAIN_DOCKER_HOST logs --tail 100 "docker-$COMMITHASH"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Tidy up any temporary files from the CI run
|
||||
if [ ! -z $COMMITHASH ]; then
|
||||
rm -rf $TEMP
|
||||
fi
|
||||
|
||||
# CI Integrity check - ensure we are using the same version of go as present in the Dockerfile
|
||||
GOVER_DOCKERFILE=`grep 'ENV GO_VERSION' Dockerfile | awk '{print $3}'`
|
||||
GOVER_INSTALLED=`go version | awk '{print $3}'`
|
||||
if [ "${GOVER_INSTALLED:2}" != "$GOVER_DOCKERFILE" ]; then
|
||||
#ec=1 # Uncomment to make CI fail once all nodes are updated.
|
||||
echo
|
||||
echo "---------------------------------------------------------------------------"
|
||||
echo "WARN: CI should be using go version $GOVER_DOCKERFILE, but is using ${GOVER_INSTALLED:2}"
|
||||
echo " Please ping #docker-maintainers on IRC to get this CI server updated."
|
||||
echo "---------------------------------------------------------------------------"
|
||||
echo
|
||||
fi
|
||||
|
||||
# Check the Linux box is running a matching version of docker
|
||||
if [ "$uniques" -ne 1 ]; then
|
||||
ec=0 # Uncomment to make CI fail once all nodes are updated.
|
||||
echo
|
||||
echo "---------------------------------------------------------------------------"
|
||||
echo "ERROR: This CI node is not running the same version of docker as the daemon."
|
||||
echo " This is a CI configuration issue."
|
||||
echo "---------------------------------------------------------------------------"
|
||||
echo
|
||||
fi
|
||||
|
||||
# Tell the user how we did.
|
||||
if [ $ec -eq 0 ]; then
|
||||
echo INFO: Completed successfully at `date`.
|
||||
else
|
||||
echo ERROR: Failed with exitcode $ec at `date`.
|
||||
fi
|
||||
exit $ec
|
33
vendor/github.com/containers/storage/hack/dind
generated
vendored
Executable file
33
vendor/github.com/containers/storage/hack/dind
generated
vendored
Executable file
|
@ -0,0 +1,33 @@
|
|||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
# DinD: a wrapper script which allows docker to be run inside a docker container.
|
||||
# Original version by Jerome Petazzoni <jerome@docker.com>
|
||||
# See the blog post: https://blog.docker.com/2013/09/docker-can-now-run-within-docker/
|
||||
#
|
||||
# This script should be executed inside a docker container in privileged mode
|
||||
# ('docker run --privileged', introduced in docker 0.6).
|
||||
|
||||
# Usage: dind CMD [ARG...]
|
||||
|
||||
# apparmor sucks and Docker needs to know that it's in a container (c) @tianon
|
||||
export container=docker
|
||||
|
||||
if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
|
||||
mount -t securityfs none /sys/kernel/security || {
|
||||
echo >&2 'Could not mount /sys/kernel/security.'
|
||||
echo >&2 'AppArmor detection and --privileged mode might break.'
|
||||
}
|
||||
fi
|
||||
|
||||
# Mount /tmp (conditionally)
|
||||
if ! mountpoint -q /tmp; then
|
||||
mount -t tmpfs none /tmp
|
||||
fi
|
||||
|
||||
if [ $# -gt 0 ]; then
|
||||
exec "$@"
|
||||
fi
|
||||
|
||||
echo >&2 'ERROR: No command specified.'
|
||||
echo >&2 'You probably want to run hack/make.sh, or maybe a shell?'
|
15
vendor/github.com/containers/storage/hack/generate-authors.sh
generated
vendored
Executable file
15
vendor/github.com/containers/storage/hack/generate-authors.sh
generated
vendored
Executable file
|
@ -0,0 +1,15 @@
|
|||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.."
|
||||
|
||||
# see also ".mailmap" for how email addresses and names are deduplicated
|
||||
|
||||
{
|
||||
cat <<-'EOH'
|
||||
# This file lists all individuals having contributed content to the repository.
|
||||
# For how it is generated, see `hack/generate-authors.sh`.
|
||||
EOH
|
||||
echo
|
||||
git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
|
||||
} > AUTHORS
|
517
vendor/github.com/containers/storage/hack/install.sh
generated
vendored
Normal file
517
vendor/github.com/containers/storage/hack/install.sh
generated
vendored
Normal file
|
@ -0,0 +1,517 @@
|
|||
#!/bin/sh
|
||||
set -e
|
||||
#
|
||||
# This script is meant for quick & easy install via:
|
||||
# 'curl -sSL https://get.docker.com/ | sh'
|
||||
# or:
|
||||
# 'wget -qO- https://get.docker.com/ | sh'
|
||||
#
|
||||
# For test builds (ie. release candidates):
|
||||
# 'curl -fsSL https://test.docker.com/ | sh'
|
||||
# or:
|
||||
# 'wget -qO- https://test.docker.com/ | sh'
|
||||
#
|
||||
# For experimental builds:
|
||||
# 'curl -fsSL https://experimental.docker.com/ | sh'
|
||||
# or:
|
||||
# 'wget -qO- https://experimental.docker.com/ | sh'
|
||||
#
|
||||
# Docker Maintainers:
|
||||
# To update this script on https://get.docker.com,
|
||||
# use hack/release.sh during a normal release,
|
||||
# or the following one-liner for script hotfixes:
|
||||
# aws s3 cp --acl public-read hack/install.sh s3://get.docker.com/index
|
||||
#
|
||||
|
||||
url="https://get.docker.com/"
|
||||
apt_url="https://apt.dockerproject.org"
|
||||
yum_url="https://yum.dockerproject.org"
|
||||
gpg_fingerprint="58118E89F3A912897C070ADBF76221572C52609D"
|
||||
|
||||
key_servers="
|
||||
ha.pool.sks-keyservers.net
|
||||
pgp.mit.edu
|
||||
keyserver.ubuntu.com
|
||||
"
|
||||
|
||||
command_exists() {
|
||||
command -v "$@" > /dev/null 2>&1
|
||||
}
|
||||
|
||||
echo_docker_as_nonroot() {
|
||||
if command_exists docker && [ -e /var/run/docker.sock ]; then
|
||||
(
|
||||
set -x
|
||||
$sh_c 'docker version'
|
||||
) || true
|
||||
fi
|
||||
your_user=your-user
|
||||
[ "$user" != 'root' ] && your_user="$user"
|
||||
# intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output
|
||||
cat <<-EOF
|
||||
|
||||
If you would like to use Docker as a non-root user, you should now consider
|
||||
adding your user to the "docker" group with something like:
|
||||
|
||||
sudo usermod -aG docker $your_user
|
||||
|
||||
Remember that you will have to log out and back in for this to take effect!
|
||||
|
||||
EOF
|
||||
}
|
||||
|
||||
# Check if this is a forked Linux distro
|
||||
check_forked() {
|
||||
|
||||
# Check for lsb_release command existence, it usually exists in forked distros
|
||||
if command_exists lsb_release; then
|
||||
# Check if the `-u` option is supported
|
||||
set +e
|
||||
lsb_release -a -u > /dev/null 2>&1
|
||||
lsb_release_exit_code=$?
|
||||
set -e
|
||||
|
||||
# Check if the command has exited successfully, it means we're in a forked distro
|
||||
if [ "$lsb_release_exit_code" = "0" ]; then
|
||||
# Print info about current distro
|
||||
cat <<-EOF
|
||||
You're using '$lsb_dist' version '$dist_version'.
|
||||
EOF
|
||||
|
||||
# Get the upstream release info
|
||||
lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[[:space:]]')
|
||||
dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[[:space:]]')
|
||||
|
||||
# Print info about upstream distro
|
||||
cat <<-EOF
|
||||
Upstream release is '$lsb_dist' version '$dist_version'.
|
||||
EOF
|
||||
else
|
||||
if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ]; then
|
||||
# We're Debian and don't even know it!
|
||||
lsb_dist=debian
|
||||
dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')"
|
||||
case "$dist_version" in
|
||||
8|'Kali Linux 2')
|
||||
dist_version="jessie"
|
||||
;;
|
||||
7)
|
||||
dist_version="wheezy"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
rpm_import_repository_key() {
|
||||
local key=$1; shift
|
||||
local tmpdir=$(mktemp -d)
|
||||
chmod 600 "$tmpdir"
|
||||
for key_server in $key_servers ; do
|
||||
gpg --homedir "$tmpdir" --keyserver "$key_server" --recv-keys "$key" && break
|
||||
done
|
||||
gpg --homedir "$tmpdir" -k "$key" >/dev/null
|
||||
gpg --homedir "$tmpdir" --export --armor "$key" > "$tmpdir"/repo.key
|
||||
rpm --import "$tmpdir"/repo.key
|
||||
rm -rf "$tmpdir"
|
||||
}
|
||||
|
||||
semverParse() {
|
||||
major="${1%%.*}"
|
||||
minor="${1#$major.}"
|
||||
minor="${minor%%.*}"
|
||||
patch="${1#$major.$minor.}"
|
||||
patch="${patch%%[-.]*}"
|
||||
}
|
||||
|
||||
do_install() {
|
||||
case "$(uname -m)" in
|
||||
*64)
|
||||
;;
|
||||
*)
|
||||
cat >&2 <<-'EOF'
|
||||
Error: you are not using a 64bit platform.
|
||||
Docker currently only supports 64bit platforms.
|
||||
EOF
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
if command_exists docker; then
|
||||
version="$(docker -v | awk -F '[ ,]+' '{ print $3 }')"
|
||||
MAJOR_W=1
|
||||
MINOR_W=10
|
||||
|
||||
semverParse $version
|
||||
|
||||
shouldWarn=0
|
||||
if [ $major -lt $MAJOR_W ]; then
|
||||
shouldWarn=1
|
||||
fi
|
||||
|
||||
if [ $major -le $MAJOR_W ] && [ $minor -lt $MINOR_W ]; then
|
||||
shouldWarn=1
|
||||
fi
|
||||
|
||||
cat >&2 <<-'EOF'
|
||||
Warning: the "docker" command appears to already exist on this system.
|
||||
|
||||
If you already have Docker installed, this script can cause trouble, which is
|
||||
why we're displaying this warning and provide the opportunity to cancel the
|
||||
installation.
|
||||
|
||||
If you installed the current Docker package using this script and are using it
|
||||
EOF
|
||||
|
||||
if [ $shouldWarn -eq 1 ]; then
|
||||
cat >&2 <<-'EOF'
|
||||
again to update Docker, we urge you to migrate your image store before upgrading
|
||||
to v1.10+.
|
||||
|
||||
You can find instructions for this here:
|
||||
https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration
|
||||
EOF
|
||||
else
|
||||
cat >&2 <<-'EOF'
|
||||
again to update Docker, you can safely ignore this message.
|
||||
EOF
|
||||
fi
|
||||
|
||||
cat >&2 <<-'EOF'
|
||||
|
||||
You may press Ctrl+C now to abort this script.
|
||||
EOF
|
||||
( set -x; sleep 20 )
|
||||
fi
|
||||
|
||||
user="$(id -un 2>/dev/null || true)"
|
||||
|
||||
sh_c='sh -c'
|
||||
if [ "$user" != 'root' ]; then
|
||||
if command_exists sudo; then
|
||||
sh_c='sudo -E sh -c'
|
||||
elif command_exists su; then
|
||||
sh_c='su -c'
|
||||
else
|
||||
cat >&2 <<-'EOF'
|
||||
Error: this installer needs the ability to run commands as root.
|
||||
We are unable to find either "sudo" or "su" available to make this happen.
|
||||
EOF
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
curl=''
|
||||
if command_exists curl; then
|
||||
curl='curl -sSL'
|
||||
elif command_exists wget; then
|
||||
curl='wget -qO-'
|
||||
elif command_exists busybox && busybox --list-modules | grep -q wget; then
|
||||
curl='busybox wget -qO-'
|
||||
fi
|
||||
|
||||
# check to see which repo they are trying to install from
|
||||
if [ -z "$repo" ]; then
|
||||
repo='main'
|
||||
if [ "https://test.docker.com/" = "$url" ]; then
|
||||
repo='testing'
|
||||
elif [ "https://experimental.docker.com/" = "$url" ]; then
|
||||
repo='experimental'
|
||||
fi
|
||||
fi
|
||||
|
||||
# perform some very rudimentary platform detection
|
||||
lsb_dist=''
|
||||
dist_version=''
|
||||
if command_exists lsb_release; then
|
||||
lsb_dist="$(lsb_release -si)"
|
||||
fi
|
||||
if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then
|
||||
lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")"
|
||||
fi
|
||||
if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then
|
||||
lsb_dist='debian'
|
||||
fi
|
||||
if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then
|
||||
lsb_dist='fedora'
|
||||
fi
|
||||
if [ -z "$lsb_dist" ] && [ -r /etc/oracle-release ]; then
|
||||
lsb_dist='oracleserver'
|
||||
fi
|
||||
if [ -z "$lsb_dist" ] && [ -r /etc/centos-release ]; then
|
||||
lsb_dist='centos'
|
||||
fi
|
||||
if [ -z "$lsb_dist" ] && [ -r /etc/redhat-release ]; then
|
||||
lsb_dist='redhat'
|
||||
fi
|
||||
if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then
|
||||
lsb_dist="$(. /etc/os-release && echo "$ID")"
|
||||
fi
|
||||
|
||||
lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
|
||||
|
||||
# Special case redhatenterpriseserver
|
||||
if [ "${lsb_dist}" = "redhatenterpriseserver" ]; then
|
||||
# Set it to redhat, it will be changed to centos below anyways
|
||||
lsb_dist='redhat'
|
||||
fi
|
||||
|
||||
case "$lsb_dist" in
|
||||
|
||||
ubuntu)
|
||||
if command_exists lsb_release; then
|
||||
dist_version="$(lsb_release --codename | cut -f2)"
|
||||
fi
|
||||
if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then
|
||||
dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")"
|
||||
fi
|
||||
;;
|
||||
|
||||
debian)
|
||||
dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')"
|
||||
case "$dist_version" in
|
||||
8)
|
||||
dist_version="jessie"
|
||||
;;
|
||||
7)
|
||||
dist_version="wheezy"
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
|
||||
oracleserver)
|
||||
# need to switch lsb_dist to match yum repo URL
|
||||
lsb_dist="oraclelinux"
|
||||
dist_version="$(rpm -q --whatprovides redhat-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//')"
|
||||
;;
|
||||
|
||||
fedora|centos|redhat)
|
||||
dist_version="$(rpm -q --whatprovides ${lsb_dist}-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//' | sort | tail -1)"
|
||||
;;
|
||||
|
||||
*)
|
||||
if command_exists lsb_release; then
|
||||
dist_version="$(lsb_release --codename | cut -f2)"
|
||||
fi
|
||||
if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
|
||||
dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
|
||||
fi
|
||||
;;
|
||||
|
||||
|
||||
esac
|
||||
|
||||
# Check if this is a forked Linux distro
|
||||
check_forked
|
||||
|
||||
# Run setup for each distro accordingly
|
||||
case "$lsb_dist" in
|
||||
amzn)
|
||||
(
|
||||
set -x
|
||||
$sh_c 'sleep 3; yum -y -q install docker'
|
||||
)
|
||||
echo_docker_as_nonroot
|
||||
exit 0
|
||||
;;
|
||||
|
||||
'opensuse project'|opensuse)
|
||||
echo 'Going to perform the following operations:'
|
||||
if [ "$repo" != 'main' ]; then
|
||||
echo ' * add repository obs://Virtualization:containers'
|
||||
fi
|
||||
echo ' * install Docker'
|
||||
$sh_c 'echo "Press CTRL-C to abort"; sleep 3'
|
||||
|
||||
if [ "$repo" != 'main' ]; then
|
||||
# install experimental packages from OBS://Virtualization:containers
|
||||
(
|
||||
set -x
|
||||
zypper -n ar -f obs://Virtualization:containers Virtualization:containers
|
||||
rpm_import_repository_key 55A0B34D49501BB7CA474F5AA193FBB572174FC2
|
||||
)
|
||||
fi
|
||||
(
|
||||
set -x
|
||||
zypper -n install docker
|
||||
)
|
||||
echo_docker_as_nonroot
|
||||
exit 0
|
||||
;;
|
||||
'suse linux'|sle[sd])
|
||||
echo 'Going to perform the following operations:'
|
||||
if [ "$repo" != 'main' ]; then
|
||||
echo ' * add repository obs://Virtualization:containers'
|
||||
echo ' * install experimental Docker using packages NOT supported by SUSE'
|
||||
else
|
||||
echo ' * add the "Containers" module'
|
||||
echo ' * install Docker using packages supported by SUSE'
|
||||
fi
|
||||
$sh_c 'echo "Press CTRL-C to abort"; sleep 3'
|
||||
|
||||
if [ "$repo" != 'main' ]; then
|
||||
# install experimental packages from OBS://Virtualization:containers
|
||||
echo >&2 'Warning: installing experimental packages from OBS, these packages are NOT supported by SUSE'
|
||||
(
|
||||
set -x
|
||||
zypper -n ar -f obs://Virtualization:containers/SLE_12 Virtualization:containers
|
||||
rpm_import_repository_key 55A0B34D49501BB7CA474F5AA193FBB572174FC2
|
||||
)
|
||||
else
|
||||
# Add the containers module
|
||||
# Note well-1: the SLE machine must already be registered against SUSE Customer Center
|
||||
# Note well-2: the `-r ""` is required to workaround a known issue of SUSEConnect
|
||||
(
|
||||
set -x
|
||||
SUSEConnect -p sle-module-containers/12/x86_64 -r ""
|
||||
)
|
||||
fi
|
||||
(
|
||||
set -x
|
||||
zypper -n install docker
|
||||
)
|
||||
echo_docker_as_nonroot
|
||||
exit 0
|
||||
;;
|
||||
|
||||
ubuntu|debian)
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
did_apt_get_update=
|
||||
apt_get_update() {
|
||||
if [ -z "$did_apt_get_update" ]; then
|
||||
( set -x; $sh_c 'sleep 3; apt-get update' )
|
||||
did_apt_get_update=1
|
||||
fi
|
||||
}
|
||||
|
||||
# aufs is preferred over devicemapper; try to ensure the driver is available.
|
||||
if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then
|
||||
if uname -r | grep -q -- '-generic' && dpkg -l 'linux-image-*-generic' | grep -qE '^ii|^hi' 2>/dev/null; then
|
||||
kern_extras="linux-image-extra-$(uname -r) linux-image-extra-virtual"
|
||||
|
||||
apt_get_update
|
||||
( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true
|
||||
|
||||
if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then
|
||||
echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)'
|
||||
echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!'
|
||||
( set -x; sleep 10 )
|
||||
fi
|
||||
else
|
||||
echo >&2 'Warning: current kernel is not supported by the linux-image-extra-virtual'
|
||||
echo >&2 ' package. We have no AUFS support. Consider installing the packages'
|
||||
echo >&2 ' linux-image-virtual kernel and linux-image-extra-virtual for AUFS support.'
|
||||
( set -x; sleep 10 )
|
||||
fi
|
||||
fi
|
||||
|
||||
# install apparmor utils if they're missing and apparmor is enabled in the kernel
|
||||
# otherwise Docker will fail to start
|
||||
if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then
|
||||
if command -v apparmor_parser >/dev/null 2>&1; then
|
||||
echo 'apparmor is enabled in the kernel and apparmor utils were already installed'
|
||||
else
|
||||
echo 'apparmor is enabled in the kernel, but apparmor_parser missing'
|
||||
apt_get_update
|
||||
( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' )
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -e /usr/lib/apt/methods/https ]; then
|
||||
apt_get_update
|
||||
( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https ca-certificates' )
|
||||
fi
|
||||
if [ -z "$curl" ]; then
|
||||
apt_get_update
|
||||
( set -x; $sh_c 'sleep 3; apt-get install -y -q curl ca-certificates' )
|
||||
curl='curl -sSL'
|
||||
fi
|
||||
(
|
||||
set -x
|
||||
for key_server in $key_servers ; do
|
||||
$sh_c "apt-key adv --keyserver hkp://${key_server}:80 --recv-keys ${gpg_fingerprint}" && break
|
||||
done
|
||||
$sh_c "apt-key adv -k ${gpg_fingerprint} >/dev/null"
|
||||
$sh_c "mkdir -p /etc/apt/sources.list.d"
|
||||
$sh_c "echo deb \[arch=$(dpkg --print-architecture)\] ${apt_url}/repo ${lsb_dist}-${dist_version} ${repo} > /etc/apt/sources.list.d/docker.list"
|
||||
$sh_c 'sleep 3; apt-get update; apt-get install -y -q docker-engine'
|
||||
)
|
||||
echo_docker_as_nonroot
|
||||
exit 0
|
||||
;;
|
||||
|
||||
fedora|centos|redhat|oraclelinux)
|
||||
if [ "${lsb_dist}" = "redhat" ]; then
|
||||
# we use the centos repository for both redhat and centos releases
|
||||
lsb_dist='centos'
|
||||
fi
|
||||
$sh_c "cat >/etc/yum.repos.d/docker-${repo}.repo" <<-EOF
|
||||
[docker-${repo}-repo]
|
||||
name=Docker ${repo} Repository
|
||||
baseurl=${yum_url}/repo/${repo}/${lsb_dist}/${dist_version}
|
||||
enabled=1
|
||||
gpgcheck=1
|
||||
gpgkey=${yum_url}/gpg
|
||||
EOF
|
||||
if [ "$lsb_dist" = "fedora" ] && [ "$dist_version" -ge "22" ]; then
|
||||
(
|
||||
set -x
|
||||
$sh_c 'sleep 3; dnf -y -q install docker-engine'
|
||||
)
|
||||
else
|
||||
(
|
||||
set -x
|
||||
$sh_c 'sleep 3; yum -y -q install docker-engine'
|
||||
)
|
||||
fi
|
||||
echo_docker_as_nonroot
|
||||
exit 0
|
||||
;;
|
||||
gentoo)
|
||||
if [ "$url" = "https://test.docker.com/" ]; then
|
||||
# intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output
|
||||
cat >&2 <<-'EOF'
|
||||
|
||||
You appear to be trying to install the latest nightly build in Gentoo.'
|
||||
The portage tree should contain the latest stable release of Docker, but'
|
||||
if you want something more recent, you can always use the live ebuild'
|
||||
provided in the "docker" overlay available via layman. For more'
|
||||
instructions, please see the following URL:'
|
||||
|
||||
https://github.com/tianon/docker-overlay#using-this-overlay'
|
||||
|
||||
After adding the "docker" overlay, you should be able to:'
|
||||
|
||||
emerge -av =app-emulation/docker-9999'
|
||||
|
||||
EOF
|
||||
exit 1
|
||||
fi
|
||||
|
||||
(
|
||||
set -x
|
||||
$sh_c 'sleep 3; emerge app-emulation/docker'
|
||||
)
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
|
||||
# intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output
|
||||
cat >&2 <<-'EOF'
|
||||
|
||||
Either your platform is not easily detectable, is not supported by this
|
||||
installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have
|
||||
a package for Docker. Please visit the following URL for more detailed
|
||||
installation instructions:
|
||||
|
||||
https://docs.docker.com/engine/installation/
|
||||
|
||||
EOF
|
||||
exit 1
|
||||
}
|
||||
|
||||
# wrapped up in a function so that we have some protection against only getting
|
||||
# half the file during "curl | sh"
|
||||
do_install
|
262
vendor/github.com/containers/storage/hack/make.sh
generated
vendored
Executable file
262
vendor/github.com/containers/storage/hack/make.sh
generated
vendored
Executable file
|
@ -0,0 +1,262 @@
|
|||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
# This script builds various binary artifacts from a checkout of the storage
|
||||
# source code.
|
||||
#
|
||||
# Requirements:
|
||||
# - The current directory should be a checkout of the storage source code
|
||||
# (https://github.com/containers/storage). Whatever version is checked out will
|
||||
# be built.
|
||||
# - The VERSION file, at the root of the repository, should exist, and
|
||||
# will be used as the oci-storage binary version and package version.
|
||||
# - The hash of the git commit will also be included in the oci-storage binary,
|
||||
# with the suffix -unsupported if the repository isn't clean.
|
||||
# - The right way to call this script is to invoke "make" from
|
||||
# your checkout of the storage repository.
|
||||
|
||||
set -o pipefail
|
||||
|
||||
export PKG='github.com/containers/storage'
|
||||
export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
export MAKEDIR="$SCRIPTDIR/make"
|
||||
export PKG_CONFIG=${PKG_CONFIG:-pkg-config}
|
||||
|
||||
: ${TEST_REPEAT:=0}
|
||||
|
||||
# List of bundles to create when no argument is passed
|
||||
DEFAULT_BUNDLES=(
|
||||
validate-dco
|
||||
validate-gofmt
|
||||
validate-lint
|
||||
validate-pkg
|
||||
validate-test
|
||||
validate-toml
|
||||
validate-vet
|
||||
|
||||
binary
|
||||
|
||||
test-unit
|
||||
|
||||
gccgo
|
||||
cross
|
||||
)
|
||||
|
||||
VERSION=$(< ./VERSION)
|
||||
if command -v git &> /dev/null && [ -d .git ] && git rev-parse &> /dev/null; then
|
||||
GITCOMMIT=$(git rev-parse --short HEAD)
|
||||
if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
|
||||
GITCOMMIT="$GITCOMMIT-unsupported"
|
||||
echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
|
||||
echo "# GITCOMMIT = $GITCOMMIT"
|
||||
echo "# The version you are building is listed as unsupported because"
|
||||
echo "# there are some files in the git repository that are in an uncommited state."
|
||||
echo "# Commit these changes, or add to .gitignore to remove the -unsupported from the version."
|
||||
echo "# Here is the current list:"
|
||||
git status --porcelain --untracked-files=no
|
||||
echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
|
||||
fi
|
||||
! BUILDTIME=$(date --rfc-3339 ns 2> /dev/null | sed -e 's/ /T/') &> /dev/null
|
||||
if [ -z $BUILDTIME ]; then
|
||||
# If using bash 3.1 which doesn't support --rfc-3389, eg Windows CI
|
||||
BUILDTIME=$(date -u)
|
||||
fi
|
||||
elif [ -n "$GITCOMMIT" ]; then
|
||||
:
|
||||
else
|
||||
echo >&2 'error: .git directory missing and GITCOMMIT not specified'
|
||||
echo >&2 ' Please either build with the .git directory accessible, or specify the'
|
||||
echo >&2 ' exact (--short) commit hash you are building using GITCOMMIT for'
|
||||
echo >&2 ' future accountability in diagnosing build issues. Thanks!'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$AUTO_GOPATH" ]; then
|
||||
rm -rf .gopath
|
||||
mkdir -p .gopath/src/"$(dirname "${PKG}")"
|
||||
ln -sf ../../../.. .gopath/src/"${PKG}"
|
||||
export GOPATH="${PWD}/.gopath:${PWD}/vendor"
|
||||
|
||||
if [ "$(go env GOOS)" = 'solaris' ]; then
|
||||
# sys/unix is installed outside the standard library on solaris
|
||||
# TODO need to allow for version change, need to get version from go
|
||||
export GOPATH="${GOPATH}:/usr/lib/gocode/1.6.2"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! "$GOPATH" ]; then
|
||||
echo >&2 'error: missing GOPATH; please see https://golang.org/doc/code.html#GOPATH'
|
||||
echo >&2 ' alternatively, set AUTO_GOPATH=1'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$EXPERIMENTAL" ]; then
|
||||
echo >&2 '# WARNING! EXPERIMENTAL is set: building experimental features'
|
||||
echo >&2
|
||||
BUILDTAGS+=" experimental"
|
||||
fi
|
||||
|
||||
# test whether "btrfs/version.h" exists and apply btrfs_noversion appropriately
|
||||
if \
|
||||
command -v gcc &> /dev/null \
|
||||
&& ! gcc -E - -o /dev/null &> /dev/null <<<'#include <btrfs/version.h>' \
|
||||
; then
|
||||
BUILDTAGS+=' btrfs_noversion'
|
||||
fi
|
||||
|
||||
# test whether "libdevmapper.h" is new enough to support deferred remove
|
||||
# functionality.
|
||||
if \
|
||||
command -v gcc &> /dev/null \
|
||||
&& ! ( echo -e '#include <libdevmapper.h>\nint main() { dm_task_deferred_remove(NULL); }'| gcc -xc - -o /dev/null -ldevmapper &> /dev/null ) \
|
||||
; then
|
||||
BUILDTAGS+=' libdm_no_deferred_remove'
|
||||
fi
|
||||
|
||||
# Use these flags when compiling the tests and final binary
|
||||
source "$SCRIPTDIR/make/.go-autogen"
|
||||
if [ -z "$DEBUG" ]; then
|
||||
LDFLAGS='-w'
|
||||
fi
|
||||
|
||||
BUILDFLAGS=( $BUILDFLAGS "${ORIG_BUILDFLAGS[@]}" )
|
||||
|
||||
if [ "$(uname -s)" = 'FreeBSD' ]; then
|
||||
# Tell cgo the compiler is Clang, not GCC
|
||||
# https://code.google.com/p/go/source/browse/src/cmd/cgo/gcc.go?spec=svne77e74371f2340ee08622ce602e9f7b15f29d8d3&r=e6794866ebeba2bf8818b9261b54e2eef1c9e588#752
|
||||
export CC=clang
|
||||
|
||||
# "-extld clang" is a workaround for
|
||||
# https://code.google.com/p/go/issues/detail?id=6845
|
||||
LDFLAGS="$LDFLAGS -extld clang"
|
||||
fi
|
||||
|
||||
HAVE_GO_TEST_COVER=
|
||||
if \
|
||||
go help testflag | grep -- -cover > /dev/null \
|
||||
&& go tool -n cover > /dev/null 2>&1 \
|
||||
; then
|
||||
HAVE_GO_TEST_COVER=1
|
||||
fi
|
||||
TIMEOUT=5m
|
||||
|
||||
# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'.
|
||||
# You can use this to select certain tests to run, eg.
|
||||
#
|
||||
# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit
|
||||
#
|
||||
# For integration-cli test, we use [gocheck](https://labix.org/gocheck), if you want
|
||||
# to run certain tests on your local host, you should run with command:
|
||||
#
|
||||
# TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration-cli
|
||||
#
|
||||
go_test_dir() {
|
||||
dir=$1
|
||||
coverpkg=$2
|
||||
testcover=()
|
||||
testcoverprofile=()
|
||||
testbinary="$DEST/test.main"
|
||||
if [ "$HAVE_GO_TEST_COVER" ]; then
|
||||
# if our current go install has -cover, we want to use it :)
|
||||
mkdir -p "$DEST/coverprofiles"
|
||||
coverprofile="storage${dir#.}"
|
||||
coverprofile="$ABS_DEST/coverprofiles/${coverprofile//\//-}"
|
||||
testcover=( -test.cover )
|
||||
testcoverprofile=( -test.coverprofile "$coverprofile" $coverpkg )
|
||||
fi
|
||||
(
|
||||
echo '+ go test' $TESTFLAGS "${PKG}${dir#.}"
|
||||
cd "$dir"
|
||||
export DEST="$ABS_DEST" # we're in a subshell, so this is safe -- our integration-cli tests need DEST, and "cd" screws it up
|
||||
go test -c -o "$testbinary" ${testcover[@]} -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}"
|
||||
i=0
|
||||
while ((++i)); do
|
||||
test_env "$testbinary" ${testcoverprofile[@]} $TESTFLAGS
|
||||
if [ $i -gt "$TEST_REPEAT" ]; then
|
||||
break
|
||||
fi
|
||||
echo "Repeating test ($i)"
|
||||
done
|
||||
)
|
||||
}
|
||||
test_env() {
|
||||
# use "env -i" to tightly control the environment variables that bleed into the tests
|
||||
env -i \
|
||||
DEST="$DEST" \
|
||||
GOPATH="$GOPATH" \
|
||||
GOTRACEBACK=all \
|
||||
HOME="$ABS_DEST/fake-HOME" \
|
||||
PATH="$PATH" \
|
||||
TEMP="$TEMP" \
|
||||
"$@"
|
||||
}
|
||||
|
||||
# a helper to provide ".exe" when it's appropriate
|
||||
binary_extension() {
|
||||
echo -n $(go env GOEXE)
|
||||
}
|
||||
|
||||
hash_files() {
|
||||
while [ $# -gt 0 ]; do
|
||||
f="$1"
|
||||
shift
|
||||
dir="$(dirname "$f")"
|
||||
base="$(basename "$f")"
|
||||
for hashAlgo in md5 sha256; do
|
||||
if command -v "${hashAlgo}sum" &> /dev/null; then
|
||||
(
|
||||
# subshell and cd so that we get output files like:
|
||||
# $HASH oci-storage-$VERSION
|
||||
# instead of:
|
||||
# $HASH /go/src/github.com/.../$VERSION/binary/oci-storage-$VERSION
|
||||
cd "$dir"
|
||||
"${hashAlgo}sum" "$base" > "$base.$hashAlgo"
|
||||
)
|
||||
fi
|
||||
done
|
||||
done
|
||||
}
|
||||
|
||||
bundle() {
|
||||
local bundle="$1"; shift
|
||||
echo "---> Making bundle: $(basename "$bundle") (in $DEST)"
|
||||
source "$SCRIPTDIR/make/$bundle" "$@"
|
||||
}
|
||||
|
||||
main() {
|
||||
# We want this to fail if the bundles already exist and cannot be removed.
|
||||
# This is to avoid mixing bundles from different versions of the code.
|
||||
mkdir -p bundles
|
||||
if [ -e "bundles/$VERSION" ] && [ -z "$KEEPBUNDLE" ]; then
|
||||
echo "bundles/$VERSION already exists. Removing."
|
||||
rm -fr "bundles/$VERSION" && mkdir "bundles/$VERSION" || exit 1
|
||||
echo
|
||||
fi
|
||||
|
||||
if [ "$(go env GOHOSTOS)" != 'windows' ]; then
|
||||
# Windows and symlinks don't get along well
|
||||
|
||||
rm -f bundles/latest
|
||||
ln -s "$VERSION" bundles/latest
|
||||
fi
|
||||
|
||||
if [ $# -lt 1 ]; then
|
||||
bundles=(${DEFAULT_BUNDLES[@]})
|
||||
else
|
||||
bundles=($@)
|
||||
fi
|
||||
|
||||
for bundle in ${bundles[@]}; do
|
||||
export DEST="bundles/$VERSION/$(basename "$bundle")"
|
||||
# Cygdrive paths don't play well with go build -o.
|
||||
if [[ "$(uname -s)" == CYGWIN* ]]; then
|
||||
export DEST="$(cygpath -mw "$DEST")"
|
||||
fi
|
||||
mkdir -p "$DEST"
|
||||
ABS_DEST="$(cd "$DEST" && pwd -P)"
|
||||
bundle "$bundle"
|
||||
echo
|
||||
done
|
||||
}
|
||||
|
||||
main "$@"
|
64
vendor/github.com/containers/storage/hack/make/.binary
generated
vendored
Normal file
64
vendor/github.com/containers/storage/hack/make/.binary
generated
vendored
Normal file
|
@ -0,0 +1,64 @@
|
|||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
BINARY_NAME="$BINARY_SHORT_NAME-$VERSION"
|
||||
BINARY_EXTENSION="$(binary_extension)"
|
||||
BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION"
|
||||
|
||||
source "${MAKEDIR}/.go-autogen"
|
||||
|
||||
(
|
||||
export GOGC=${DOCKER_BUILD_GOGC:-1000}
|
||||
|
||||
if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then
|
||||
# must be cross-compiling!
|
||||
case "$(go env GOOS)/$(go env GOARCH)" in
|
||||
windows/amd64)
|
||||
export CC=x86_64-w64-mingw32-gcc
|
||||
export CGO_ENABLED=1
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
if [ "$(go env GOOS)" == "linux" ] ; then
|
||||
case "$(go env GOARCH)" in
|
||||
arm*|386)
|
||||
# linking for Linux on arm or x86 needs external linking to avoid
|
||||
# https://github.com/golang/go/issues/9510 until we move to Go 1.6
|
||||
if [ "$IAMSTATIC" == "true" ] ; then
|
||||
export EXTLDFLAGS_STATIC="$EXTLDFLAGS_STATIC -zmuldefs"
|
||||
export LDFLAGS_STATIC_DOCKER="$LDFLAGS_STATIC -extldflags \"$EXTLDFLAGS_STATIC\""
|
||||
|
||||
else
|
||||
export LDFLAGS="$LDFLAGS -extldflags -zmuldefs"
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
if [ "$IAMSTATIC" == "true" ] && [ "$(go env GOHOSTOS)" == "linux" ]; then
|
||||
if [ "${GOOS}/${GOARCH}" == "darwin/amd64" ]; then
|
||||
export CGO_ENABLED=1
|
||||
export CC=o64-clang
|
||||
export LDFLAGS='-linkmode external -s'
|
||||
export LDFLAGS_STATIC_DOCKER='-extld='${CC}
|
||||
else
|
||||
export BUILDFLAGS=( "${BUILDFLAGS[@]/pkcs11 /}" ) # we cannot dlopen in pkcs11 in a static binary
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "Building: $DEST/$BINARY_FULLNAME"
|
||||
go build \
|
||||
-o "$DEST/$BINARY_FULLNAME" \
|
||||
"${BUILDFLAGS[@]}" ${BUILDTAGS:+-tags "${BUILDTAGS}"} \
|
||||
-ldflags "
|
||||
$LDFLAGS
|
||||
$LDFLAGS_STATIC_DOCKER
|
||||
" \
|
||||
$SOURCE_PATH
|
||||
)
|
||||
|
||||
echo "Created binary: $DEST/$BINARY_FULLNAME"
|
||||
ln -sf "$BINARY_FULLNAME" "$DEST/$BINARY_SHORT_NAME$BINARY_EXTENSION"
|
||||
|
||||
hash_files "$DEST/$BINARY_FULLNAME"
|
5
vendor/github.com/containers/storage/hack/make/.binary-setup
generated
vendored
Normal file
5
vendor/github.com/containers/storage/hack/make/.binary-setup
generated
vendored
Normal file
|
@ -0,0 +1,5 @@
|
|||
#!/bin/bash
|
||||
|
||||
DOCKER_CLIENT_BINARY_NAME='docker'
|
||||
DOCKER_DAEMON_BINARY_NAME='dockerd'
|
||||
DOCKER_PROXY_BINARY_NAME='docker-proxy'
|
1
vendor/github.com/containers/storage/hack/make/.build-deb/compat
generated
vendored
Normal file
1
vendor/github.com/containers/storage/hack/make/.build-deb/compat
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
9
|
29
vendor/github.com/containers/storage/hack/make/.build-deb/control
generated
vendored
Normal file
29
vendor/github.com/containers/storage/hack/make/.build-deb/control
generated
vendored
Normal file
|
@ -0,0 +1,29 @@
|
|||
Source: docker-engine
|
||||
Section: admin
|
||||
Priority: optional
|
||||
Maintainer: Docker <support@docker.com>
|
||||
Standards-Version: 3.9.6
|
||||
Homepage: https://dockerproject.org
|
||||
Vcs-Browser: https://github.com/docker/docker
|
||||
Vcs-Git: git://github.com/docker/docker.git
|
||||
|
||||
Package: docker-engine
|
||||
Architecture: linux-any
|
||||
Depends: iptables, ${misc:Depends}, ${perl:Depends}, ${shlibs:Depends}
|
||||
Recommends: aufs-tools,
|
||||
ca-certificates,
|
||||
cgroupfs-mount | cgroup-lite,
|
||||
git,
|
||||
xz-utils,
|
||||
${apparmor:Recommends}
|
||||
Conflicts: docker (<< 1.5~), docker.io, lxc-docker, lxc-docker-virtual-package, docker-engine-cs
|
||||
Description: Docker: the open-source application container engine
|
||||
Docker is an open source project to build, ship and run any application as a
|
||||
lightweight container
|
||||
.
|
||||
Docker containers are both hardware-agnostic and platform-agnostic. This means
|
||||
they can run anywhere, from your laptop to the largest EC2 compute instance and
|
||||
everything in between - and they don't require you to use a particular
|
||||
language, framework or packaging system. That makes them great building blocks
|
||||
for deploying and scaling web apps, databases, and backend services without
|
||||
depending on a particular stack or provider.
|
1
vendor/github.com/containers/storage/hack/make/.build-deb/docker-engine.bash-completion
generated
vendored
Normal file
1
vendor/github.com/containers/storage/hack/make/.build-deb/docker-engine.bash-completion
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
contrib/completion/bash/docker
|
12
vendor/github.com/containers/storage/hack/make/.build-deb/docker-engine.install
generated
vendored
Normal file
12
vendor/github.com/containers/storage/hack/make/.build-deb/docker-engine.install
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
|||
#contrib/syntax/vim/doc/* /usr/share/vim/vimfiles/doc/
|
||||
#contrib/syntax/vim/ftdetect/* /usr/share/vim/vimfiles/ftdetect/
|
||||
#contrib/syntax/vim/syntax/* /usr/share/vim/vimfiles/syntax/
|
||||
contrib/*-integration usr/share/docker-engine/contrib/
|
||||
contrib/check-config.sh usr/share/docker-engine/contrib/
|
||||
contrib/completion/fish/docker.fish usr/share/fish/vendor_completions.d/
|
||||
contrib/completion/zsh/_docker usr/share/zsh/vendor-completions/
|
||||
contrib/init/systemd/docker.service lib/systemd/system/
|
||||
contrib/init/systemd/docker.socket lib/systemd/system/
|
||||
contrib/mk* usr/share/docker-engine/contrib/
|
||||
contrib/nuke-graph-directory.sh usr/share/docker-engine/contrib/
|
||||
contrib/syntax/nano/Dockerfile.nanorc usr/share/nano/
|
1
vendor/github.com/containers/storage/hack/make/.build-deb/docker-engine.manpages
generated
vendored
Normal file
1
vendor/github.com/containers/storage/hack/make/.build-deb/docker-engine.manpages
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
man/man*/*
|
20
vendor/github.com/containers/storage/hack/make/.build-deb/docker-engine.postinst
generated
vendored
Normal file
20
vendor/github.com/containers/storage/hack/make/.build-deb/docker-engine.postinst
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
|||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
case "$1" in
|
||||
configure)
|
||||
if [ -z "$2" ]; then
|
||||
if ! getent group docker > /dev/null; then
|
||||
groupadd --system docker
|
||||
fi
|
||||
fi
|
||||
;;
|
||||
abort-*)
|
||||
# How'd we get here??
|
||||
exit 1
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
#DEBHELPER#
|
1
vendor/github.com/containers/storage/hack/make/.build-deb/docs
generated
vendored
Normal file
1
vendor/github.com/containers/storage/hack/make/.build-deb/docs
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
README.md
|
54
vendor/github.com/containers/storage/hack/make/.build-deb/rules
generated
vendored
Executable file
54
vendor/github.com/containers/storage/hack/make/.build-deb/rules
generated
vendored
Executable file
|
@ -0,0 +1,54 @@
|
|||
#!/usr/bin/make -f
|
||||
|
||||
VERSION = $(shell cat VERSION)
|
||||
SYSTEMD_VERSION := $(shell dpkg-query -W -f='$${Version}\n' systemd | cut -d- -f1)
|
||||
SYSTEMD_GT_227 := $(shell [ '$(SYSTEMD_VERSION)' ] && [ '$(SYSTEMD_VERSION)' -gt 227 ] && echo true )
|
||||
|
||||
override_dh_gencontrol:
|
||||
# if we're on Ubuntu, we need to Recommends: apparmor
|
||||
echo 'apparmor:Recommends=$(shell dpkg-vendor --is Ubuntu && echo apparmor)' >> debian/docker-engine.substvars
|
||||
dh_gencontrol
|
||||
|
||||
override_dh_auto_build:
|
||||
./hack/make.sh dynbinary
|
||||
# ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here
|
||||
|
||||
override_dh_auto_test:
|
||||
./bundles/$(VERSION)/dynbinary-daemon/dockerd -v
|
||||
./bundles/$(VERSION)/dynbinary-client/docker -v
|
||||
|
||||
override_dh_strip:
|
||||
# Go has lots of problems with stripping, so just don't
|
||||
|
||||
override_dh_auto_install:
|
||||
mkdir -p debian/docker-engine/usr/bin
|
||||
cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-client/docker)" debian/docker-engine/usr/bin/docker
|
||||
cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-daemon/dockerd)" debian/docker-engine/usr/bin/dockerd
|
||||
cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-daemon/docker-proxy)" debian/docker-engine/usr/bin/docker-proxy
|
||||
cp -aT /usr/local/bin/containerd debian/docker-engine/usr/bin/docker-containerd
|
||||
cp -aT /usr/local/bin/containerd-shim debian/docker-engine/usr/bin/docker-containerd-shim
|
||||
cp -aT /usr/local/bin/ctr debian/docker-engine/usr/bin/docker-containerd-ctr
|
||||
cp -aT /usr/local/sbin/runc debian/docker-engine/usr/bin/docker-runc
|
||||
mkdir -p debian/docker-engine/usr/lib/docker
|
||||
|
||||
override_dh_installinit:
|
||||
# use "docker" as our service name, not "docker-engine"
|
||||
dh_installinit --name=docker
|
||||
ifeq (true, $(SYSTEMD_GT_227))
|
||||
$(warning "Setting TasksMax=infinity")
|
||||
sed -i -- 's/#TasksMax=infinity/TasksMax=infinity/' debian/docker-engine/lib/systemd/system/docker.service
|
||||
endif
|
||||
|
||||
override_dh_installudev:
|
||||
# match our existing priority
|
||||
dh_installudev --priority=z80
|
||||
|
||||
override_dh_install:
|
||||
dh_install
|
||||
dh_apparmor --profile-name=docker-engine -pdocker-engine
|
||||
|
||||
override_dh_shlibdeps:
|
||||
dh_shlibdeps --dpkg-shlibdeps-params=--ignore-missing-info
|
||||
|
||||
%:
|
||||
dh $@ --with=bash-completion $(shell command -v dh_systemd_enable > /dev/null 2>&1 && echo --with=systemd)
|
109
vendor/github.com/containers/storage/hack/make/.build-rpm/docker-engine-selinux.spec
generated
vendored
Normal file
109
vendor/github.com/containers/storage/hack/make/.build-rpm/docker-engine-selinux.spec
generated
vendored
Normal file
|
@ -0,0 +1,109 @@
|
|||
# Some bits borrowed from the openstack-selinux package
|
||||
Name: docker-engine-selinux
|
||||
Version: %{_version}
|
||||
Release: %{_release}%{?dist}
|
||||
Summary: SELinux Policies for the open-source application container engine
|
||||
BuildArch: noarch
|
||||
Group: Tools/Docker
|
||||
|
||||
License: GPLv2
|
||||
Source: %{name}.tar.gz
|
||||
|
||||
URL: https://dockerproject.org
|
||||
Vendor: Docker
|
||||
Packager: Docker <support@docker.com>
|
||||
|
||||
# Version of SELinux we were using
|
||||
%if 0%{?fedora} == 20
|
||||
%global selinux_policyver 3.12.1-197
|
||||
%endif # fedora 20
|
||||
%if 0%{?fedora} == 21
|
||||
%global selinux_policyver 3.13.1-105
|
||||
%endif # fedora 21
|
||||
%if 0%{?fedora} >= 22
|
||||
%global selinux_policyver 3.13.1-128
|
||||
%endif # fedora 22
|
||||
%if 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7
|
||||
%global selinux_policyver 3.13.1-23
|
||||
%endif # centos,rhel,oraclelinux 7
|
||||
|
||||
%global selinuxtype targeted
|
||||
%global moduletype services
|
||||
%global modulenames docker
|
||||
|
||||
Requires(post): selinux-policy-base >= %{selinux_policyver}, selinux-policy-targeted >= %{selinux_policyver}, policycoreutils, policycoreutils-python libselinux-utils
|
||||
BuildRequires: selinux-policy selinux-policy-devel
|
||||
|
||||
# conflicting packages
|
||||
Conflicts: docker-selinux
|
||||
|
||||
# Usage: _format var format
|
||||
# Expand 'modulenames' into various formats as needed
|
||||
# Format must contain '$x' somewhere to do anything useful
|
||||
%global _format() export %1=""; for x in %{modulenames}; do %1+=%2; %1+=" "; done;
|
||||
|
||||
# Relabel files
|
||||
%global relabel_files() \
|
||||
/sbin/restorecon -R %{_bindir}/docker %{_localstatedir}/run/docker.sock %{_localstatedir}/run/docker.pid %{_sysconfdir}/docker %{_localstatedir}/log/docker %{_localstatedir}/log/lxc %{_localstatedir}/lock/lxc %{_usr}/lib/systemd/system/docker.service /root/.docker &> /dev/null || : \
|
||||
|
||||
%description
|
||||
SELinux policy modules for use with Docker
|
||||
|
||||
%prep
|
||||
%if 0%{?centos} <= 6
|
||||
%setup -n %{name}
|
||||
%else
|
||||
%autosetup -n %{name}
|
||||
%endif
|
||||
|
||||
%build
|
||||
make SHARE="%{_datadir}" TARGETS="%{modulenames}"
|
||||
|
||||
%install
|
||||
|
||||
# Install SELinux interfaces
|
||||
%_format INTERFACES $x.if
|
||||
install -d %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype}
|
||||
install -p -m 644 $INTERFACES %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype}
|
||||
|
||||
# Install policy modules
|
||||
%_format MODULES $x.pp.bz2
|
||||
install -d %{buildroot}%{_datadir}/selinux/packages
|
||||
install -m 0644 $MODULES %{buildroot}%{_datadir}/selinux/packages
|
||||
|
||||
%post
|
||||
#
|
||||
# Install all modules in a single transaction
|
||||
#
|
||||
if [ $1 -eq 1 ]; then
|
||||
%{_sbindir}/setsebool -P -N virt_use_nfs=1 virt_sandbox_use_all_caps=1
|
||||
fi
|
||||
%_format MODULES %{_datadir}/selinux/packages/$x.pp.bz2
|
||||
%{_sbindir}/semodule -n -s %{selinuxtype} -i $MODULES
|
||||
if %{_sbindir}/selinuxenabled ; then
|
||||
%{_sbindir}/load_policy
|
||||
%relabel_files
|
||||
if [ $1 -eq 1 ]; then
|
||||
restorecon -R %{_sharedstatedir}/docker
|
||||
fi
|
||||
fi
|
||||
|
||||
%postun
|
||||
if [ $1 -eq 0 ]; then
|
||||
%{_sbindir}/semodule -n -r %{modulenames} &> /dev/null || :
|
||||
if %{_sbindir}/selinuxenabled ; then
|
||||
%{_sbindir}/load_policy
|
||||
%relabel_files
|
||||
fi
|
||||
fi
|
||||
|
||||
%files
|
||||
%doc LICENSE
|
||||
%defattr(-,root,root,0755)
|
||||
%attr(0644,root,root) %{_datadir}/selinux/packages/*.pp.bz2
|
||||
%attr(0644,root,root) %{_datadir}/selinux/devel/include/%{moduletype}/*.if
|
||||
|
||||
%changelog
|
||||
* Tue Dec 1 2015 Jessica Frazelle <acidburn@docker.com> 1.9.1-1
|
||||
- add licence to rpm
|
||||
- add selinux-policy and docker-engine-selinux rpm
|
244
vendor/github.com/containers/storage/hack/make/.build-rpm/docker-engine.spec
generated
vendored
Normal file
244
vendor/github.com/containers/storage/hack/make/.build-rpm/docker-engine.spec
generated
vendored
Normal file
|
@ -0,0 +1,244 @@
|
|||
Name: docker-engine
|
||||
Version: %{_version}
|
||||
Release: %{_release}%{?dist}
|
||||
Summary: The open-source application container engine
|
||||
Group: Tools/Docker
|
||||
|
||||
License: ASL 2.0
|
||||
Source: %{name}.tar.gz
|
||||
|
||||
URL: https://dockerproject.org
|
||||
Vendor: Docker
|
||||
Packager: Docker <support@docker.com>
|
||||
|
||||
# is_systemd conditional
|
||||
%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1210
|
||||
%global is_systemd 1
|
||||
%endif
|
||||
|
||||
# required packages for build
|
||||
# most are already in the container (see contrib/builder/rpm/ARCH/generate.sh)
|
||||
# only require systemd on those systems
|
||||
%if 0%{?is_systemd}
|
||||
%if 0%{?suse_version} >= 1210
|
||||
BuildRequires: systemd-rpm-macros
|
||||
%{?systemd_requires}
|
||||
%else
|
||||
BuildRequires: pkgconfig(systemd)
|
||||
Requires: systemd-units
|
||||
BuildRequires: pkgconfig(libsystemd-journal)
|
||||
%endif
|
||||
%else
|
||||
Requires(post): chkconfig
|
||||
Requires(preun): chkconfig
|
||||
# This is for /sbin/service
|
||||
Requires(preun): initscripts
|
||||
%endif
|
||||
|
||||
# required packages on install
|
||||
Requires: /bin/sh
|
||||
Requires: iptables
|
||||
%if !0%{?suse_version}
|
||||
Requires: libcgroup
|
||||
%else
|
||||
Requires: libcgroup1
|
||||
%endif
|
||||
Requires: tar
|
||||
Requires: xz
|
||||
%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7
|
||||
# Resolves: rhbz#1165615
|
||||
Requires: device-mapper-libs >= 1.02.90-1
|
||||
%endif
|
||||
%if 0%{?oraclelinux} >= 6
|
||||
# Require Oracle Unbreakable Enterprise Kernel R4 and newer device-mapper
|
||||
Requires: kernel-uek >= 4.1
|
||||
Requires: device-mapper >= 1.02.90-2
|
||||
%endif
|
||||
|
||||
# docker-selinux conditional
|
||||
%if 0%{?fedora} >= 20 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7
|
||||
%global with_selinux 1
|
||||
%endif
|
||||
|
||||
# DWZ problem with multiple golang binary, see bug
|
||||
# https://bugzilla.redhat.com/show_bug.cgi?id=995136#c12
|
||||
%if 0%{?fedora} >= 20 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7
|
||||
%global _dwz_low_mem_die_limit 0
|
||||
%endif
|
||||
|
||||
# start if with_selinux
|
||||
%if 0%{?with_selinux}
|
||||
# Version of SELinux we were using
|
||||
%if 0%{?fedora} == 20
|
||||
%global selinux_policyver 3.12.1-197
|
||||
%endif # fedora 20
|
||||
%if 0%{?fedora} == 21
|
||||
%global selinux_policyver 3.13.1-105
|
||||
%endif # fedora 21
|
||||
%if 0%{?fedora} >= 22
|
||||
%global selinux_policyver 3.13.1-128
|
||||
%endif # fedora 22
|
||||
%if 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7
|
||||
%global selinux_policyver 3.13.1-23
|
||||
%endif # centos,oraclelinux 7
|
||||
%endif # with_selinux
|
||||
|
||||
# RE: rhbz#1195804 - ensure min NVR for selinux-policy
|
||||
%if 0%{?with_selinux}
|
||||
Requires: selinux-policy >= %{selinux_policyver}
|
||||
Requires(pre): %{name}-selinux >= %{version}-%{release}
|
||||
%endif # with_selinux
|
||||
|
||||
# conflicting packages
|
||||
Conflicts: docker
|
||||
Conflicts: docker-io
|
||||
Conflicts: docker-engine-cs
|
||||
|
||||
%description
|
||||
Docker is an open source project to build, ship and run any application as a
|
||||
lightweight container.
|
||||
|
||||
Docker containers are both hardware-agnostic and platform-agnostic. This means
|
||||
they can run anywhere, from your laptop to the largest EC2 compute instance and
|
||||
everything in between - and they don't require you to use a particular
|
||||
language, framework or packaging system. That makes them great building blocks
|
||||
for deploying and scaling web apps, databases, and backend services without
|
||||
depending on a particular stack or provider.
|
||||
|
||||
%prep
|
||||
%if 0%{?centos} <= 6 || 0%{?oraclelinux} <=6
|
||||
%setup -n %{name}
|
||||
%else
|
||||
%autosetup -n %{name}
|
||||
%endif
|
||||
|
||||
%build
|
||||
export DOCKER_GITCOMMIT=%{_gitcommit}
|
||||
./hack/make.sh dynbinary
|
||||
# ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here
|
||||
|
||||
%check
|
||||
./bundles/%{_origversion}/dynbinary-client/docker -v
|
||||
./bundles/%{_origversion}/dynbinary-daemon/dockerd -v
|
||||
|
||||
%install
|
||||
# install binary
|
||||
install -d $RPM_BUILD_ROOT/%{_bindir}
|
||||
install -p -m 755 bundles/%{_origversion}/dynbinary-client/docker-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/docker
|
||||
install -p -m 755 bundles/%{_origversion}/dynbinary-daemon/dockerd-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/dockerd
|
||||
install -p -m 755 bundles/%{_origversion}/dynbinary-daemon/docker-proxy-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/docker-proxy
|
||||
|
||||
# install containerd
|
||||
install -p -m 755 /usr/local/bin/containerd $RPM_BUILD_ROOT/%{_bindir}/docker-containerd
|
||||
install -p -m 755 /usr/local/bin/containerd-shim $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-shim
|
||||
install -p -m 755 /usr/local/bin/ctr $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-ctr
|
||||
|
||||
# install runc
|
||||
install -p -m 755 /usr/local/sbin/runc $RPM_BUILD_ROOT/%{_bindir}/docker-runc
|
||||
|
||||
# install udev rules
|
||||
install -d $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d
|
||||
install -p -m 644 contrib/udev/80-docker.rules $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d/80-docker.rules
|
||||
|
||||
# add init scripts
|
||||
install -d $RPM_BUILD_ROOT/etc/sysconfig
|
||||
install -d $RPM_BUILD_ROOT/%{_initddir}
|
||||
|
||||
|
||||
%if 0%{?is_systemd}
|
||||
install -d $RPM_BUILD_ROOT/%{_unitdir}
|
||||
install -p -m 644 contrib/init/systemd/docker.service $RPM_BUILD_ROOT/%{_unitdir}/docker.service
|
||||
install -p -m 644 contrib/init/systemd/docker.socket $RPM_BUILD_ROOT/%{_unitdir}/docker.socket
|
||||
%else
|
||||
install -p -m 644 contrib/init/sysvinit-redhat/docker.sysconfig $RPM_BUILD_ROOT/etc/sysconfig/docker
|
||||
install -p -m 755 contrib/init/sysvinit-redhat/docker $RPM_BUILD_ROOT/%{_initddir}/docker
|
||||
%endif
|
||||
# add bash, zsh, and fish completions
|
||||
install -d $RPM_BUILD_ROOT/usr/share/bash-completion/completions
|
||||
install -d $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions
|
||||
install -d $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d
|
||||
install -p -m 644 contrib/completion/bash/docker $RPM_BUILD_ROOT/usr/share/bash-completion/completions/docker
|
||||
install -p -m 644 contrib/completion/zsh/_docker $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions/_docker
|
||||
install -p -m 644 contrib/completion/fish/docker.fish $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d/docker.fish
|
||||
|
||||
# install manpages
|
||||
install -d %{buildroot}%{_mandir}/man1
|
||||
install -p -m 644 man/man1/*.1 $RPM_BUILD_ROOT/%{_mandir}/man1
|
||||
install -d %{buildroot}%{_mandir}/man5
|
||||
install -p -m 644 man/man5/*.5 $RPM_BUILD_ROOT/%{_mandir}/man5
|
||||
install -d %{buildroot}%{_mandir}/man8
|
||||
install -p -m 644 man/man8/*.8 $RPM_BUILD_ROOT/%{_mandir}/man8
|
||||
|
||||
# add vimfiles
|
||||
install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc
|
||||
install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect
|
||||
install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax
|
||||
install -p -m 644 contrib/syntax/vim/doc/dockerfile.txt $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc/dockerfile.txt
|
||||
install -p -m 644 contrib/syntax/vim/ftdetect/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect/dockerfile.vim
|
||||
install -p -m 644 contrib/syntax/vim/syntax/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax/dockerfile.vim
|
||||
|
||||
# add nano
|
||||
install -d $RPM_BUILD_ROOT/usr/share/nano
|
||||
install -p -m 644 contrib/syntax/nano/Dockerfile.nanorc $RPM_BUILD_ROOT/usr/share/nano/Dockerfile.nanorc
|
||||
|
||||
# list files owned by the package here
|
||||
%files
|
||||
%doc AUTHORS CHANGELOG.md CONTRIBUTING.md LICENSE MAINTAINERS NOTICE README.md
|
||||
/%{_bindir}/docker
|
||||
/%{_bindir}/dockerd
|
||||
/%{_bindir}/docker-containerd
|
||||
/%{_bindir}/docker-containerd-shim
|
||||
/%{_bindir}/docker-containerd-ctr
|
||||
/%{_bindir}/docker-proxy
|
||||
/%{_bindir}/docker-runc
|
||||
/%{_sysconfdir}/udev/rules.d/80-docker.rules
|
||||
%if 0%{?is_systemd}
|
||||
/%{_unitdir}/docker.service
|
||||
/%{_unitdir}/docker.socket
|
||||
%else
|
||||
%config(noreplace,missingok) /etc/sysconfig/docker
|
||||
/%{_initddir}/docker
|
||||
%endif
|
||||
/usr/share/bash-completion/completions/docker
|
||||
/usr/share/zsh/vendor-completions/_docker
|
||||
/usr/share/fish/vendor_completions.d/docker.fish
|
||||
%doc
|
||||
/%{_mandir}/man1/*
|
||||
/%{_mandir}/man5/*
|
||||
/%{_mandir}/man8/*
|
||||
/usr/share/vim/vimfiles/doc/dockerfile.txt
|
||||
/usr/share/vim/vimfiles/ftdetect/dockerfile.vim
|
||||
/usr/share/vim/vimfiles/syntax/dockerfile.vim
|
||||
/usr/share/nano/Dockerfile.nanorc
|
||||
|
||||
%post
|
||||
%if 0%{?is_systemd}
|
||||
%systemd_post docker
|
||||
%else
|
||||
# This adds the proper /etc/rc*.d links for the script
|
||||
/sbin/chkconfig --add docker
|
||||
%endif
|
||||
if ! getent group docker > /dev/null; then
|
||||
groupadd --system docker
|
||||
fi
|
||||
|
||||
%preun
|
||||
%if 0%{?is_systemd}
|
||||
%systemd_preun docker
|
||||
%else
|
||||
if [ $1 -eq 0 ] ; then
|
||||
/sbin/service docker stop >/dev/null 2>&1
|
||||
/sbin/chkconfig --del docker
|
||||
fi
|
||||
%endif
|
||||
|
||||
%postun
|
||||
%if 0%{?is_systemd}
|
||||
%systemd_postun_with_restart docker
|
||||
%else
|
||||
if [ "$1" -ge "1" ] ; then
|
||||
/sbin/service docker condrestart >/dev/null 2>&1 || :
|
||||
fi
|
||||
%endif
|
||||
|
||||
%changelog
|
66
vendor/github.com/containers/storage/hack/make/.detect-daemon-osarch
generated
vendored
Normal file
66
vendor/github.com/containers/storage/hack/make/.detect-daemon-osarch
generated
vendored
Normal file
|
@ -0,0 +1,66 @@
|
|||
#!/bin/bash
set -e

# Print the OS/Arch of one side of the docker connection, e.g. "linux/amd64".
# Arguments: $1 - "Client" or "Server"
# Outputs:   "<os>/<arch>" on stdout
docker-version-osarch() {
	local target="$1" # "Client" or "Server"
	local fmtStr="{{.${target}.Os}}/{{.${target}.Arch}}"
	if docker version -f "$fmtStr" 2>/dev/null; then
		# if "docker version -f" works, let's just use that!
		return
	fi
	# Fall back to scraping the human-readable "docker version" output.
	docker version | awk '
		$1 ~ /^(Client|Server):$/ { section = 0 }
		$1 == "'"$target"':" { section = 1; next }
		section && $1 == "OS/Arch:" { print $2 }

		# old versions of Docker
		$1 == "OS/Arch" && $2 == "('"${target,,}"'):" { print $3 }
	'
}

# Retrieve OS/ARCH of docker daemon, eg. linux/amd64
# (assign and export separately so a failing command substitution is not
# silently swallowed by "export" -- ShellCheck SC2155; "|| :" preserves the
# original best-effort behavior under "set -e" when the daemon is unreachable)
DOCKER_ENGINE_OSARCH="$(docker-version-osarch 'Server')" || :
export DOCKER_ENGINE_OSARCH
export DOCKER_ENGINE_GOOS="${DOCKER_ENGINE_OSARCH%/*}"
export DOCKER_ENGINE_GOARCH="${DOCKER_ENGINE_OSARCH##*/}"
DOCKER_ENGINE_GOARCH=${DOCKER_ENGINE_GOARCH:=amd64}

# and the client, just in case
DOCKER_CLIENT_OSARCH="$(docker-version-osarch 'Client')" || :
export DOCKER_CLIENT_OSARCH
export DOCKER_CLIENT_GOOS="${DOCKER_CLIENT_OSARCH%/*}"
export DOCKER_CLIENT_GOARCH="${DOCKER_CLIENT_OSARCH##*/}"
DOCKER_CLIENT_GOARCH=${DOCKER_CLIENT_GOARCH:=amd64}

# Retrieve the architecture used in contrib/builder/(deb|rpm)/$PACKAGE_ARCH/
PACKAGE_ARCH='amd64'
case "${DOCKER_ENGINE_GOARCH:-$DOCKER_CLIENT_GOARCH}" in
	arm)
		PACKAGE_ARCH='armhf'
		;;
	arm64)
		PACKAGE_ARCH='aarch64'
		;;
	amd64|ppc64le|s390x)
		PACKAGE_ARCH="${DOCKER_ENGINE_GOARCH:-$DOCKER_CLIENT_GOARCH}"
		;;
	*)
		echo >&2 "warning: not sure how to convert '$DOCKER_ENGINE_GOARCH' to a 'Docker' arch, assuming '$PACKAGE_ARCH'"
		;;
esac
export PACKAGE_ARCH

# Pick the Dockerfile flavor and test-image namespace matching the target arch.
DOCKERFILE='Dockerfile'
TEST_IMAGE_NAMESPACE=
case "$PACKAGE_ARCH" in
	amd64)
		case "${DOCKER_ENGINE_GOOS:-$DOCKER_CLIENT_GOOS}" in
			windows)
				DOCKERFILE='Dockerfile.windows'
				;;
		esac
		;;
	*)
		DOCKERFILE="Dockerfile.$PACKAGE_ARCH"
		TEST_IMAGE_NAMESPACE="$PACKAGE_ARCH"
		;;
esac
export DOCKERFILE TEST_IMAGE_NAMESPACE
|
23
vendor/github.com/containers/storage/hack/make/.ensure-emptyfs
generated
vendored
Normal file
23
vendor/github.com/containers/storage/hack/make/.ensure-emptyfs
generated
vendored
Normal file
|
@ -0,0 +1,23 @@
|
|||
#!/bin/bash
set -e

# Seed the daemon with the "emptyfs" image (a single empty layer) by building
# a "docker save"-format tarball by hand and feeding it to "docker load".
# see https://github.com/docker/docker/pull/5262
# and also https://github.com/docker/docker/issues/4242
if ! docker inspect emptyfs &> /dev/null; then
	layerId='511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158'
	stagingDir="$DEST/emptyfs"
	mkdir -p "$stagingDir/$layerId"
	(
		cd "$stagingDir"
		echo '{"emptyfs":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > repositories
		cd "$layerId"
		echo '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' > json
		echo '1.0' > VERSION
		# an empty tar == an empty filesystem layer
		tar -cf layer.tar --files-from /dev/null
	)
	( set -x; tar -cC "$stagingDir" . | docker load )
	rm -rf "$stagingDir"
fi
|
67
vendor/github.com/containers/storage/hack/make/.ensure-frozen-images
generated
vendored
Normal file
67
vendor/github.com/containers/storage/hack/make/.ensure-frozen-images
generated
vendored
Normal file
|
@ -0,0 +1,67 @@
|
|||
#!/bin/bash
set -e

# Ensure the "frozen" base images used by the test suite exist in the daemon:
# load them from a pre-baked directory when present, otherwise download them
# using the exact command recorded in the Dockerfile itself.

# image list should match what's in the Dockerfile (minus the explicit images IDs)
images=(
	buildpack-deps:jessie
	busybox:latest
	debian:jessie
	hello-world:latest
)

# Cross-arch CI pulls images from an arch-specific namespace instead of the library.
if [ "$TEST_IMAGE_NAMESPACE" ]; then
	for (( i = 0; i < ${#images[@]}; i++ )); do
		images[$i]="$TEST_IMAGE_NAMESPACE/${images[$i]}"
	done
fi

if ! docker inspect "${images[@]}" &> /dev/null; then
	hardCodedDir='/docker-frozen-images'
	if [ -d "$hardCodedDir" ]; then
		# Do not use a subshell for the following command. Windows to Linux CI
		# runs bash 3.x so will not trap an error in a subshell.
		# http://stackoverflow.com/questions/22630363/how-does-set-e-work-with-subshells
		set -x; tar -cC "$hardCodedDir" . | docker load; set +x
	else
		dir="$DEST/frozen-images"
		# extract the exact "RUN download-frozen-image-v2.sh" line from the Dockerfile itself for consistency
		# NOTE: this will fail if either "curl" or "jq" is not installed or if the Dockerfile is not available/readable
		awk '
			$1 == "RUN" && $2 == "./contrib/download-frozen-image-v2.sh" {
				for (i = 2; i < NF; i++)
					printf ( $i == "'"$hardCodedDir"'" ? "'"$dir"'" : $i ) " ";
				print $NF;
				if (/\\$/) {
					inCont = 1;
					next;
				}
			}
			inCont {
				print;
				if (!/\\$/) {
					inCont = 0;
				}
			}
		' "$DOCKERFILE" | sh -x
		# Do not use a subshell for the following command. Windows to Linux CI
		# runs bash 3.x so will not trap an error in a subshell.
		# http://stackoverflow.com/questions/22630363/how-does-set-e-work-with-subshells
		set -x; tar -cC "$dir" . | docker load; set +x
	fi
fi

# Re-tag namespaced images back to their canonical names so tests can use them.
if [ "$TEST_IMAGE_NAMESPACE" ]; then
	for image in "${images[@]}"; do
		target="${image#$TEST_IMAGE_NAMESPACE/}"
		if [ "$target" != "$image" ]; then
			# tag images to ensure that all integrations work with the defined image names
			docker tag "$image" "$target"
			# then remove original tags as these make problems with later tests (e.g., TestInspectApiImageResponse)
			docker rmi "$image"
		fi
	done
fi

# explicitly rename "hello-world:latest" to ":frozen" for the test that uses it
docker tag hello-world:latest hello-world:frozen
docker rmi hello-world:latest
|
32
vendor/github.com/containers/storage/hack/make/.ensure-frozen-images-windows
generated
vendored
Normal file
32
vendor/github.com/containers/storage/hack/make/.ensure-frozen-images-windows
generated
vendored
Normal file
|
@ -0,0 +1,32 @@
|
|||
#!/bin/bash
set -e

# This script sets up the required images for Windows to Windows CI

# Tag (microsoft/)windowsservercore as latest
set +e
# "!" discards the pipeline status; we validate the captured value below.
# NOTE: expansions in the [ ] tests below are quoted -- unquoted, a grep that
# matches several images makes "[ -z $BUILD ]" a syntax error (SC2086).
! BUILD=$(docker images | grep windowsservercore | grep -v latest | awk '{print $2}')
if [ -z "$BUILD" ]; then
	echo "ERROR: Could not find windowsservercore images"
	exit 1
fi

# Get the name. Around 2016 6D TP5, these have the microsoft/ prefix, hence cater for both.
! IMAGENAME=$(docker images | grep windowsservercore | grep -v latest | awk '{print $1}')
if [ -z "$IMAGENAME" ]; then
	echo "ERROR: Could not find windowsservercore image"
	exit 1
fi

! LATESTCOUNT=$(docker images | grep windowsservercore | grep -v "$BUILD" | wc -l)
if [ "$LATESTCOUNT" -ne 1 ]; then
	set -e
	docker tag "$IMAGENAME:$BUILD" windowsservercore:latest
	echo "INFO: Tagged $IMAGENAME:$BUILD as windowsservercore:latest"
fi

# Busybox (requires windowsservercore)
if [ -z "$(docker images | grep busybox)" ]; then
	echo "INFO: Building busybox"
	docker build -t busybox https://raw.githubusercontent.com/jhowardmsft/busybox/master/Dockerfile
fi
|
15
vendor/github.com/containers/storage/hack/make/.ensure-httpserver
generated
vendored
Normal file
15
vendor/github.com/containers/storage/hack/make/.ensure-httpserver
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
|||
#!/bin/bash
set -e

# Build a Go static web server image ("httpserver") for the target daemon's
# OS/arch, using the Dockerfile shipped in contrib/httpserver.

buildDir="$DEST/httpserver"
mkdir -p "$buildDir"
(
	cd "$buildDir"
	GOOS=${DOCKER_ENGINE_GOOS:="linux"} GOARCH=${DOCKER_ENGINE_GOARCH:="amd64"} CGO_ENABLED=0 \
		go build -o httpserver github.com/docker/docker/contrib/httpserver
	cp ../../../../contrib/httpserver/Dockerfile .
	docker build -qt httpserver . > /dev/null
)
rm -rf "$buildDir"
|
22
vendor/github.com/containers/storage/hack/make/.ensure-nnp-test
generated
vendored
Normal file
22
vendor/github.com/containers/storage/hack/make/.ensure-nnp-test
generated
vendored
Normal file
|
@ -0,0 +1,22 @@
|
|||
#!/bin/bash
set -e

# Build a C binary for testing no-new-privileges
# and compile it for target daemon
if [ "$DOCKER_ENGINE_GOOS" = "linux" ]; then
	if [ "$DOCKER_ENGINE_OSARCH" = "$DOCKER_CLIENT_OSARCH" ]; then
		# native build: compile on this host and COPY the binary into the image
		tmpdir=$(mktemp -d)
		gcc -g -Wall -static contrib/nnp-test/nnp-test.c -o "${tmpdir}/nnp-test"

		dockerfile="${tmpdir}/Dockerfile"
		cat <<-EOF > "$dockerfile"
		FROM debian:jessie
		COPY . /usr/bin/
		RUN chmod +s /usr/bin/nnp-test
		EOF
		docker build --force-rm ${DOCKER_BUILD_ARGS} -qt nnp-test "${tmpdir}" > /dev/null
		rm -rf "${tmpdir}"
	else
		# cross build: let "docker build" compile it inside the build context
		docker build ${DOCKER_BUILD_ARGS} -qt nnp-test contrib/nnp-test > /dev/null
	fi
fi
|
23
vendor/github.com/containers/storage/hack/make/.ensure-syscall-test
generated
vendored
Normal file
23
vendor/github.com/containers/storage/hack/make/.ensure-syscall-test
generated
vendored
Normal file
|
@ -0,0 +1,23 @@
|
|||
#!/bin/bash
set -e

# Build a C binary for cloning a userns for seccomp tests
# and compile it for target daemon
if [ "$DOCKER_ENGINE_GOOS" = "linux" ]; then
	if [ "$DOCKER_ENGINE_OSARCH" = "$DOCKER_CLIENT_OSARCH" ]; then
		# native build: compile the helpers on this host and COPY them in
		tmpdir=$(mktemp -d)
		gcc -g -Wall -static contrib/syscall-test/userns.c -o "${tmpdir}/userns-test"
		gcc -g -Wall -static contrib/syscall-test/ns.c -o "${tmpdir}/ns-test"
		gcc -g -Wall -static contrib/syscall-test/acct.c -o "${tmpdir}/acct-test"

		dockerfile="${tmpdir}/Dockerfile"
		cat <<-EOF > "$dockerfile"
		FROM debian:jessie
		COPY . /usr/bin/
		EOF
		docker build --force-rm ${DOCKER_BUILD_ARGS} -qt syscall-test "${tmpdir}" > /dev/null
		rm -rf "${tmpdir}"
	else
		# cross build: let "docker build" compile them inside the build context
		docker build ${DOCKER_BUILD_ARGS} -qt syscall-test contrib/syscall-test > /dev/null
	fi
fi
|
20
vendor/github.com/containers/storage/hack/make/.go-autogen
generated
vendored
Normal file
20
vendor/github.com/containers/storage/hack/make/.go-autogen
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
|||
#!/bin/bash

# Generate storageversion/version_autogen.go, baking the build-time commit,
# version, and build-date values in behind the "containersstorageautogen"
# build tag (the heredoc below expands $GITCOMMIT etc. from the environment).
cat > storageversion/version_autogen.go <<EOF
// +build containersstorageautogen

// Package version is auto-generated at build-time
package storageversion

// Default build-time variable for library-import.
// This file is overridden on build with build-time informations.
const (
	GitCommit string = "$GITCOMMIT"
	Version string = "$VERSION"
	BuildTime string = "$BUILDTIME"
	IAmStatic string = "${IAMSTATIC:-false}"
)
// AUTOGENERATED FILE; see $BASH_SOURCE
EOF

# make sure the generated file actually gets compiled in
BUILDTAGS+=" containersstorageautogen"
|
38
vendor/github.com/containers/storage/hack/make/.resources-windows/common.rc
generated
vendored
Normal file
38
vendor/github.com/containers/storage/hack/make/.resources-windows/common.rc
generated
vendored
Normal file
|
@ -0,0 +1,38 @@
|
|||
// Application icon
|
||||
1 ICON "docker.ico"
|
||||
|
||||
// Windows executable manifest
|
||||
1 24 /* RT_MANIFEST */ "docker.exe.manifest"
|
||||
|
||||
// Version information
|
||||
1 VERSIONINFO
|
||||
|
||||
#ifdef DOCKER_VERSION_QUAD
|
||||
FILEVERSION DOCKER_VERSION_QUAD
|
||||
PRODUCTVERSION DOCKER_VERSION_QUAD
|
||||
#endif
|
||||
|
||||
BEGIN
|
||||
BLOCK "StringFileInfo"
|
||||
BEGIN
|
||||
BLOCK "000004B0"
|
||||
BEGIN
|
||||
VALUE "ProductName", DOCKER_NAME
|
||||
|
||||
#ifdef DOCKER_VERSION
|
||||
VALUE "FileVersion", DOCKER_VERSION
|
||||
VALUE "ProductVersion", DOCKER_VERSION
|
||||
#endif
|
||||
|
||||
#ifdef DOCKER_COMMIT
|
||||
VALUE "OriginalFileName", DOCKER_COMMIT
|
||||
#endif
|
||||
|
||||
END
|
||||
END
|
||||
|
||||
BLOCK "VarFileInfo"
|
||||
BEGIN
|
||||
VALUE "Translation", 0x0000, 0x04B0
|
||||
END
|
||||
END
|
18
vendor/github.com/containers/storage/hack/make/.resources-windows/docker.exe.manifest
generated
vendored
Normal file
18
vendor/github.com/containers/storage/hack/make/.resources-windows/docker.exe.manifest
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
|||
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
|
||||
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
|
||||
<description>Docker</description>
|
||||
<compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
|
||||
<application>
|
||||
<!-- Windows 10 -->
|
||||
<supportedOS Id="{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}"/>
|
||||
<!-- Windows 8.1 -->
|
||||
<supportedOS Id="{1f676c76-80e1-4239-95bb-83d0f6d0da78}"/>
|
||||
<!-- Windows Vista -->
|
||||
<supportedOS Id="{e2011457-1546-43c5-a5fe-008deee3d3f0}"/>
|
||||
<!-- Windows 7 -->
|
||||
<supportedOS Id="{35138b9a-5d96-4fbd-8e2d-a2440225f93a}"/>
|
||||
<!-- Windows 8 -->
|
||||
<supportedOS Id="{4a2f28e3-53b9-4441-ba9c-d69d4a4a6e38}"/>
|
||||
</application>
|
||||
</compatibility>
|
||||
</assembly>
|
BIN
vendor/github.com/containers/storage/hack/make/.resources-windows/docker.ico
generated
vendored
Normal file
BIN
vendor/github.com/containers/storage/hack/make/.resources-windows/docker.ico
generated
vendored
Normal file
Binary file not shown.
After Width: | Height: | Size: 361 KiB |
BIN
vendor/github.com/containers/storage/hack/make/.resources-windows/docker.png
generated
vendored
Normal file
BIN
vendor/github.com/containers/storage/hack/make/.resources-windows/docker.png
generated
vendored
Normal file
Binary file not shown.
After Width: | Height: | Size: 643 KiB |
3
vendor/github.com/containers/storage/hack/make/.resources-windows/docker.rc
generated
vendored
Normal file
3
vendor/github.com/containers/storage/hack/make/.resources-windows/docker.rc
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
#define DOCKER_NAME "Docker Client"
|
||||
|
||||
#include "common.rc"
|
4
vendor/github.com/containers/storage/hack/make/.resources-windows/dockerd.rc
generated
vendored
Normal file
4
vendor/github.com/containers/storage/hack/make/.resources-windows/dockerd.rc
generated
vendored
Normal file
|
@ -0,0 +1,4 @@
|
|||
#define DOCKER_NAME "Docker Engine"
|
||||
|
||||
#include "common.rc"
|
||||
#include "event_messages.rc"
|
39
vendor/github.com/containers/storage/hack/make/.resources-windows/event_messages.mc
generated
vendored
Normal file
39
vendor/github.com/containers/storage/hack/make/.resources-windows/event_messages.mc
generated
vendored
Normal file
|
@ -0,0 +1,39 @@
|
|||
MessageId=1
|
||||
Language=English
|
||||
%1
|
||||
.
|
||||
|
||||
MessageId=2
|
||||
Language=English
|
||||
debug: %1
|
||||
.
|
||||
|
||||
MessageId=3
|
||||
Language=English
|
||||
panic: %1
|
||||
.
|
||||
|
||||
MessageId=4
|
||||
Language=English
|
||||
fatal: %1
|
||||
.
|
||||
|
||||
MessageId=11
|
||||
Language=English
|
||||
%1 [%2]
|
||||
.
|
||||
|
||||
MessageId=12
|
||||
Language=English
|
||||
debug: %1 [%2]
|
||||
.
|
||||
|
||||
MessageId=13
|
||||
Language=English
|
||||
panic: %1 [%2]
|
||||
.
|
||||
|
||||
MessageId=14
|
||||
Language=English
|
||||
fatal: %1 [%2]
|
||||
.
|
18
vendor/github.com/containers/storage/hack/make/.resources-windows/resources.go
generated
vendored
Normal file
18
vendor/github.com/containers/storage/hack/make/.resources-windows/resources.go
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
|||
/*
|
||||
|
||||
Package winresources is used to embed Windows resources into docker.exe.
|
||||
These resources are used to provide
|
||||
|
||||
* Version information
|
||||
* An icon
|
||||
* A Windows manifest declaring Windows version support
|
||||
|
||||
The resource object files are generated in hack/make/.go-autogen from
|
||||
source files in hack/make/.resources-windows. This occurs automatically
|
||||
when you run hack/make.sh.
|
||||
|
||||
These object files are picked up automatically by go build when this package
|
||||
is included.
|
||||
|
||||
*/
|
||||
package winresources
|
33
vendor/github.com/containers/storage/hack/make/.validate
generated
vendored
Normal file
33
vendor/github.com/containers/storage/hack/make/.validate
generated
vendored
Normal file
|
@ -0,0 +1,33 @@
|
|||
#!/bin/bash

# Shared setup for the validate-* bundles: determine the upstream commit to
# compare against and define the validate_diff/validate_log helpers.
if [ -z "$VALIDATE_UPSTREAM" ]; then
	# this is kind of an expensive check, so let's not do this twice if we
	# are running more than one validate bundlescript

	VALIDATE_REPO='https://github.com/containers/storage.git'
	VALIDATE_BRANCH='master'

	# On Travis pull requests, compare against the PR's target repo/branch instead.
	if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
		VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git"
		VALIDATE_BRANCH="${TRAVIS_BRANCH}"
	fi

	VALIDATE_HEAD="$(git rev-parse --verify HEAD)"

	git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH"
	VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)"

	# two-dot for log (commits only in HEAD), three-dot for diff (since merge base)
	VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD"
	VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD"

	# Diff of local changes vs upstream; no-op when HEAD is upstream.
	validate_diff() {
		if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
			git diff "$VALIDATE_COMMIT_DIFF" "$@"
		fi
	}
	# Log of local commits not in upstream; no-op when HEAD is upstream.
	validate_log() {
		if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
			git log "$VALIDATE_COMMIT_LOG" "$@"
		fi
	}
fi
|
17
vendor/github.com/containers/storage/hack/make/README.md
generated
vendored
Normal file
17
vendor/github.com/containers/storage/hack/make/README.md
generated
vendored
Normal file
|
@ -0,0 +1,17 @@
|
|||
This directory holds scripts called by `make.sh` in the parent directory.
|
||||
|
||||
Each script is named after the bundle it creates.
|
||||
They should not be called directly - instead, pass the bundle name as an argument to make.sh, for example:
|
||||
|
||||
```
|
||||
./hack/make.sh test
|
||||
./hack/make.sh binary cross
|
||||
|
||||
# Or to run all bundles:
|
||||
./hack/make.sh
|
||||
```
|
||||
|
||||
To add a bundle:
|
||||
|
||||
* Create a shell-compatible file here
|
||||
* Add it to $DEFAULT_BUNDLES in make.sh
|
9
vendor/github.com/containers/storage/hack/make/binary
generated
vendored
Normal file
9
vendor/github.com/containers/storage/hack/make/binary
generated
vendored
Normal file
|
@ -0,0 +1,9 @@
|
|||
#!/bin/bash
set -e

# Backwards-compatibility shim for CI: delegate to the binary-oci-storage
# bundle, pointing both destination paths at "-oci-storage" suffixed dirs.
(
	ABS_DEST="${ABS_DEST}-oci-storage"
	DEST="${DEST}-oci-storage"
	source hack/make/binary-oci-storage
)
|
10
vendor/github.com/containers/storage/hack/make/binary-oci-storage
generated
vendored
Normal file
10
vendor/github.com/containers/storage/hack/make/binary-oci-storage
generated
vendored
Normal file
|
@ -0,0 +1,10 @@
|
|||
#!/bin/bash
set -e

# Build the dynamically linked oci-storage binary via the shared .binary script.
(
	export SOURCE_PATH='./cmd/oci-storage'
	export BINARY_SHORT_NAME='oci-storage'
	export IAMSTATIC='false'
	# we're not building a "static" binary here
	export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" )
	source "${MAKEDIR}/.binary"
)
|
20
vendor/github.com/containers/storage/hack/make/cover
generated
vendored
Normal file
20
vendor/github.com/containers/storage/hack/make/cover
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
|||
#!/bin/bash
set -e

# Print a per-function coverage summary for every coverprofile produced by
# the sibling test bundles.
bundle_cover() {
	local profile
	for profile in "$DEST/../"*"/coverprofiles/"*; do
		echo
		(
			set -x
			go tool cover -func="$profile"
		)
	done
}

if [ "$HAVE_GO_TEST_COVER" ]; then
	bundle_cover 2>&1 | tee "$DEST/report.log"
else
	echo >&2 'warning: the current version of go does not support -cover'
	echo >&2 ' skipping test coverage report'
fi
|
18
vendor/github.com/containers/storage/hack/make/cross
generated
vendored
Normal file
18
vendor/github.com/containers/storage/hack/make/cross
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
|||
#!/bin/bash
set -e

# Cross-compile the binary bundle for every platform listed below, placing
# each build under $DEST/GOOS/GOARCH.
CROSSPLATFORMS="linux/amd64 linux/386 linux/arm"
# devicemapper needs cgo, so exclude it from cross builds
BUILDTAGS+=" exclude_graphdriver_devicemapper"

for platform in $CROSSPLATFORMS; do
	(
		# subshell keeps the per-platform env from leaking between iterations
		export KEEPDEST=1
		export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION
		mkdir -p "$DEST"
		ABS_DEST="$(cd "$DEST" && pwd -P)"
		export GOOS=${platform%/*}
		export GOARCH=${platform##*/}

		source "${MAKEDIR}/binary"
	)
done
|
26
vendor/github.com/containers/storage/hack/make/gccgo
generated
vendored
Normal file
26
vendor/github.com/containers/storage/hack/make/gccgo
generated
vendored
Normal file
|
@ -0,0 +1,26 @@
|
|||
#!/bin/bash
set -e

# Build oci-storage using the gccgo compiler instead of the default gc toolchain.
BINARY_NAME="oci-storage-$VERSION"
BINARY_EXTENSION="$(binary_extension)"
BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION"

# regenerate the build-tag-gated version file before compiling
source "${MAKEDIR}/.go-autogen"

# gccgo require explicit flag -pthread to allow goroutines to work.
go build -compiler=gccgo \
	-o "$DEST/$BINARY_FULLNAME" \
	"${BUILDFLAGS[@]}" \
	-gccgoflags "
	-g
	-Wl,--no-export-dynamic
	-ldl
	-pthread
	" \
	./cmd/oci-storage

echo "Created binary: $DEST/$BINARY_FULLNAME"
# convenience symlink without the version suffix
ln -sf "$BINARY_FULLNAME" "$DEST/oci-storage$BINARY_EXTENSION"

hash_files "$DEST/$BINARY_FULLNAME"
|
||||
|
12
vendor/github.com/containers/storage/hack/make/test-integration-cli
generated
vendored
Normal file
12
vendor/github.com/containers/storage/hack/make/test-integration-cli
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
|||
#!/bin/bash
set -e

# Run the integration-cli suite with gocheck verbosity and generous timeouts,
# appending all output to the bundle's test log.
bundle_test_integration_cli() {
	TESTFLAGS+=" -check.v -check.timeout=${TIMEOUT} -test.timeout=360m"
	go_test_dir ./integration-cli
}

# subshell so that we can export PATH without breaking other things
( bundle_test_integration_cli ) 2>&1 | tee -a "$DEST/test.log"
|
54
vendor/github.com/containers/storage/hack/make/test-unit
generated
vendored
Normal file
54
vendor/github.com/containers/storage/hack/make/test-unit
generated
vendored
Normal file
|
@ -0,0 +1,54 @@
|
|||
#!/bin/bash
set -e

# Run the test suite, including sub-packages, and store their output as a bundle
# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'.
# You can use this to select certain tests to run, eg.
#
#   TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit
#

# List the in-repo (non-vendor, non-integration-cli) imports of the given packages.
bundle_test_list() {
	go list -e -f '{{range .Imports}}{{.}}{{"\n"}}{{end}}' ./cmd/oci-storage "$@" |\
		grep github.com/containers/storage |\
		grep -v github.com/containers/storage/vendor |\
		grep -v github.com/containers/storage/integration-cli
}
bundle_test_unit() {
	TESTFLAGS+=" -test.timeout=${TIMEOUT}"
	# drop the "-i" (install) flag from BUILDFLAGS; it is not valid for "go test"
	INCBUILD="-i"
	count=0
	for flag in "${BUILDFLAGS[@]}"; do
		if [ "${flag}" == "${INCBUILD}" ]; then
			unset BUILDFLAGS[${count}]
			break
		fi
		# standard $(( )) arithmetic; the old "$[ ]" form is deprecated
		count=$(( count + 1 ))
	done

	date
	if [ -z "$TESTDIRS" ]; then
		TEST_PATH=./...
	else
		TEST_PATH=./${TESTDIRS}
	fi
	# compute the transitive closure of in-repo dependencies by re-running
	# bundle_test_list until the package list reaches a fixed point
	pkg_list=$(
		deps=`bundle_test_list ./cmd/oci-storage`
		newdeps=
		while test "$deps" != "$newdeps" ; do
			deps=${newdeps:-`bundle_test_list ./cmd/oci-storage`}
			newdeps=`for dep in $deps ; do
				bundle_test_list "$dep"
			done | sort -u`
		done
		echo $deps
	)
	go test $COVER $GCCGOFLAGS -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" ${BUILDTAGS:+-tags "${BUILDTAGS}"} $TESTFLAGS $pkg_list
}


# gccgo test binaries need -lpthread; the gc toolchain supports -cover
if [[ "$(go version)" == *"gccgo"* ]]; then
	GCCGOFLAGS=-gccgoflags="-lpthread"
else
	COVER=-cover
fi
bundle_test_unit 2>&1 | tee -a "$DEST/test.log"
|
54
vendor/github.com/containers/storage/hack/make/validate-dco
generated
vendored
Normal file
54
vendor/github.com/containers/storage/hack/make/validate-dco
generated
vendored
Normal file
|
@ -0,0 +1,54 @@
|
|||
#!/bin/bash

# Verify that every commit under review carries a Developer Certificate of
# Origin "Signed-off-by" line; exits non-zero and lists offenders otherwise.
source "${MAKEDIR}/.validate"

# total added/deleted line counts across the diff under validation
adds=$(validate_diff --numstat | awk '{ s += $1 } END { print s }')
dels=$(validate_diff --numstat | awk '{ s += $2 } END { print s }')
#notDocs="$(validate_diff --numstat | awk '$3 !~ /^docs\// { print $3 }')"

: ${adds:=0}
: ${dels:=0}

# "Username may only contain alphanumeric characters or dashes and cannot begin with a dash"
githubUsernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+'

# https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work
dcoPrefix='Signed-off-by:'
dcoRegex="^$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($githubUsernameRegex)\\))?$"

# Succeeds when stdin contains a line matching the DCO pattern.
check_dco() {
	grep -qE "$dcoRegex"
}

if [ $adds -eq 0 -a $dels -eq 0 ]; then
	echo '0 adds, 0 deletions; nothing to validate! :)'
else
	commits=( $(validate_log --format='format:%H%n') )
	badCommits=()
	for commit in "${commits[@]}"; do
		if [ -z "$(git log -1 --format='format:' --name-status "$commit")" ]; then
			# no content (ie, Merge commit, etc)
			continue
		fi
		if ! git log -1 --format='format:%B' "$commit" | check_dco; then
			badCommits+=( "$commit" )
		fi
	done
	if [ ${#badCommits[@]} -eq 0 ]; then
		echo "Congratulations! All commits are properly signed with the DCO!"
	else
		{
			echo "These commits do not have a proper '$dcoPrefix' marker:"
			for commit in "${badCommits[@]}"; do
				echo " - $commit"
			done
			echo
			echo 'Please amend each commit to include a properly formatted DCO marker.'
			echo
			echo 'Visit the following URL for information about the Docker DCO:'
			echo ' https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work'
			echo
		} >&2
		false
	fi
fi
|
30
vendor/github.com/containers/storage/hack/make/validate-gofmt
generated
vendored
Normal file
30
vendor/github.com/containers/storage/hack/make/validate-gofmt
generated
vendored
Normal file
|
@ -0,0 +1,30 @@
|
|||
#!/bin/bash
# Check that every Go file touched by this change is formatted with gofmt -s.

source "${MAKEDIR}/.validate"

# Added/copied/modified/renamed Go files, excluding vendored code.
IFS=$'\n'
changed=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) )
unset IFS

needFormatting=()
for file in "${changed[@]}"; do
	# Validate the committed blob (via "git show"), not the working-tree copy.
	if [ -n "$(git show "$VALIDATE_HEAD:$file" | gofmt -s -l)" ]; then
		needFormatting+=( "$file" )
	fi
done

if [ "${#needFormatting[@]}" -eq 0 ]; then
	echo 'Congratulations! All Go source files are properly formatted.'
else
	{
		echo "These files are not properly gofmt'd:"
		for file in "${needFormatting[@]}"; do
			echo " - $file"
		done
		echo
		echo 'Please reformat the above files using "gofmt -s -w" and commit the result.'
		echo
	} >&2
	false
fi
|
30
vendor/github.com/containers/storage/hack/make/validate-lint
generated
vendored
Normal file
30
vendor/github.com/containers/storage/hack/make/validate-lint
generated
vendored
Normal file
|
@ -0,0 +1,30 @@
|
|||
#!/bin/bash
# Run golint over every Go file touched by this change (vendor/ excluded).

source "${MAKEDIR}/.validate"

IFS=$'\n'
changed=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) )
unset IFS

lintErrors=()
for file in "${changed[@]}"; do
	out=$(golint "$file")
	if [ -n "$out" ]; then
		lintErrors+=( "$out" )
	fi
done

if [ "${#lintErrors[@]}" -eq 0 ]; then
	echo 'Congratulations! All Go source files have been linted.'
else
	{
		echo "Errors from golint:"
		for out in "${lintErrors[@]}"; do
			echo "$out"
		done
		echo
		echo 'Please fix the above errors. You can test via "golint" and commit the result.'
		echo
	} >&2
	false
fi
|
32
vendor/github.com/containers/storage/hack/make/validate-pkg
generated
vendored
Normal file
32
vendor/github.com/containers/storage/hack/make/validate-pkg
generated
vendored
Normal file
|
@ -0,0 +1,32 @@
|
|||
#!/bin/bash
# Ensure changed files under pkg/ do not depend (directly or transitively)
# on docker-internal code outside pkg/.
set -e

source "${MAKEDIR}/.validate"

IFS=$'\n'
changed=( $(validate_diff --diff-filter=ACMR --name-only -- 'pkg/*.go' || true) )
unset IFS

violations=()
for file in "${changed[@]}"; do
	# All transitive deps of the file that live under github.com/docker/docker
	# but NOT under github.com/docker/docker/pkg/ are forbidden.
	IFS=$'\n'
	forbidden=( $(go list -e -f '{{ join .Deps "\n" }}' "$file" \
		| sort -u \
		| grep -vE '^github.com/docker/docker/pkg/' \
		| grep -E '^github.com/docker/docker' \
		|| true) )
	unset IFS

	for import in "${forbidden[@]}"; do
		violations+=( "$file imports $import" )
	done
done

if [ "${#violations[@]}" -eq 0 ]; then
	echo 'Congratulations! "./pkg/..." is safely isolated from internal code.'
else
	{
		echo 'These files import internal code: (either directly or indirectly)'
		for entry in "${violations[@]}"; do
			echo " - $entry"
		done
		echo
	} >&2
	false
fi
|
35
vendor/github.com/containers/storage/hack/make/validate-test
generated
vendored
Normal file
35
vendor/github.com/containers/storage/hack/make/validate-test
generated
vendored
Normal file
|
@ -0,0 +1,35 @@
|
|||
#!/bin/bash

# Make sure we're not using gos' Testing package any more in integration-cli

source "${MAKEDIR}/.validate"

IFS=$'\n'
files=( $(validate_diff --diff-filter=ACMR --name-only -- 'integration-cli/*.go' || true) )
unset IFS

badFiles=()
for f in "${files[@]}"; do
	# skip check_test.go since it *does* use the testing package
	if [ "$f" = "integration-cli/check_test.go" ]; then
		continue
	fi

	# we use "git show" here to validate that what's committed doesn't contain golang built-in testing
	# -F matches "testing.T" as a literal string; previously the unescaped '.'
	# was a regex wildcard and could match unrelated text such as "testingXT".
	if git show "$VALIDATE_HEAD:$f" | grep -qF testing.T; then
		badFiles+=( "$f" )
	fi
done

if [ ${#badFiles[@]} -eq 0 ]; then
	echo 'Congratulations! No testing.T found.'
else
	{
		echo "These files use the wrong testing infrastructure:"
		for f in "${badFiles[@]}"; do
			echo " - $f"
		done
		echo
	} >&2
	false
fi
|
30
vendor/github.com/containers/storage/hack/make/validate-toml
generated
vendored
Normal file
30
vendor/github.com/containers/storage/hack/make/validate-toml
generated
vendored
Normal file
|
@ -0,0 +1,30 @@
|
|||
#!/bin/bash
# Validate that every changed MAINTAINERS file parses as TOML (via tomlv).

source "${MAKEDIR}/.validate"

IFS=$'\n'
changed=( $(validate_diff --diff-filter=ACMR --name-only -- 'MAINTAINERS' || true) )
unset IFS

invalid=()
for file in "${changed[@]}"; do
	# Validate the committed content; tomlv reads it back through stdin
	# via the /proc/self/fd/0 path since it requires a filename argument.
	if ! git show "$VALIDATE_HEAD:$file" | tomlv /proc/self/fd/0 ; then
		invalid+=( "$file" )
	fi
done

if [ "${#invalid[@]}" -eq 0 ]; then
	echo 'Congratulations! All toml source files changed here have valid syntax.'
else
	{
		echo "These files are not valid toml:"
		for file in "${invalid[@]}"; do
			echo " - $file"
		done
		echo
		echo 'Please reformat the above files as valid toml'
		echo
	} >&2
	false
fi
|
27
vendor/github.com/containers/storage/hack/make/validate-vendor
generated
vendored
Normal file
27
vendor/github.com/containers/storage/hack/make/validate-vendor
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
#!/bin/bash
# If vendoring-related files changed, re-run the vendoring script and fail
# when that leaves the vendor/ tree dirty (i.e. vendoring was done by hand).

source "${MAKEDIR}/.validate"

IFS=$'\n'
changed=( $(validate_diff --diff-filter=ACMR --name-only -- 'hack/vendor.sh' 'hack/.vendor-helpers.sh' 'vendor/' || true) )
unset IFS

if [ "${#changed[@]}" -gt 0 ]; then
	# Run vendor.sh and see if we have a diff afterwards.
	./hack/vendor.sh >/dev/null
	# Check whether the working directory is clean.
	diffs="$(git status --porcelain -- vendor 2>/dev/null)"
	if [ -n "$diffs" ]; then
		{
			echo 'The result of ./hack/vendor.sh differs'
			echo
			echo "$diffs"
			echo
			echo 'Please vendor your package with ./hack/vendor.sh.'
			echo
		} >&2
		false
	else
		echo 'Congratulations! All vendoring changes are done the right way.'
	fi
fi
|
31
vendor/github.com/containers/storage/hack/make/validate-vet
generated
vendored
Normal file
31
vendor/github.com/containers/storage/hack/make/validate-vet
generated
vendored
Normal file
|
@ -0,0 +1,31 @@
|
|||
#!/bin/bash
# Run 'go vet' over every Go file touched by this change (vendor/ excluded).

source "${MAKEDIR}/.validate"

IFS=$'\n'
files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) )
unset IFS

errors=()
for f in "${files[@]}"; do
	# go vet writes its diagnostics to stderr; without 2>&1 the capture
	# below is always empty and this check can never fail.
	failedVet=$(go vet "$f" 2>&1)
	if [ "$failedVet" ]; then
		errors+=( "$failedVet" )
	fi
done


if [ ${#errors[@]} -eq 0 ]; then
	echo 'Congratulations! All Go source files have been vetted.'
else
	{
		echo "Errors from go vet:"
		for err in "${errors[@]}"; do
			echo " - $err"
		done
		echo
		echo 'Please fix the above errors. You can test via "go vet" and commit the result.'
		echo
	} >&2
	false
fi
|
319
vendor/github.com/containers/storage/hack/release.sh
generated
vendored
Executable file
319
vendor/github.com/containers/storage/hack/release.sh
generated
vendored
Executable file
|
@ -0,0 +1,319 @@
|
|||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
# This script looks for bundles built by make.sh, and releases them on a
|
||||
# public S3 bucket.
|
||||
#
|
||||
# Bundles should be available for the VERSION string passed as argument.
|
||||
#
|
||||
# The correct way to call this script is inside a container built by the
|
||||
# official Dockerfile at the root of the Docker source code. The Dockerfile,
|
||||
# make.sh and release.sh should all be from the same source code revision.
|
||||
|
||||
set -o pipefail
|
||||
|
||||
# Print a usage message and exit.
|
||||
usage() {
	# All usage text goes to stderr; the quoted 'EOF' delimiter keeps the
	# heredoc literal (no variable/command expansion).
	cat >&2 <<'EOF'
To run, I need:
- to be in a container generated by the Dockerfile at the top of the Docker
repository;
- to be provided with the location of an S3 bucket and path, in
environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: '');
- to be provided with AWS credentials for this S3 bucket, in environment
variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY;
- a generous amount of good will and nice manners.
The canonical way to run me is to run the image produced by the Dockerfile: e.g.:"

docker run -e AWS_S3_BUCKET=test.docker.com \
-e AWS_ACCESS_KEY_ID \
-e AWS_SECRET_ACCESS_KEY \
-e AWS_DEFAULT_REGION \
-it --privileged \
docker ./hack/release.sh
EOF
	exit 1
}
|
||||
|
||||
# Fail fast (printing usage) unless all required release inputs are present.
[ "$AWS_S3_BUCKET" ] || usage
[ "$AWS_ACCESS_KEY_ID" ] || usage
[ "$AWS_SECRET_ACCESS_KEY" ] || usage
[ -d /go/src/github.com/docker/docker ] || usage
cd /go/src/github.com/docker/docker
[ -x hack/make.sh ] || usage

export AWS_DEFAULT_REGION
: ${AWS_DEFAULT_REGION:=us-west-1}

# Bundles that make.sh must produce for a release.
RELEASE_BUNDLES=(
	binary
	cross
	tgz
)

# Unless the bypass flag was passed, wrap the build with the test bundles.
if [ "$1" != '--release-regardless-of-test-failure' ]; then
	RELEASE_BUNDLES=(
		test-unit
		"${RELEASE_BUNDLES[@]}"
		test-integration-cli
	)
fi

VERSION=$(< VERSION)
BUCKET=$AWS_S3_BUCKET
BUCKET_PATH=$BUCKET
[[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH

# Refuse to release from a dirty working tree (tracked files only).
if command -v git &> /dev/null && git rev-parse &> /dev/null; then
	if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
		echo "You cannot run the release script on a repo with uncommitted changes"
		usage
	fi
fi

# These are the 2 keys we've used to sign the deb's
# release (get.docker.com)
# GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9"
# test (test.docker.com)
# GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6"
|
||||
|
||||
# Prepare the target S3 bucket: create it if needed, verify access, and
# enable website hosting. Reads BUCKET from the environment.
setup_s3() {
	echo "Setting up S3"
	# Try creating the bucket. Ignore errors (it might already exist).
	aws s3 mb "s3://$BUCKET" 2>/dev/null || true
	# Check access to the bucket; under set -e a failure here aborts the release.
	aws s3 ls "s3://$BUCKET" >/dev/null
	# Make the bucket accessible through website endpoints.
	aws s3 website --index-document index --error-document error "s3://$BUCKET"
}
|
||||
|
||||
# write_to_s3 uploads the contents of standard input to the specified S3 url.
|
||||
# write_to_s3 uploads the contents of standard input to the specified S3 url.
# $1 - destination s3:// URL. Stdin is staged through a mktemp file because
# 'aws s3 cp' needs a seekable local source.
write_to_s3() {
	DEST=$1
	# $(...) instead of legacy backticks; set -e (file header) aborts on failure.
	F=$(mktemp)
	cat > "$F"
	aws s3 cp --acl public-read --content-type 'text/plain' "$F" "$DEST"
	rm -f "$F"
}
|
||||
|
||||
# Print the public base URL for the release bucket. The well-known docker
# buckets are served over https on their own domains; anything else falls
# back to the regional S3 website endpoint. Reads BUCKET, BUCKET_PATH,
# AWS_DEFAULT_REGION and AWS_S3_BUCKET_PATH.
s3_url() {
	case "$BUCKET" in
		get.docker.com|test.docker.com|experimental.docker.com)
			echo "https://$BUCKET_PATH"
			;;
		*)
			BASE_URL="http://${BUCKET}.s3-website-${AWS_DEFAULT_REGION}.amazonaws.com"
			# Append the sub-path only when one is configured.
			echo "$BASE_URL${AWS_S3_BUCKET_PATH:+/$AWS_S3_BUCKET_PATH}"
			;;
	esac
}
|
||||
|
||||
# Run make.sh for every bundle in RELEASE_BUNDLES (tests included unless the
# bypass flag was given); print guidance and abort the release on failure.
build_all() {
	echo "Building release"
	if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
		echo >&2
		echo >&2 'The build or tests appear to have failed.'
		echo >&2
		echo >&2 'You, as the release maintainer, now have a couple options:'
		echo >&2 '- delay release and fix issues'
		echo >&2 '- delay release and fix issues'
		# NOTE(review): the repeated line above matches the committed text and
		# appears intentional (for emphasis) — confirm before "fixing".
		echo >&2 '- did we mention how important this is? issues need fixing :)'
		echo >&2
		echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
		echo >&2 ' really knows all the hairy problems at hand with the current release'
		echo >&2 ' issues) may bypass this checking by running this script again with the'
		echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip'
		echo >&2 ' running the test suite, and will only build the binaries and packages. Please'
		echo >&2 ' avoid using this if at all possible.'
		echo >&2
		echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass'
		echo >&2 ' should be used. If there are release issues, we should always err on the'
		echo >&2 ' side of caution.'
		echo >&2
		exit 1
	fi
}
|
||||
|
||||
# Upload one release artifact to S3, optionally copying it to a "latest"
# alias, and do the same for any md5/sha256 checksum files sitting next to it.
# $1 - local source path, $2 - s3:// destination, $3 - optional latest alias.
upload_release_build() {
	artifact="$1"
	target="$2"
	latestAlias="$3"

	echo
	echo "Uploading $artifact"
	echo " to $target"
	echo
	aws s3 cp --follow-symlinks --acl public-read "$artifact" "$target"
	if [ -n "$latestAlias" ]; then
		echo
		echo "Copying to $latestAlias"
		echo
		aws s3 cp --acl public-read "$target" "$latestAlias"
	fi

	# get hash files too (see hash_files() in hack/make.sh)
	for sum in md5 sha256; do
		if [ -e "$artifact.$sum" ]; then
			echo
			echo "Uploading $artifact.$sum"
			echo " to $target.$sum"
			echo
			aws s3 cp --follow-symlinks --acl public-read --content-type='text/plain' "$artifact.$sum" "$target.$sum"
			if [ -n "$latestAlias" ]; then
				echo
				echo "Copying to $latestAlias.$sum"
				echo
				aws s3 cp --acl public-read "$target.$sum" "$latestAlias.$sum"
			fi
		fi
	done
}
|
||||
|
||||
# Release the cross-compiled tgz/zip for one GOOS/GOARCH pair: map the Go
# platform names to uname-style names used in the S3 layout, verify the
# packaged archive exists, and hand it to upload_release_build.
# $1 - GOOS, $2 - GOARCH. Reads VERSION, BUCKET_PATH, NOLATEST.
release_build() {
	echo "Releasing binaries"
	GOOS=$1
	GOARCH=$2

	binDir=bundles/$VERSION/cross/$GOOS/$GOARCH
	tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH
	binary=docker-$VERSION
	zipExt=".tgz"
	binaryExt=""
	tgz=$binary$zipExt

	# Unless NOLATEST is set, also publish a docker-latest alias.
	latestBase=
	if [ -z "$NOLATEST" ]; then
		latestBase=docker-latest
	fi

	# we need to map our GOOS and GOARCH to uname values
	# see https://en.wikipedia.org/wiki/Uname
	# ie, GOOS=linux -> "uname -s"=Linux

	s3Os=$GOOS
	case "$s3Os" in
		darwin)
			s3Os=Darwin
			;;
		freebsd)
			s3Os=FreeBSD
			;;
		linux)
			s3Os=Linux
			;;
		windows)
			# this is windows: use the .zip and .exe extensions for the files.
			s3Os=Windows
			zipExt=".zip"
			binaryExt=".exe"
			tgz=$binary$zipExt
			binary+=$binaryExt
			;;
		*)
			echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'"
			exit 1
			;;
	esac

	s3Arch=$GOARCH
	case "$s3Arch" in
		amd64)
			s3Arch=x86_64
			;;
		386)
			s3Arch=i386
			;;
		arm)
			s3Arch=armel
			# someday, we might potentially support multiple GOARM values, in which case we might get armhf here too
			;;
		*)
			echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'"
			exit 1
			;;
	esac

	s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch"
	# latest=
	latestTgz=
	if [ "$latestBase" ]; then
		# commented out since we aren't uploading binaries right now.
		# latest="$s3Dir/$latestBase$binaryExt"
		# we don't include the $binaryExt because we don't want docker.exe.zip
		latestTgz="$s3Dir/$latestBase$zipExt"
	fi

	if [ ! -f "$tgzDir/$tgz" ]; then
		echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?"
		exit 1
	fi
	# disable binary uploads for now. Only providing tgz downloads
	# upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest"
	upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"
}
|
||||
|
||||
# Upload binaries and tgz files to S3
|
||||
# Upload binaries and tgz files to S3
# Iterates over every cross-compiled platform under bundles/$VERSION/cross,
# releases each one, writes the install index, and advertises the version.
release_binaries() {
	# Sanity check: make.sh must have produced the linux/amd64 build.
	[ -e "bundles/$VERSION/cross/linux/amd64/docker-$VERSION" ] || {
		echo >&2 './hack/make.sh must be run before release_binaries'
		exit 1
	}

	# Directory layout is bundles/$VERSION/cross/$GOOS/$GOARCH.
	for d in bundles/$VERSION/cross/*/*; do
		GOARCH="$(basename "$d")"
		GOOS="$(basename "$(dirname "$d")")"
		release_build "$GOOS" "$GOARCH"
	done

	# TODO create redirect from builds/*/i686 to builds/*/i386

	# Publish the human-readable install snippet at /builds/index.
	cat <<EOF | write_to_s3 s3://$BUCKET_PATH/builds/index
# To install, run the following commands as root:
curl -fsSLO $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz && tar --strip-components=1 -xvzf docker-$VERSION.tgz -C /usr/local/bin

# Then start docker in daemon mode:
/usr/local/bin/dockerd
EOF

	# Add redirect at /builds/info for URL-backwards-compatibility
	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
	aws s3 cp --acl public-read --website-redirect '/builds/' --content-type='text/plain' /tmp/emptyfile "s3://$BUCKET_PATH/builds/info"

	if [ -z "$NOLATEST" ]; then
		echo "Advertising $VERSION on $BUCKET_PATH as most recent version"
		echo "$VERSION" | write_to_s3 "s3://$BUCKET_PATH/latest"
	fi
}
|
||||
|
||||
# Upload the index script
|
||||
# Upload the install script as the bucket's index document.
release_index() {
	echo "Releasing index"
	# install-script embeds the public URL of this bucket.
	url="$(s3_url)/" hack/make.sh install-script
	< "bundles/$VERSION/install-script/install.sh" write_to_s3 "s3://$BUCKET_PATH/index"
}
|
||||
|
||||
# Build, upload and announce the release; each step aborts on failure
# (set -e / set -o pipefail at the top of the file).
main() {
	build_all
	setup_s3
	release_binaries
	release_index
}

main

# Print the announcement text with the final download URLs.
echo
echo
echo "Release complete; see $(s3_url)"
echo "Use the following text to announce the release:"
echo
echo "We have just pushed $VERSION to $(s3_url). You can download it with the following:"
echo
echo "Linux 64bit tgz: $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz"
echo "Darwin/OSX 64bit client tgz: $(s3_url)/builds/Darwin/x86_64/docker-$VERSION.tgz"
echo "Windows 64bit zip: $(s3_url)/builds/Windows/x86_64/docker-$VERSION.zip"
echo "Windows 32bit client zip: $(s3_url)/builds/Windows/i386/docker-$VERSION.zip"
echo
|
61
vendor/github.com/containers/storage/hack/vendor.sh
generated
vendored
Executable file
61
vendor/github.com/containers/storage/hack/vendor.sh
generated
vendored
Executable file
|
@ -0,0 +1,61 @@
|
|||
#!/usr/bin/env bash
set -e

# this script is used to update vendored dependencies
#
# Usage:
# vendor.sh                                     revendor all dependencies
# vendor.sh github.com/docker/engine-api        revendor only the engine-api dependency.
# vendor.sh github.com/docker/engine-api v0.3.3 vendor only engine-api at the specified tag/commit.
# vendor.sh git github.com/docker/engine-api v0.3.3 is the same but specifies the VCS for cases where the VCS is something else than git
# vendor.sh git golang.org/x/sys eb2c74142fd19a79b3f237334c7384d5167b1b46 https://github.com/golang/sys.git vendor only golang.org/x/sys downloading from the specified URL

cd "$(dirname "$BASH_SOURCE")/.."
# Provides the clone() and clean() helpers used below.
source 'hack/.vendor-helpers.sh'

case $# in
0)
	rm -rf vendor/
	;;
# If user passed arguments to the script
1)
	# Re-run just the 'clone' line below that mentions the requested package,
	# by grepping it out of this very script ($0) and eval'ing it.
	eval "$(grep -E "^clone [^ ]+ $1" "$0")"
	clean
	exit 0
	;;
2)
	rm -rf "vendor/src/$1"
	clone git "$1" "$2"
	clean
	exit 0
	;;
[34])
	# 3 or 4 args: explicit VCS (and optionally an alternate clone URL).
	rm -rf "vendor/src/$2"
	clone "$@"
	clean
	exit 0
	;;
*)
	>&2 echo "error: unexpected parameters"
	exit 1
	;;
esac

# the following lines are in sorted order, FYI
clone git github.com/Microsoft/hcsshim v0.3.6
clone git github.com/Microsoft/go-winio v0.3.4
clone git github.com/Sirupsen/logrus v0.10.0 # logrus is a common dependency among multiple deps
# forked golang.org/x/net package includes a patch for lazy loading trace templates
clone git golang.org/x/net 2beffdc2e92c8a3027590f898fe88f69af48a3f8 https://github.com/tonistiigi/net.git
clone git golang.org/x/sys eb2c74142fd19a79b3f237334c7384d5167b1b46 https://github.com/golang/sys.git
clone git github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3
clone git github.com/docker/go-connections fa2850ff103453a9ad190da0df0af134f0314b3d
clone git github.com/docker/engine-api 1d247454d4307fb1ddf10d09fd2996394b085904
# get graph and distribution packages
clone git github.com/vbatts/tar-split v0.9.13
# get go-zfs packages
clone git github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
clone git github.com/pborman/uuid v1.0
clone git github.com/opencontainers/runc cc29e3dded8e27ba8f65738f40d251c885030a28 # libcontainer

clean
|
Loading…
Add table
Add a link
Reference in a new issue