vendor: remove dep and use vndr
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
This commit is contained in:
parent
16f44674a4
commit
148e72d81e
16131 changed files with 73815 additions and 4235138 deletions
3
vendor/github.com/containers/storage/.dockerignore
generated
vendored
3
vendor/github.com/containers/storage/.dockerignore
generated
vendored
|
@ -1,3 +0,0 @@
|
|||
bundles
|
||||
.gopath
|
||||
vendor/pkg
|
32
vendor/github.com/containers/storage/.gitignore
generated
vendored
32
vendor/github.com/containers/storage/.gitignore
generated
vendored
|
@ -1,32 +0,0 @@
|
|||
# Docker project generated files to ignore
|
||||
# if you want to ignore files created by your editor/tools,
|
||||
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
|
||||
*.exe
|
||||
*.exe~
|
||||
*.orig
|
||||
*.test
|
||||
.*.swp
|
||||
.DS_Store
|
||||
# a .bashrc may be added to customize the build environment
|
||||
.bashrc
|
||||
.gopath/
|
||||
autogen/
|
||||
bundles/
|
||||
cmd/dockerd/dockerd
|
||||
cmd/docker/docker
|
||||
dockerversion/version_autogen.go
|
||||
docs/AWS_S3_BUCKET
|
||||
docs/GITCOMMIT
|
||||
docs/GIT_BRANCH
|
||||
docs/VERSION
|
||||
docs/_build
|
||||
docs/_static
|
||||
docs/_templates
|
||||
docs/changed-files
|
||||
# generated by man/md2man-all.sh
|
||||
man/man1
|
||||
man/man5
|
||||
man/man8
|
||||
vendor/pkg/
|
||||
.vagrant
|
||||
storageversion/version_autogen.go
|
254
vendor/github.com/containers/storage/.mailmap
generated
vendored
254
vendor/github.com/containers/storage/.mailmap
generated
vendored
|
@ -1,254 +0,0 @@
|
|||
# Generate AUTHORS: hack/generate-authors.sh
|
||||
|
||||
# Tip for finding duplicates (besides scanning the output of AUTHORS for name
|
||||
# duplicates that aren't also email duplicates): scan the output of:
|
||||
# git log --format='%aE - %aN' | sort -uf
|
||||
#
|
||||
# For explanation on this file format: man git-shortlog
|
||||
|
||||
Patrick Stapleton <github@gdi2290.com>
|
||||
Shishir Mahajan <shishir.mahajan@redhat.com> <smahajan@redhat.com>
|
||||
Erwin van der Koogh <info@erronis.nl>
|
||||
Ahmed Kamal <email.ahmedkamal@googlemail.com>
|
||||
Tejesh Mehta <tejesh.mehta@gmail.com> <tj@init.me>
|
||||
Cristian Staretu <cristian.staretu@gmail.com>
|
||||
Cristian Staretu <cristian.staretu@gmail.com> <unclejacksons@gmail.com>
|
||||
Cristian Staretu <cristian.staretu@gmail.com> <unclejack@users.noreply.github.com>
|
||||
Marcus Linke <marcus.linke@gmx.de>
|
||||
Aleksandrs Fadins <aleks@s-ko.net>
|
||||
Christopher Latham <sudosurootdev@gmail.com>
|
||||
Hu Keping <hukeping@huawei.com>
|
||||
Wayne Chang <wayne@neverfear.org>
|
||||
Chen Chao <cc272309126@gmail.com>
|
||||
Daehyeok Mun <daehyeok@gmail.com>
|
||||
<daehyeok@gmail.com> <daehyeok@daehyeokui-MacBook-Air.local>
|
||||
<jt@yadutaf.fr> <admin@jtlebi.fr>
|
||||
<jeff@docker.com> <jefferya@programmerq.net>
|
||||
<charles.hooper@dotcloud.com> <chooper@plumata.com>
|
||||
<daniel.mizyrycki@dotcloud.com> <daniel@dotcloud.com>
|
||||
<daniel.mizyrycki@dotcloud.com> <mzdaniel@glidelink.net>
|
||||
Guillaume J. Charmes <guillaume.charmes@docker.com> <charmes.guillaume@gmail.com>
|
||||
<guillaume.charmes@docker.com> <guillaume@dotcloud.com>
|
||||
<guillaume.charmes@docker.com> <guillaume@docker.com>
|
||||
<guillaume.charmes@docker.com> <guillaume.charmes@dotcloud.com>
|
||||
<guillaume.charmes@docker.com> <guillaume@charmes.net>
|
||||
<kencochrane@gmail.com> <KenCochrane@gmail.com>
|
||||
Thatcher Peskens <thatcher@docker.com>
|
||||
Thatcher Peskens <thatcher@docker.com> <thatcher@dotcloud.com>
|
||||
Thatcher Peskens <thatcher@docker.com> dhrp <thatcher@gmx.net>
|
||||
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com> jpetazzo <jerome.petazzoni@dotcloud.com>
|
||||
Jérôme Petazzoni <jerome.petazzoni@dotcloud.com> <jp@enix.org>
|
||||
Joffrey F <joffrey@docker.com>
|
||||
Joffrey F <joffrey@docker.com> <joffrey@dotcloud.com>
|
||||
Joffrey F <joffrey@docker.com> <f.joffrey@gmail.com>
|
||||
Tim Terhorst <mynamewastaken+git@gmail.com>
|
||||
Andy Smith <github@anarkystic.com>
|
||||
<kalessin@kalessin.fr> <louis@dotcloud.com>
|
||||
<victor.vieux@docker.com> <victor.vieux@dotcloud.com>
|
||||
<victor.vieux@docker.com> <victor@dotcloud.com>
|
||||
<victor.vieux@docker.com> <dev@vvieux.com>
|
||||
<victor.vieux@docker.com> <victor@docker.com>
|
||||
<victor.vieux@docker.com> <vieux@docker.com>
|
||||
<victor.vieux@docker.com> <victorvieux@gmail.com>
|
||||
<dominik@honnef.co> <dominikh@fork-bomb.org>
|
||||
<ehanchrow@ine.com> <eric.hanchrow@gmail.com>
|
||||
Walter Stanish <walter@pratyeka.org>
|
||||
<daniel@gasienica.ch> <dgasienica@zynga.com>
|
||||
Roberto Hashioka <roberto_hashioka@hotmail.com>
|
||||
Konstantin Pelykh <kpelykh@zettaset.com>
|
||||
David Sissitka <me@dsissitka.com>
|
||||
Nolan Darilek <nolan@thewordnerd.info>
|
||||
<mastahyeti@gmail.com> <mastahyeti@users.noreply.github.com>
|
||||
Benoit Chesneau <bchesneau@gmail.com>
|
||||
Jordan Arentsen <blissdev@gmail.com>
|
||||
Daniel Garcia <daniel@danielgarcia.info>
|
||||
Miguel Angel Fernández <elmendalerenda@gmail.com>
|
||||
Bhiraj Butala <abhiraj.butala@gmail.com>
|
||||
Faiz Khan <faizkhan00@gmail.com>
|
||||
Victor Lyuboslavsky <victor@victoreda.com>
|
||||
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
|
||||
Matthew Mueller <mattmuelle@gmail.com>
|
||||
<mosoni@ebay.com> <mohitsoni1989@gmail.com>
|
||||
Shih-Yuan Lee <fourdollars@gmail.com>
|
||||
Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com> root <root@vagrant-ubuntu-12.10.vagrantup.com>
|
||||
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
|
||||
<proppy@google.com> <proppy@aminche.com>
|
||||
<michael@docker.com> <michael@crosbymichael.com>
|
||||
<michael@docker.com> <crosby.michael@gmail.com>
|
||||
<michael@docker.com> <crosbymichael@gmail.com>
|
||||
<github@developersupport.net> <github@metaliveblog.com>
|
||||
<brandon@ifup.org> <brandon@ifup.co>
|
||||
<dano@spotify.com> <daniel.norberg@gmail.com>
|
||||
<danny@codeaholics.org> <Danny.Yates@mailonline.co.uk>
|
||||
<gurjeet@singh.im> <singh.gurjeet@gmail.com>
|
||||
<shawn@churchofgit.com> <shawnlandden@gmail.com>
|
||||
<sjoerd-github@linuxonly.nl> <sjoerd@byte.nl>
|
||||
<solomon@docker.com> <solomon.hykes@dotcloud.com>
|
||||
<solomon@docker.com> <solomon@dotcloud.com>
|
||||
<solomon@docker.com> <s@docker.com>
|
||||
Sven Dowideit <SvenDowideit@home.org.au>
|
||||
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@fosiki.com>
|
||||
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@docker.com>
|
||||
Sven Dowideit <SvenDowideit@home.org.au> <¨SvenDowideit@home.org.au¨>
|
||||
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@home.org.au>
|
||||
Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@users.noreply.github.com>
|
||||
Sven Dowideit <SvenDowideit@home.org.au> <sven@t440s.home.gateway>
|
||||
<alexl@redhat.com> <alexander.larsson@gmail.com>
|
||||
Alexander Morozov <lk4d4@docker.com> <lk4d4math@gmail.com>
|
||||
Alexander Morozov <lk4d4@docker.com>
|
||||
<git.nivoc@neverbox.com> <kuehnle@online.de>
|
||||
O.S. Tezer <ostezer@gmail.com>
|
||||
<ostezer@gmail.com> <ostezer@users.noreply.github.com>
|
||||
Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
|
||||
<justin.p.simonelis@gmail.com> <justin.simonelis@PTS-JSIMON2.toronto.exclamation.com>
|
||||
<taim@bosboot.org> <maztaim@users.noreply.github.com>
|
||||
<viktor.vojnovski@amadeus.com> <vojnovski@gmail.com>
|
||||
<vbatts@redhat.com> <vbatts@hashbangbash.com>
|
||||
<altsysrq@gmail.com> <iamironbob@gmail.com>
|
||||
Sridhar Ratnakumar <sridharr@activestate.com>
|
||||
Sridhar Ratnakumar <sridharr@activestate.com> <github@srid.name>
|
||||
Liang-Chi Hsieh <viirya@gmail.com>
|
||||
Aleksa Sarai <asarai@suse.de>
|
||||
Aleksa Sarai <asarai@suse.de> <asarai@suse.com>
|
||||
Aleksa Sarai <asarai@suse.de> <cyphar@cyphar.com>
|
||||
Will Weaver <monkey@buildingbananas.com>
|
||||
Timothy Hobbs <timothyhobbs@seznam.cz>
|
||||
Nathan LeClaire <nathan.leclaire@docker.com> <nathan.leclaire@gmail.com>
|
||||
Nathan LeClaire <nathan.leclaire@docker.com> <nathanleclaire@gmail.com>
|
||||
<github@hollensbe.org> <erik+github@hollensbe.org>
|
||||
<github@albersweb.de> <albers@users.noreply.github.com>
|
||||
<lsm5@fedoraproject.org> <lsm5@redhat.com>
|
||||
<marc@marc-abramowitz.com> <msabramo@gmail.com>
|
||||
Matthew Heon <mheon@redhat.com> <mheon@mheonlaptop.redhat.com>
|
||||
<bernat@luffy.cx> <vincent@bernat.im>
|
||||
<bernat@luffy.cx> <Vincent.Bernat@exoscale.ch>
|
||||
<p@pwaller.net> <peter@scraperwiki.com>
|
||||
<andrew.weiss@outlook.com> <andrew.weiss@microsoft.com>
|
||||
Francisco Carriedo <fcarriedo@gmail.com>
|
||||
<julienbordellier@gmail.com> <git@julienbordellier.com>
|
||||
<ahmetb@microsoft.com> <ahmetalpbalkan@gmail.com>
|
||||
<arnaud.porterie@docker.com> <icecrime@gmail.com>
|
||||
<baloo@gandi.net> <superbaloo+registrations.github@superbaloo.net>
|
||||
Brian Goff <cpuguy83@gmail.com>
|
||||
<cpuguy83@gmail.com> <bgoff@cpuguy83-mbp.home>
|
||||
<eric@windisch.us> <ewindisch@docker.com>
|
||||
<frank.rosquin+github@gmail.com> <frank.rosquin@gmail.com>
|
||||
Hollie Teal <hollie@docker.com>
|
||||
<hollie@docker.com> <hollie.teal@docker.com>
|
||||
<hollie@docker.com> <hollietealok@users.noreply.github.com>
|
||||
<huu@prismskylabs.com> <whoshuu@gmail.com>
|
||||
Jessica Frazelle <jess@mesosphere.com>
|
||||
Jessica Frazelle <jess@mesosphere.com> <jfrazelle@users.noreply.github.com>
|
||||
Jessica Frazelle <jess@mesosphere.com> <acidburn@docker.com>
|
||||
Jessica Frazelle <jess@mesosphere.com> <jess@docker.com>
|
||||
Jessica Frazelle <jess@mesosphere.com> <princess@docker.com>
|
||||
<konrad.wilhelm.kleine@gmail.com> <kwk@users.noreply.github.com>
|
||||
<tintypemolly@gmail.com> <tintypemolly@Ohui-MacBook-Pro.local>
|
||||
<estesp@linux.vnet.ibm.com> <estesp@gmail.com>
|
||||
<github@gone.nl> <thaJeztah@users.noreply.github.com>
|
||||
Thomas LEVEIL <thomasleveil@gmail.com> Thomas LÉVEIL <thomasleveil@users.noreply.github.com>
|
||||
<oi@truffles.me.uk> <timruffles@googlemail.com>
|
||||
<Vincent.Bernat@exoscale.ch> <bernat@luffy.cx>
|
||||
Antonio Murdaca <antonio.murdaca@gmail.com> <amurdaca@redhat.com>
|
||||
Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@redhat.com>
|
||||
Antonio Murdaca <antonio.murdaca@gmail.com> <me@runcom.ninja>
|
||||
Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@linux.com>
|
||||
Antonio Murdaca <antonio.murdaca@gmail.com> <runcom@users.noreply.github.com>
|
||||
Darren Shepherd <darren.s.shepherd@gmail.com> <darren@rancher.com>
|
||||
Deshi Xiao <dxiao@redhat.com> <dsxiao@dataman-inc.com>
|
||||
Deshi Xiao <dxiao@redhat.com> <xiaods@gmail.com>
|
||||
Doug Davis <dug@us.ibm.com> <duglin@users.noreply.github.com>
|
||||
Jacob Atzen <jacob@jacobatzen.dk> <jatzen@gmail.com>
|
||||
Jeff Nickoloff <jeff.nickoloff@gmail.com> <jeff@allingeek.com>
|
||||
John Howard (VM) <John.Howard@microsoft.com> <jhowardmsft@users.noreply.github.com>
|
||||
John Howard (VM) <John.Howard@microsoft.com>
|
||||
John Howard (VM) <John.Howard@microsoft.com> <john.howard@microsoft.com>
|
||||
John Howard (VM) <John.Howard@microsoft.com> <jhoward@microsoft.com>
|
||||
Madhu Venugopal <madhu@socketplane.io> <madhu@docker.com>
|
||||
Mary Anthony <mary.anthony@docker.com> <mary@docker.com>
|
||||
Mary Anthony <mary.anthony@docker.com> moxiegirl <mary@docker.com>
|
||||
Mary Anthony <mary.anthony@docker.com> <moxieandmore@gmail.com>
|
||||
mattyw <mattyw@me.com> <gh@mattyw.net>
|
||||
resouer <resouer@163.com> <resouer@gmail.com>
|
||||
AJ Bowen <aj@gandi.net> soulshake <amy@gandi.net>
|
||||
AJ Bowen <aj@gandi.net> soulshake <aj@gandi.net>
|
||||
Tibor Vass <teabee89@gmail.com> <tibor@docker.com>
|
||||
Tibor Vass <teabee89@gmail.com> <tiborvass@users.noreply.github.com>
|
||||
Vincent Bernat <bernat@luffy.cx> <Vincent.Bernat@exoscale.ch>
|
||||
Yestin Sun <sunyi0804@gmail.com> <yestin.sun@polyera.com>
|
||||
bin liu <liubin0329@users.noreply.github.com> <liubin0329@gmail.com>
|
||||
John Howard (VM) <John.Howard@microsoft.com> jhowardmsft <jhoward@microsoft.com>
|
||||
Ankush Agarwal <ankushagarwal11@gmail.com> <ankushagarwal@users.noreply.github.com>
|
||||
Tangi COLIN <tangicolin@gmail.com> tangicolin <tangicolin@gmail.com>
|
||||
Allen Sun <allen.sun@daocloud.io>
|
||||
Adrien Gallouët <adrien@gallouet.fr> <angt@users.noreply.github.com>
|
||||
<aanm90@gmail.com> <martins@noironetworks.com>
|
||||
Anuj Bahuguna <anujbahuguna.dev@gmail.com>
|
||||
Anusha Ragunathan <anusha.ragunathan@docker.com> <anusha@docker.com>
|
||||
Avi Miller <avi.miller@oracle.com> <avi.miller@gmail.com>
|
||||
Brent Salisbury <brent.salisbury@docker.com> <brent@docker.com>
|
||||
Chander G <chandergovind@gmail.com>
|
||||
Chun Chen <ramichen@tencent.com> <chenchun.feed@gmail.com>
|
||||
Ying Li <cyli@twistedmatrix.com>
|
||||
Daehyeok Mun <daehyeok@gmail.com> <daehyeok@daehyeok-ui-MacBook-Air.local>
|
||||
<dqminh@cloudflare.com> <dqminh89@gmail.com>
|
||||
Daniel, Dao Quang Minh <dqminh@cloudflare.com>
|
||||
Daniel Nephin <dnephin@docker.com> <dnephin@gmail.com>
|
||||
Dave Tucker <dt@docker.com> <dave@dtucker.co.uk>
|
||||
Doug Tangren <d.tangren@gmail.com>
|
||||
Frederick F. Kautz IV <fkautz@redhat.com> <fkautz@alumni.cmu.edu>
|
||||
Ben Golub <ben.golub@dotcloud.com>
|
||||
Harold Cooper <hrldcpr@gmail.com>
|
||||
hsinko <21551195@zju.edu.cn> <hsinko@users.noreply.github.com>
|
||||
Josh Hawn <josh.hawn@docker.com> <jlhawn@berkeley.edu>
|
||||
Justin Cormack <justin.cormack@docker.com>
|
||||
<justin.cormack@docker.com> <justin.cormack@unikernel.com>
|
||||
<justin.cormack@docker.com> <justin@specialbusservice.com>
|
||||
Kamil Domański <kamil@domanski.co>
|
||||
Lei Jitang <leijitang@huawei.com>
|
||||
<leijitang@huawei.com> <leijitang@gmail.com>
|
||||
Linus Heckemann <lheckemann@twig-world.com>
|
||||
<lheckemann@twig-world.com> <anonymouse2048@gmail.com>
|
||||
Lynda O'Leary <lyndaoleary29@gmail.com>
|
||||
<lyndaoleary29@gmail.com> <lyndaoleary@hotmail.com>
|
||||
Marianna Tessel <mtesselh@gmail.com>
|
||||
Michael Huettermann <michael@huettermann.net>
|
||||
Moysés Borges <moysesb@gmail.com>
|
||||
<moysesb@gmail.com> <moyses.furtado@wplex.com.br>
|
||||
Nigel Poulton <nigelpoulton@hotmail.com>
|
||||
Qiang Huang <h.huangqiang@huawei.com>
|
||||
<h.huangqiang@huawei.com> <qhuang@10.0.2.15>
|
||||
Boaz Shuster <ripcurld.github@gmail.com>
|
||||
Shuwei Hao <haosw@cn.ibm.com>
|
||||
<haosw@cn.ibm.com> <haoshuwei24@gmail.com>
|
||||
Soshi Katsuta <soshi.katsuta@gmail.com>
|
||||
<soshi.katsuta@gmail.com> <katsuta_soshi@cyberagent.co.jp>
|
||||
Stefan Berger <stefanb@linux.vnet.ibm.com>
|
||||
<stefanb@linux.vnet.ibm.com> <stefanb@us.ibm.com>
|
||||
Stephen Day <stephen.day@docker.com>
|
||||
<stephen.day@docker.com> <stevvooe@users.noreply.github.com>
|
||||
Toli Kuznets <toli@docker.com>
|
||||
Tristan Carel <tristan@cogniteev.com>
|
||||
<tristan@cogniteev.com> <tristan.carel@gmail.com>
|
||||
Vincent Demeester <vincent@sbr.pm>
|
||||
<vincent@sbr.pm> <vincent+github@demeester.fr>
|
||||
Vishnu Kannan <vishnuk@google.com>
|
||||
xlgao-zju <xlgao@zju.edu.cn> xlgao <xlgao@zju.edu.cn>
|
||||
yuchangchun <yuchangchun1@huawei.com> y00277921 <yuchangchun1@huawei.com>
|
||||
<zij@case.edu> <zjaffee@us.ibm.com>
|
||||
<anujbahuguna.dev@gmail.com> <abahuguna@fiberlink.com>
|
||||
<eungjun.yi@navercorp.com> <semtlenori@gmail.com>
|
||||
<haosw@cn.ibm.com> <haoshuwei1989@163.com>
|
||||
Hao Shu Wei <haosw@cn.ibm.com>
|
||||
<matt.bentley@docker.com> <mbentley@mbentley.net>
|
||||
<MihaiBorob@gmail.com> <MihaiBorobocea@gmail.com>
|
||||
<redmond.martin@gmail.com> <xgithub@redmond5.com>
|
||||
<redmond.martin@gmail.com> <martin@tinychat.com>
|
||||
<srbrahma@us.ibm.com> <sbrahma@us.ibm.com>
|
||||
<suda.akihiro@lab.ntt.co.jp> <suda.kyoto@gmail.com>
|
||||
<thomas@gazagnaire.org> <thomas@gazagnaire.com>
|
||||
Shengbo Song <thomassong@tencent.com> mYmNeo <mymneo@163.com>
|
||||
Shengbo Song <thomassong@tencent.com>
|
||||
<sylvain@ascribe.io> <sylvain.bellemare@ezeep.com>
|
||||
Sylvain Bellemare <sylvain@ascribe.io>
|
||||
|
19
vendor/github.com/containers/storage/.tool/lint
generated
vendored
19
vendor/github.com/containers/storage/.tool/lint
generated
vendored
|
@ -1,19 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
for d in $(find . -type d -not -iwholename '*.git*' -a -not -iname '.tool' -a -not -iwholename '*vendor*'); do
|
||||
gometalinter \
|
||||
--exclude='error return value not checked.*(Close|Log|Print).*\(errcheck\)$' \
|
||||
--exclude='.*_test\.go:.*error return value not checked.*\(errcheck\)$' \
|
||||
--exclude='duplicate of.*_test.go.*\(dupl\)$' \
|
||||
--disable=aligncheck \
|
||||
--disable=gotype \
|
||||
--disable=gas \
|
||||
--cyclo-over=50 \
|
||||
--dupl-threshold=100 \
|
||||
--tests \
|
||||
--deadline=30s "${d}"
|
||||
done
|
18
vendor/github.com/containers/storage/.travis.yml
generated
vendored
18
vendor/github.com/containers/storage/.travis.yml
generated
vendored
|
@ -1,18 +0,0 @@
|
|||
language: go
|
||||
go:
|
||||
- tip
|
||||
- 1.7
|
||||
- 1.6
|
||||
dist: trusty
|
||||
sudo: required
|
||||
before_install:
|
||||
- sudo apt-get -qq update
|
||||
- sudo apt-get -qq install btrfs-tools libdevmapper-dev
|
||||
script:
|
||||
- make install.tools
|
||||
- ./hack/make.sh validate-gofmt validate-pkg validate-lint validate-test validate-toml validate-vet
|
||||
- make .gitvalidation
|
||||
- make build-binary
|
||||
- ./hack/make.sh cross
|
||||
- sudo -E env "PATH=${PATH}" ./hack/make.sh test-unit
|
||||
- make docs
|
1522
vendor/github.com/containers/storage/AUTHORS
generated
vendored
1522
vendor/github.com/containers/storage/AUTHORS
generated
vendored
File diff suppressed because it is too large
Load diff
85
vendor/github.com/containers/storage/Makefile
generated
vendored
85
vendor/github.com/containers/storage/Makefile
generated
vendored
|
@ -1,85 +0,0 @@
|
|||
.PHONY: all binary build build-binary build-gccgo bundles cross default docs gccgo test test-integration-cli test-unit validate help win tgz
|
||||
|
||||
# set the graph driver as the current graphdriver if not set
|
||||
DRIVER := $(if $(STORAGE_DRIVER),$(STORAGE_DRIVER),$(if $(DOCKER_GRAPHDRIVER),DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //'))
|
||||
|
||||
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
|
||||
GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
|
||||
EPOCH_TEST_COMMIT := 0418ebf59f9e1f564831c0ba9378b7f8e40a1c73
|
||||
SYSTEM_GOPATH := ${GOPATH}
|
||||
|
||||
RUNINVM := vagrant/runinvm.sh
|
||||
|
||||
default all: build ## validate all checks, build linux binaries, run all tests\ncross build non-linux binaries and generate archives\nusing VMs
|
||||
$(RUNINVM) hack/make.sh
|
||||
|
||||
build build-binary: bundles ## build using go on the host
|
||||
hack/make.sh binary
|
||||
|
||||
build-gccgo: bundles ## build using gccgo on the host
|
||||
hack/make.sh gccgo
|
||||
|
||||
binary: bundles
|
||||
$(RUNINVM) hack/make.sh binary
|
||||
|
||||
bundles:
|
||||
mkdir -p bundles
|
||||
|
||||
cross: build ## cross build the binaries for darwin, freebsd and windows\nusing VMs
|
||||
$(RUNINVM) hack/make.sh binary cross
|
||||
|
||||
win: build ## cross build the binary for windows using VMs
|
||||
$(RUNINVM) hack/make.sh win
|
||||
|
||||
tgz: build ## build the archives (.zip on windows and .tgz otherwise)\ncontaining the binaries on the host
|
||||
hack/make.sh binary cross tgz
|
||||
|
||||
docs: ## build the docs on the host
|
||||
$(MAKE) -C docs docs
|
||||
|
||||
gccgo: build-gccgo ## build the gcc-go linux binaries using VMs
|
||||
$(RUNINVM) hack/make.sh gccgo
|
||||
|
||||
test: build ## run the unit and integration tests using VMs
|
||||
$(RUNINVM) hack/make.sh binary cross test-unit
|
||||
|
||||
test-unit: build ## run the unit tests using VMs
|
||||
$(RUNINVM) hack/make.sh test-unit
|
||||
|
||||
validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor\nusing VMs
|
||||
$(RUNINVM) hack/make.sh validate-dco validate-gofmt validate-pkg validate-lint validate-test validate-toml validate-vet
|
||||
|
||||
lint:
|
||||
@which gometalinter > /dev/null 2>/dev/null || (echo "ERROR: gometalinter not found. Consider 'make install.tools' target" && false)
|
||||
@echo "checking lint"
|
||||
@./.tool/lint
|
||||
|
||||
.PHONY: .gitvalidation
|
||||
# When this is running in travis, it will only check the travis commit range
|
||||
.gitvalidation:
|
||||
@which git-validation > /dev/null 2>/dev/null || (echo "ERROR: git-validation not found. Consider 'make install.tools' target" && false)
|
||||
ifeq ($(TRAVIS_EVENT_TYPE),pull_request)
|
||||
git-validation -q -run DCO,short-subject
|
||||
else ifeq ($(TRAVIS_EVENT_TYPE),push)
|
||||
git-validation -q -run DCO,short-subject -no-travis -range $(EPOCH_TEST_COMMIT)..$(TRAVIS_BRANCH)
|
||||
else
|
||||
git-validation -q -run DCO,short-subject -range $(EPOCH_TEST_COMMIT)..HEAD
|
||||
endif
|
||||
|
||||
.PHONY: install.tools
|
||||
|
||||
install.tools: .install.gitvalidation .install.gometalinter .install.md2man
|
||||
|
||||
.install.gitvalidation:
|
||||
GOPATH=${SYSTEM_GOPATH} go get github.com/vbatts/git-validation
|
||||
|
||||
.install.gometalinter:
|
||||
GOPATH=${SYSTEM_GOPATH} go get github.com/alecthomas/gometalinter
|
||||
GOPATH=${SYSTEM_GOPATH} gometalinter --install
|
||||
|
||||
.install.md2man:
|
||||
GOPATH=${SYSTEM_GOPATH} go get github.com/cpuguy83/go-md2man
|
||||
|
||||
help: ## this help
|
||||
@awk 'BEGIN {FS = ":.*?## "} /^[a-z A-Z_-]+:.*?## / {gsub(" ",",",$$1);gsub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-21s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
|
||||
|
1
vendor/github.com/containers/storage/VERSION
generated
vendored
1
vendor/github.com/containers/storage/VERSION
generated
vendored
|
@ -1 +0,0 @@
|
|||
0.1-dev
|
25
vendor/github.com/containers/storage/Vagrantfile
generated
vendored
25
vendor/github.com/containers/storage/Vagrantfile
generated
vendored
|
@ -1,25 +0,0 @@
|
|||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
#
|
||||
# The fedora/25-cloud-base and debian/jessie64 boxes are also available for
|
||||
# the "virtualbox" provider. Set the VAGRANT_PROVIDER environment variable to
|
||||
# "virtualbox" to use them instead.
|
||||
#
|
||||
Vagrant.configure("2") do |config|
|
||||
config.vm.define "fedora" do |c|
|
||||
c.vm.box = "fedora/25-cloud-base"
|
||||
c.vm.synced_folder ".", "/vagrant", type: "rsync",
|
||||
rsync__exclude: "bundles", rsync__args: "-vadz"
|
||||
c.vm.provision "shell", inline: <<-SHELL
|
||||
sudo /vagrant/vagrant/provision.sh
|
||||
SHELL
|
||||
end
|
||||
config.vm.define "debian" do |c|
|
||||
c.vm.box = "debian/jessie64"
|
||||
c.vm.synced_folder ".", "/vagrant", type: "rsync",
|
||||
rsync__exclude: "bundles", rsync__args: "-vadz"
|
||||
c.vm.provision "shell", inline: <<-SHELL
|
||||
sudo /vagrant/vagrant/provision.sh
|
||||
SHELL
|
||||
end
|
||||
end
|
31
vendor/github.com/containers/storage/cmd/oci-storage/README.md
generated
vendored
31
vendor/github.com/containers/storage/cmd/oci-storage/README.md
generated
vendored
|
@ -1,31 +0,0 @@
|
|||
This is `oci-storage`, a command line tool for manipulating a layer store.
|
||||
|
||||
It depends on `storage`, which is a pretty barebones wrapping of the
|
||||
graph drivers that exposes the create/mount/unmount/delete operations
|
||||
and adds enough bookkeeping to know about the relationships between
|
||||
layers.
|
||||
|
||||
On top of that, `storage` provides a notion of a reference to a layer
|
||||
which is paired with arbitrary user data (i.e., an `image`, that data
|
||||
being history and configuration metadata). It also provides a notion of
|
||||
a type of layer, which is typically the child of an image's topmost
|
||||
layer, to which arbitrary data is directly attached (i.e., a
|
||||
`container`, where the data is typically configuration).
|
||||
|
||||
Layers, images, and containers are each identified using IDs which can
|
||||
be set when they are created (if not set, random values are generated),
|
||||
and can optionally be assigned names which are resolved to IDs
|
||||
automatically by the various APIs.
|
||||
|
||||
The oci-storage tool is a CLI that wraps that as thinly as possible, so
|
||||
that other tooling can use it to import layers from images. Those other
|
||||
tools can then either manage the concept of images on their own, or let
|
||||
the API/CLI handle storing the image metadata and/or configuration.
|
||||
Likewise, other tools can create container layers and manage them on
|
||||
their own or use the API/CLI for storing what I assume will be container
|
||||
metadata and/or configurations.
|
||||
|
||||
Logic for importing images and creating and managing containers will
|
||||
most likely be implemented elsewhere, and if that implementation ends up
|
||||
not needing the API/CLI to provide a place to store data about images
|
||||
and containers, that functionality can be dropped.
|
205
vendor/github.com/containers/storage/cmd/oci-storage/container.go
generated
vendored
205
vendor/github.com/containers/storage/cmd/oci-storage/container.go
generated
vendored
|
@ -1,205 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/mflag"
|
||||
)
|
||||
|
||||
var (
|
||||
paramContainerDataFile = ""
|
||||
)
|
||||
|
||||
func container(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
images, err := m.Images()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
matches := []*storage.Container{}
|
||||
for _, arg := range args {
|
||||
if container, err := m.Container(arg); err == nil {
|
||||
matches = append(matches, container)
|
||||
}
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(matches)
|
||||
} else {
|
||||
for _, container := range matches {
|
||||
fmt.Printf("ID: %s\n", container.ID)
|
||||
for _, name := range container.Names {
|
||||
fmt.Printf("Name: %s\n", name)
|
||||
}
|
||||
fmt.Printf("Image: %s\n", container.ImageID)
|
||||
for _, image := range images {
|
||||
if image.ID == container.ImageID {
|
||||
for _, name := range image.Names {
|
||||
fmt.Printf("Image name: %s\n", name)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
fmt.Printf("Layer: %s\n", container.LayerID)
|
||||
for _, name := range container.BigDataNames {
|
||||
fmt.Printf("Data: %s\n", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(matches) != len(args) {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func listContainerBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
container, err := m.Container(args[0])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
d, err := m.ListContainerBigData(container.ID)
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(d)
|
||||
} else {
|
||||
for _, name := range d {
|
||||
fmt.Printf("%s\n", name)
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func getContainerBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
container, err := m.Container(args[0])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
output := os.Stdout
|
||||
if paramContainerDataFile != "" {
|
||||
f, err := os.Create(paramContainerDataFile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
output = f
|
||||
}
|
||||
b, err := m.ContainerBigData(container.ID, args[1])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
output.Write(b)
|
||||
output.Close()
|
||||
return 0
|
||||
}
|
||||
|
||||
func setContainerBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
container, err := m.Container(args[0])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
input := os.Stdin
|
||||
if paramContainerDataFile != "" {
|
||||
f, err := os.Open(paramContainerDataFile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
input = f
|
||||
}
|
||||
b, err := ioutil.ReadAll(input)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
err = m.SetContainerBigData(container.ID, args[1], b)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func getContainerDir(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
path, err := m.ContainerDirectory(args[0])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
fmt.Printf("%s\n", path)
|
||||
return 0
|
||||
}
|
||||
|
||||
func getContainerRunDir(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
path, err := m.ContainerRunDirectory(args[0])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
fmt.Printf("%s\n", path)
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
commands = append(commands,
|
||||
command{
|
||||
names: []string{"container"},
|
||||
optionsHelp: "[options [...]] containerNameOrID [...]",
|
||||
usage: "Examine a container",
|
||||
action: container,
|
||||
minArgs: 1,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
|
||||
},
|
||||
},
|
||||
command{
|
||||
names: []string{"list-container-data", "listcontainerdata"},
|
||||
optionsHelp: "[options [...]] containerNameOrID",
|
||||
usage: "List data items that are attached to an container",
|
||||
action: listContainerBigData,
|
||||
minArgs: 1,
|
||||
maxArgs: 1,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
|
||||
},
|
||||
},
|
||||
command{
|
||||
names: []string{"get-container-data", "getcontainerdata"},
|
||||
optionsHelp: "[options [...]] containerNameOrID dataName",
|
||||
usage: "Get data that is attached to an container",
|
||||
action: getContainerBigData,
|
||||
minArgs: 2,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.StringVar(¶mContainerDataFile, []string{"-file", "f"}, paramContainerDataFile, "Write data to file")
|
||||
},
|
||||
},
|
||||
command{
|
||||
names: []string{"set-container-data", "setcontainerdata"},
|
||||
optionsHelp: "[options [...]] containerNameOrID dataName",
|
||||
usage: "Set data that is attached to an container",
|
||||
action: setContainerBigData,
|
||||
minArgs: 2,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.StringVar(¶mContainerDataFile, []string{"-file", "f"}, paramContainerDataFile, "Read data from file")
|
||||
},
|
||||
},
|
||||
command{
|
||||
names: []string{"get-container-dir", "getcontainerdir"},
|
||||
optionsHelp: "[options [...]] containerNameOrID",
|
||||
usage: "Find the container's associated data directory",
|
||||
action: getContainerDir,
|
||||
minArgs: 1,
|
||||
},
|
||||
command{
|
||||
names: []string{"get-container-run-dir", "getcontainerrundir"},
|
||||
optionsHelp: "[options [...]] containerNameOrID",
|
||||
usage: "Find the container's associated runtime directory",
|
||||
action: getContainerRunDir,
|
||||
minArgs: 1,
|
||||
})
|
||||
}
|
45
vendor/github.com/containers/storage/cmd/oci-storage/containers.go
generated
vendored
45
vendor/github.com/containers/storage/cmd/oci-storage/containers.go
generated
vendored
|
@ -1,45 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/mflag"
|
||||
)
|
||||
|
||||
func containers(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
containers, err := m.Containers()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(containers)
|
||||
} else {
|
||||
for _, container := range containers {
|
||||
fmt.Printf("%s\n", container.ID)
|
||||
for _, name := range container.Names {
|
||||
fmt.Printf("\tname: %s\n", name)
|
||||
}
|
||||
for _, name := range container.BigDataNames {
|
||||
fmt.Printf("\tdata: %s\n", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
commands = append(commands, command{
|
||||
names: []string{"containers"},
|
||||
optionsHelp: "[options [...]]",
|
||||
usage: "List containers",
|
||||
action: containers,
|
||||
maxArgs: 0,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
|
||||
},
|
||||
})
|
||||
}
|
201
vendor/github.com/containers/storage/cmd/oci-storage/create.go
generated
vendored
201
vendor/github.com/containers/storage/cmd/oci-storage/create.go
generated
vendored
|
@ -1,201 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/opts"
|
||||
"github.com/containers/storage/pkg/mflag"
|
||||
)
|
||||
|
||||
var (
|
||||
paramMountLabel = ""
|
||||
paramNames = []string{}
|
||||
paramID = ""
|
||||
paramLayer = ""
|
||||
paramMetadata = ""
|
||||
paramMetadataFile = ""
|
||||
paramCreateRO = false
|
||||
)
|
||||
|
||||
func createLayer(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
parent := ""
|
||||
if len(args) > 0 {
|
||||
parent = args[0]
|
||||
}
|
||||
layer, err := m.CreateLayer(paramID, parent, paramNames, paramMountLabel, !paramCreateRO)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(layer)
|
||||
} else {
|
||||
fmt.Printf("%s", layer.ID)
|
||||
for _, name := range layer.Names {
|
||||
fmt.Printf("\t%s\n", name)
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func importLayer(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
parent := ""
|
||||
if len(args) > 0 {
|
||||
parent = args[0]
|
||||
}
|
||||
diffStream := io.Reader(os.Stdin)
|
||||
if applyDiffFile != "" {
|
||||
f, err := os.Open(applyDiffFile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
diffStream = f
|
||||
defer f.Close()
|
||||
}
|
||||
layer, _, err := m.PutLayer(paramID, parent, paramNames, paramMountLabel, !paramCreateRO, diffStream)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(layer)
|
||||
} else {
|
||||
fmt.Printf("%s", layer.ID)
|
||||
for _, name := range layer.Names {
|
||||
fmt.Printf("\t%s\n", name)
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func createImage(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
if paramMetadataFile != "" {
|
||||
f, err := os.Open(paramMetadataFile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
b, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
paramMetadata = string(b)
|
||||
}
|
||||
image, err := m.CreateImage(paramID, paramNames, args[0], paramMetadata, nil)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(image)
|
||||
} else {
|
||||
fmt.Printf("%s", image.ID)
|
||||
for _, name := range image.Names {
|
||||
fmt.Printf("\t%s\n", name)
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func createContainer(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
if paramMetadataFile != "" {
|
||||
f, err := os.Open(paramMetadataFile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
b, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
paramMetadata = string(b)
|
||||
}
|
||||
container, err := m.CreateContainer(paramID, paramNames, args[0], paramLayer, paramMetadata, nil)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(container)
|
||||
} else {
|
||||
fmt.Printf("%s", container.ID)
|
||||
for _, name := range container.Names {
|
||||
fmt.Printf("\t%s", name)
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
commands = append(commands, command{
|
||||
names: []string{"create-layer", "createlayer"},
|
||||
optionsHelp: "[options [...]] [parentLayerNameOrID]",
|
||||
usage: "Create a new layer",
|
||||
maxArgs: 1,
|
||||
action: createLayer,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.StringVar(¶mMountLabel, []string{"-label", "l"}, "", "Mount Label")
|
||||
flags.Var(opts.NewListOptsRef(¶mNames, nil), []string{"-name", "n"}, "Layer name")
|
||||
flags.StringVar(¶mID, []string{"-id", "i"}, "", "Layer ID")
|
||||
flags.BoolVar(¶mCreateRO, []string{"-readonly", "r"}, false, "Mark as read-only")
|
||||
flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
|
||||
},
|
||||
})
|
||||
commands = append(commands, command{
|
||||
names: []string{"import-layer", "importlayer"},
|
||||
optionsHelp: "[options [...]] [parentLayerNameOrID]",
|
||||
usage: "Import a new layer",
|
||||
maxArgs: 1,
|
||||
action: importLayer,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.StringVar(¶mMountLabel, []string{"-label", "l"}, "", "Mount Label")
|
||||
flags.Var(opts.NewListOptsRef(¶mNames, nil), []string{"-name", "n"}, "Layer name")
|
||||
flags.StringVar(¶mID, []string{"-id", "i"}, "", "Layer ID")
|
||||
flags.BoolVar(¶mCreateRO, []string{"-readonly", "r"}, false, "Mark as read-only")
|
||||
flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
|
||||
flags.StringVar(&applyDiffFile, []string{"-file", "f"}, "", "Read from file instead of stdin")
|
||||
},
|
||||
})
|
||||
commands = append(commands, command{
|
||||
names: []string{"create-image", "createimage"},
|
||||
optionsHelp: "[options [...]] topLayerNameOrID",
|
||||
usage: "Create a new image using layers",
|
||||
minArgs: 1,
|
||||
maxArgs: 1,
|
||||
action: createImage,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.Var(opts.NewListOptsRef(¶mNames, nil), []string{"-name", "n"}, "Image name")
|
||||
flags.StringVar(¶mID, []string{"-id", "i"}, "", "Image ID")
|
||||
flags.StringVar(¶mMetadata, []string{"-metadata", "m"}, "", "Metadata")
|
||||
flags.StringVar(¶mMetadataFile, []string{"-metadata-file", "f"}, "", "Metadata File")
|
||||
flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
|
||||
},
|
||||
})
|
||||
commands = append(commands, command{
|
||||
names: []string{"create-container", "createcontainer"},
|
||||
optionsHelp: "[options [...]] parentImageNameOrID",
|
||||
usage: "Create a new container from an image",
|
||||
minArgs: 1,
|
||||
maxArgs: 1,
|
||||
action: createContainer,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.Var(opts.NewListOptsRef(¶mNames, nil), []string{"-name", "n"}, "Container name")
|
||||
flags.StringVar(¶mID, []string{"-id", "i"}, "", "Container ID")
|
||||
flags.StringVar(¶mMetadata, []string{"-metadata", "m"}, "", "Metadata")
|
||||
flags.StringVar(¶mMetadataFile, []string{"-metadata-file", "f"}, "", "Metadata File")
|
||||
flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
|
||||
},
|
||||
})
|
||||
}
|
188
vendor/github.com/containers/storage/cmd/oci-storage/delete.go
generated
vendored
188
vendor/github.com/containers/storage/cmd/oci-storage/delete.go
generated
vendored
|
@ -1,188 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/mflag"
|
||||
)
|
||||
|
||||
var testDeleteImage = false
|
||||
|
||||
func deleteThing(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
if len(args) < 1 {
|
||||
return 1
|
||||
}
|
||||
deleted := make(map[string]string)
|
||||
for _, what := range args {
|
||||
err := m.Delete(what)
|
||||
if err != nil {
|
||||
deleted[what] = fmt.Sprintf("%v", err)
|
||||
} else {
|
||||
deleted[what] = ""
|
||||
}
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(deleted)
|
||||
} else {
|
||||
for what, err := range deleted {
|
||||
if err != "" {
|
||||
fmt.Fprintf(os.Stderr, "%s: %s\n", what, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, err := range deleted {
|
||||
if err != "" {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func deleteLayer(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
if len(args) < 1 {
|
||||
return 1
|
||||
}
|
||||
deleted := make(map[string]string)
|
||||
for _, what := range args {
|
||||
err := m.DeleteLayer(what)
|
||||
if err != nil {
|
||||
deleted[what] = fmt.Sprintf("%v", err)
|
||||
} else {
|
||||
deleted[what] = ""
|
||||
}
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(deleted)
|
||||
} else {
|
||||
for what, err := range deleted {
|
||||
if err != "" {
|
||||
fmt.Fprintf(os.Stderr, "%s: %s\n", what, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, err := range deleted {
|
||||
if err != "" {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type deletedImage struct {
|
||||
DeletedLayers []string `json:"deleted-layers,omitifempty"`
|
||||
Error string `json:"error,omitifempty"`
|
||||
}
|
||||
|
||||
func deleteImage(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
if len(args) < 1 {
|
||||
return 1
|
||||
}
|
||||
deleted := make(map[string]deletedImage)
|
||||
for _, what := range args {
|
||||
layers, err := m.DeleteImage(what, !testDeleteImage)
|
||||
errText := ""
|
||||
if err != nil {
|
||||
errText = fmt.Sprintf("%v", err)
|
||||
}
|
||||
deleted[what] = deletedImage{
|
||||
DeletedLayers: layers,
|
||||
Error: errText,
|
||||
}
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(deleted)
|
||||
} else {
|
||||
for what, record := range deleted {
|
||||
if record.Error != "" {
|
||||
fmt.Fprintf(os.Stderr, "%s: %s\n", what, record.Error)
|
||||
} else {
|
||||
for _, layer := range record.DeletedLayers {
|
||||
fmt.Fprintf(os.Stderr, "%s: %s\n", what, layer)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, record := range deleted {
|
||||
if record.Error != "" {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func deleteContainer(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
if len(args) < 1 {
|
||||
return 1
|
||||
}
|
||||
deleted := make(map[string]string)
|
||||
for _, what := range args {
|
||||
err := m.DeleteContainer(what)
|
||||
if err != nil {
|
||||
deleted[what] = fmt.Sprintf("%v", err)
|
||||
} else {
|
||||
deleted[what] = ""
|
||||
}
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(deleted)
|
||||
} else {
|
||||
for what, err := range deleted {
|
||||
if err != "" {
|
||||
fmt.Fprintf(os.Stderr, "%s: %s\n", what, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, err := range deleted {
|
||||
if err != "" {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
commands = append(commands, command{
|
||||
names: []string{"delete"},
|
||||
optionsHelp: "[LayerOrImageOrContainerNameOrID [...]]",
|
||||
usage: "Delete a layer or image or container, with no safety checks",
|
||||
minArgs: 1,
|
||||
action: deleteThing,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
|
||||
},
|
||||
})
|
||||
commands = append(commands, command{
|
||||
names: []string{"delete-layer", "deletelayer"},
|
||||
optionsHelp: "[LayerNameOrID [...]]",
|
||||
usage: "Delete a layer, with safety checks",
|
||||
minArgs: 1,
|
||||
action: deleteLayer,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
|
||||
},
|
||||
})
|
||||
commands = append(commands, command{
|
||||
names: []string{"delete-image", "deleteimage"},
|
||||
optionsHelp: "[ImageNameOrID [...]]",
|
||||
usage: "Delete an image, with safety checks",
|
||||
minArgs: 1,
|
||||
action: deleteImage,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.BoolVar(&testDeleteImage, []string{"-test", "t"}, jsonOutput, "Only test removal")
|
||||
flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
|
||||
},
|
||||
})
|
||||
commands = append(commands, command{
|
||||
names: []string{"delete-container", "deletecontainer"},
|
||||
optionsHelp: "[ContainerNameOrID [...]]",
|
||||
usage: "Delete a container, with safety checks",
|
||||
minArgs: 1,
|
||||
action: deleteContainer,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
|
||||
},
|
||||
})
|
||||
}
|
190
vendor/github.com/containers/storage/cmd/oci-storage/diff.go
generated
vendored
190
vendor/github.com/containers/storage/cmd/oci-storage/diff.go
generated
vendored
|
@ -1,190 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/mflag"
|
||||
)
|
||||
|
||||
var (
|
||||
applyDiffFile = ""
|
||||
diffFile = ""
|
||||
diffGzip = false
|
||||
diffBzip2 = false
|
||||
diffXz = false
|
||||
)
|
||||
|
||||
func changes(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
if len(args) < 1 {
|
||||
return 1
|
||||
}
|
||||
to := args[0]
|
||||
from := ""
|
||||
if len(args) >= 2 {
|
||||
from = args[1]
|
||||
}
|
||||
changes, err := m.Changes(from, to)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(changes)
|
||||
} else {
|
||||
for _, change := range changes {
|
||||
what := "?"
|
||||
switch change.Kind {
|
||||
case archive.ChangeAdd:
|
||||
what = "Add"
|
||||
case archive.ChangeModify:
|
||||
what = "Modify"
|
||||
case archive.ChangeDelete:
|
||||
what = "Delete"
|
||||
}
|
||||
fmt.Printf("%s %q\n", what, change.Path)
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func diff(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
if len(args) < 1 {
|
||||
return 1
|
||||
}
|
||||
to := args[0]
|
||||
from := ""
|
||||
if len(args) >= 2 {
|
||||
from = args[1]
|
||||
}
|
||||
diffStream := io.Writer(os.Stdout)
|
||||
if diffFile != "" {
|
||||
f, err := os.Create(diffFile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
diffStream = f
|
||||
defer f.Close()
|
||||
}
|
||||
reader, err := m.Diff(from, to)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
if diffGzip || diffBzip2 || diffXz {
|
||||
compression := archive.Uncompressed
|
||||
if diffGzip {
|
||||
compression = archive.Gzip
|
||||
} else if diffBzip2 {
|
||||
compression = archive.Bzip2
|
||||
} else if diffXz {
|
||||
compression = archive.Xz
|
||||
}
|
||||
compressor, err := archive.CompressStream(diffStream, compression)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
diffStream = compressor
|
||||
defer compressor.Close()
|
||||
}
|
||||
_, err = io.Copy(diffStream, reader)
|
||||
reader.Close()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func applyDiff(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
if len(args) < 1 {
|
||||
return 1
|
||||
}
|
||||
diffStream := io.Reader(os.Stdin)
|
||||
if applyDiffFile != "" {
|
||||
f, err := os.Open(applyDiffFile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
diffStream = f
|
||||
defer f.Close()
|
||||
}
|
||||
_, err := m.ApplyDiff(args[0], diffStream)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func diffSize(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
if len(args) < 1 {
|
||||
return 1
|
||||
}
|
||||
to := args[0]
|
||||
from := ""
|
||||
if len(args) >= 2 {
|
||||
from = args[1]
|
||||
}
|
||||
n, err := m.DiffSize(from, to)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
fmt.Printf("%d\n", n)
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
commands = append(commands, command{
|
||||
names: []string{"changes"},
|
||||
usage: "Compare two layers",
|
||||
optionsHelp: "[options [...]] layerNameOrID [referenceLayerNameOrID]",
|
||||
minArgs: 1,
|
||||
maxArgs: 2,
|
||||
action: changes,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
|
||||
},
|
||||
})
|
||||
commands = append(commands, command{
|
||||
names: []string{"diffsize", "diff-size"},
|
||||
usage: "Compare two layers",
|
||||
optionsHelp: "[options [...]] layerNameOrID [referenceLayerNameOrID]",
|
||||
minArgs: 1,
|
||||
maxArgs: 2,
|
||||
action: diffSize,
|
||||
})
|
||||
commands = append(commands, command{
|
||||
names: []string{"diff"},
|
||||
usage: "Compare two layers",
|
||||
optionsHelp: "[options [...]] layerNameOrID [referenceLayerNameOrID]",
|
||||
minArgs: 1,
|
||||
maxArgs: 2,
|
||||
action: diff,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.StringVar(&diffFile, []string{"-file", "f"}, "", "Write to file instead of stdout")
|
||||
flags.BoolVar(&diffGzip, []string{"-gzip", "c"}, diffGzip, "Compress using gzip")
|
||||
flags.BoolVar(&diffBzip2, []string{"-bzip2", "-bz2", "b"}, diffBzip2, "Compress using bzip2")
|
||||
flags.BoolVar(&diffXz, []string{"-xz", "x"}, diffXz, "Compress using xz")
|
||||
},
|
||||
})
|
||||
commands = append(commands, command{
|
||||
names: []string{"applydiff", "apply-diff"},
|
||||
optionsHelp: "[options [...]] layerNameOrID [referenceLayerNameOrID]",
|
||||
usage: "Apply a diff to a layer",
|
||||
minArgs: 1,
|
||||
maxArgs: 1,
|
||||
action: applyDiff,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.StringVar(&applyDiffFile, []string{"-file", "f"}, "", "Read from file instead of stdin")
|
||||
},
|
||||
})
|
||||
}
|
77
vendor/github.com/containers/storage/cmd/oci-storage/exists.go
generated
vendored
77
vendor/github.com/containers/storage/cmd/oci-storage/exists.go
generated
vendored
|
@ -1,77 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/mflag"
|
||||
)
|
||||
|
||||
var (
|
||||
existLayer = false
|
||||
existImage = false
|
||||
existContainer = false
|
||||
existQuiet = false
|
||||
)
|
||||
|
||||
func exist(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
if len(args) < 1 {
|
||||
return 1
|
||||
}
|
||||
anyMissing := false
|
||||
existDict := make(map[string]bool)
|
||||
for _, what := range args {
|
||||
exists := m.Exists(what)
|
||||
existDict[what] = exists
|
||||
if existContainer {
|
||||
if c, err := m.Container(what); c == nil || err != nil {
|
||||
exists = false
|
||||
}
|
||||
}
|
||||
if existImage {
|
||||
if i, err := m.Image(what); i == nil || err != nil {
|
||||
exists = false
|
||||
}
|
||||
}
|
||||
if existLayer {
|
||||
if l, err := m.Layer(what); l == nil || err != nil {
|
||||
exists = false
|
||||
}
|
||||
}
|
||||
if !exists {
|
||||
anyMissing = true
|
||||
}
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(existDict)
|
||||
} else {
|
||||
if !existQuiet {
|
||||
for what, exists := range existDict {
|
||||
fmt.Printf("%s: %v\n", what, exists)
|
||||
}
|
||||
}
|
||||
}
|
||||
if anyMissing {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
commands = append(commands, command{
|
||||
names: []string{"exists"},
|
||||
optionsHelp: "[LayerOrImageOrContainerNameOrID [...]]",
|
||||
usage: "Check if a layer or image or container exists",
|
||||
minArgs: 1,
|
||||
action: exist,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.BoolVar(&existQuiet, []string{"-quiet", "q"}, existQuiet, "Don't print names")
|
||||
flags.BoolVar(&existLayer, []string{"-layer", "l"}, existQuiet, "Only succeed if the match is a layer")
|
||||
flags.BoolVar(&existImage, []string{"-image", "i"}, existQuiet, "Only succeed if the match is an image")
|
||||
flags.BoolVar(&existContainer, []string{"-container", "c"}, existQuiet, "Only succeed if the match is a container")
|
||||
flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
|
||||
},
|
||||
})
|
||||
}
|
157
vendor/github.com/containers/storage/cmd/oci-storage/image.go
generated
vendored
157
vendor/github.com/containers/storage/cmd/oci-storage/image.go
generated
vendored
|
@ -1,157 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/mflag"
|
||||
)
|
||||
|
||||
var (
|
||||
paramImageDataFile = ""
|
||||
)
|
||||
|
||||
func image(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
matched := []*storage.Image{}
|
||||
for _, arg := range args {
|
||||
if image, err := m.Image(arg); err == nil {
|
||||
matched = append(matched, image)
|
||||
}
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(matched)
|
||||
} else {
|
||||
for _, image := range matched {
|
||||
fmt.Printf("ID: %s\n", image.ID)
|
||||
for _, name := range image.Names {
|
||||
fmt.Printf("Name: %s\n", name)
|
||||
}
|
||||
fmt.Printf("Top Layer: %s\n", image.TopLayer)
|
||||
for _, name := range image.BigDataNames {
|
||||
fmt.Printf("Data: %s\n", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(matched) != len(args) {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func listImageBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
image, err := m.Image(args[0])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
d, err := m.ListImageBigData(image.ID)
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(d)
|
||||
} else {
|
||||
for _, name := range d {
|
||||
fmt.Printf("%s\n", name)
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func getImageBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
image, err := m.Image(args[0])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
output := os.Stdout
|
||||
if paramImageDataFile != "" {
|
||||
f, err := os.Create(paramImageDataFile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
output = f
|
||||
}
|
||||
b, err := m.ImageBigData(image.ID, args[1])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
output.Write(b)
|
||||
output.Close()
|
||||
return 0
|
||||
}
|
||||
|
||||
func setImageBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
image, err := m.Image(args[0])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
input := os.Stdin
|
||||
if paramImageDataFile != "" {
|
||||
f, err := os.Open(paramImageDataFile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
input = f
|
||||
}
|
||||
b, err := ioutil.ReadAll(input)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
err = m.SetImageBigData(image.ID, args[1], b)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
commands = append(commands,
|
||||
command{
|
||||
names: []string{"image"},
|
||||
optionsHelp: "[options [...]] imageNameOrID [...]",
|
||||
usage: "Examine an image",
|
||||
action: image,
|
||||
minArgs: 1,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
|
||||
},
|
||||
},
|
||||
command{
|
||||
names: []string{"list-image-data", "listimagedata"},
|
||||
optionsHelp: "[options [...]] imageNameOrID",
|
||||
usage: "List data items that are attached to an image",
|
||||
action: listImageBigData,
|
||||
minArgs: 1,
|
||||
maxArgs: 1,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
|
||||
},
|
||||
},
|
||||
command{
|
||||
names: []string{"get-image-data", "getimagedata"},
|
||||
optionsHelp: "[options [...]] imageNameOrID dataName",
|
||||
usage: "Get data that is attached to an image",
|
||||
action: getImageBigData,
|
||||
minArgs: 2,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.StringVar(¶mImageDataFile, []string{"-file", "f"}, paramImageDataFile, "Write data to file")
|
||||
},
|
||||
},
|
||||
command{
|
||||
names: []string{"set-image-data", "setimagedata"},
|
||||
optionsHelp: "[options [...]] imageNameOrID dataName",
|
||||
usage: "Set data that is attached to an image",
|
||||
action: setImageBigData,
|
||||
minArgs: 2,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.StringVar(¶mImageDataFile, []string{"-file", "f"}, paramImageDataFile, "Read data from file")
|
||||
},
|
||||
})
|
||||
}
|
45
vendor/github.com/containers/storage/cmd/oci-storage/images.go
generated
vendored
45
vendor/github.com/containers/storage/cmd/oci-storage/images.go
generated
vendored
|
@ -1,45 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/mflag"
|
||||
)
|
||||
|
||||
func images(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
images, err := m.Images()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(images)
|
||||
} else {
|
||||
for _, image := range images {
|
||||
fmt.Printf("%s\n", image.ID)
|
||||
for _, name := range image.Names {
|
||||
fmt.Printf("\tname: %s\n", name)
|
||||
}
|
||||
for _, name := range image.BigDataNames {
|
||||
fmt.Printf("\tdata: %s\n", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
commands = append(commands, command{
|
||||
names: []string{"images"},
|
||||
optionsHelp: "[options [...]]",
|
||||
usage: "List images",
|
||||
action: images,
|
||||
maxArgs: 0,
|
||||
addFlags: func(flags *mflag.FlagSet, cmd *command) {
|
||||
flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
|
||||
},
|
||||
})
|
||||
}
|
113
vendor/github.com/containers/storage/cmd/oci-storage/layers.go
generated
vendored
113
vendor/github.com/containers/storage/cmd/oci-storage/layers.go
generated
vendored
|
@ -1,113 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/mflag"
|
||||
)
|
||||
|
||||
var listLayersTree = false
|
||||
|
||||
func layers(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
layers, err := m.Layers()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(layers)
|
||||
return 0
|
||||
}
|
||||
imageMap := make(map[string]*[]storage.Image)
|
||||
if images, err := m.Images(); err == nil {
|
||||
for _, image := range images {
|
||||
if ilist, ok := imageMap[image.TopLayer]; ok && ilist != nil {
|
||||
list := append(*ilist, image)
|
||||
imageMap[image.TopLayer] = &list
|
||||
} else {
|
||||
list := []storage.Image{image}
|
||||
imageMap[image.TopLayer] = &list
|
||||
}
|
||||
}
|
||||
}
|
||||
containerMap := make(map[string]storage.Container)
|
||||
if containers, err := m.Containers(); err == nil {
|
||||
for _, container := range containers {
|
||||
containerMap[container.LayerID] = container
|
||||
}
|
||||
}
|
||||
nodes := []treeNode{}
|
||||
for _, layer := range layers {
|
||||
if listLayersTree {
|
||||
node := treeNode{
|
||||
left: string(layer.Parent),
|
||||
right: string(layer.ID),
|
||||
notes: []string{},
|
||||
}
|
||||
if node.left == "" {
|
||||
node.left = "(base)"
|
||||
}
|
||||
for _, name := range layer.Names {
|
||||
node.notes = append(node.notes, "name: "+name)
|
||||
}
|
||||
if layer.MountPoint != "" {
|
||||
node.notes = append(node.notes, "mount: "+layer.MountPoint)
|
||||
}
|
||||
if imageList, ok := imageMap[layer.ID]; ok && imageList != nil {
|
||||
for _, image := range *imageList {
|
||||
node.notes = append(node.notes, fmt.Sprintf("image: %s", image.ID))
|
||||
for _, name := range image.Names {
|
||||
node.notes = append(node.notes, fmt.Sprintf("image name: %s", name))
|
||||
}
|
||||
}
|
||||
}
|
||||
if container, ok := containerMap[layer.ID]; ok {
|
||||
node.notes = append(node.notes, fmt.Sprintf("container: %s", container.ID))
|
||||
for _, name := range container.Names {
|
||||
node.notes = append(node.notes, fmt.Sprintf("container name: %s", name))
|
||||
}
|
||||
}
|
||||
nodes = append(nodes, node)
|
||||
} else {
|
||||
fmt.Printf("%s\n", layer.ID)
|
||||
for _, name := range layer.Names {
|
||||
fmt.Printf("\tname: %s\n", name)
|
||||
}
|
||||
if imageList, ok := imageMap[layer.ID]; ok && imageList != nil {
|
||||
for _, image := range *imageList {
|
||||
fmt.Printf("\timage: %s\n", image.ID)
|
||||
for _, name := range image.Names {
|
||||
fmt.Printf("\t\tname: %s\n", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if container, ok := containerMap[layer.ID]; ok {
|
||||
fmt.Printf("\tcontainer: %s\n", container.ID)
|
||||
for _, name := range container.Names {
|
||||
fmt.Printf("\t\tname: %s\n", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if listLayersTree {
|
||||
printTree(nodes)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// init registers the "layers" subcommand with the global command table.
func init() {
	commands = append(commands, command{
		names:       []string{"layers"},
		optionsHelp: "[options [...]]",
		usage:       "List layers",
		action:      layers,
		maxArgs:     0, // the command takes no positional arguments
		addFlags: func(flags *mflag.FlagSet, cmd *command) {
			// -t/--tree renders parent/child relationships as a tree
			// instead of a flat listing.
			flags.BoolVar(&listLayersTree, []string{"-tree", "t"}, listLayersTree, "Use a tree")
			flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
		},
	})
}
|
126
vendor/github.com/containers/storage/cmd/oci-storage/main.go
generated
vendored
126
vendor/github.com/containers/storage/cmd/oci-storage/main.go
generated
vendored
|
@ -1,126 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/opts"
|
||||
"github.com/containers/storage/pkg/mflag"
|
||||
"github.com/containers/storage/pkg/reexec"
|
||||
)
|
||||
|
||||
// command describes one oci-storage subcommand: the names it answers to,
// its usage text, argument-count bounds, optional extra flags, and the
// action invoked once a Store has been opened.
type command struct {
	names       []string // primary name first; aliases follow
	optionsHelp string   // options placeholder shown in "Usage:" lines
	minArgs     int      // minimum positional args (0 = no minimum enforced)
	maxArgs     int      // maximum positional args (0 = no maximum enforced)
	usage       string   // one-line description for command listings
	addFlags    func(*mflag.FlagSet, *command)
	// action runs the command; its int result becomes the process exit code.
	action func(*mflag.FlagSet, string, storage.Store, []string) int
}
|
||||
|
||||
var (
	// commands is populated by each subcommand file's init().
	commands = []command{}
	// jsonOutput is shared by subcommands that offer a -j/--json flag.
	jsonOutput = false
)
|
||||
|
||||
func main() {
|
||||
if reexec.Init() {
|
||||
return
|
||||
}
|
||||
|
||||
options := storage.DefaultStoreOptions
|
||||
debug := false
|
||||
|
||||
makeFlags := func(command string, eh mflag.ErrorHandling) *mflag.FlagSet {
|
||||
flags := mflag.NewFlagSet(command, eh)
|
||||
flags.StringVar(&options.RunRoot, []string{"-run", "R"}, options.RunRoot, "Root of the runtime state tree")
|
||||
flags.StringVar(&options.GraphRoot, []string{"-graph", "g"}, options.GraphRoot, "Root of the storage tree")
|
||||
flags.StringVar(&options.GraphDriverName, []string{"-storage-driver", "s"}, options.GraphDriverName, "Storage driver to use ($STORAGE_DRIVER)")
|
||||
flags.Var(opts.NewListOptsRef(&options.GraphDriverOptions, nil), []string{"-storage-opt"}, "Set storage driver options ($STORAGE_OPTS)")
|
||||
flags.BoolVar(&debug, []string{"-debug", "D"}, debug, "Print debugging information")
|
||||
return flags
|
||||
}
|
||||
|
||||
flags := makeFlags("oci-storage", mflag.ContinueOnError)
|
||||
flags.Usage = func() {
|
||||
fmt.Printf("Usage: oci-storage command [options [...]]\n\n")
|
||||
fmt.Printf("Commands:\n\n")
|
||||
for _, command := range commands {
|
||||
fmt.Printf(" %-22s%s\n", command.names[0], command.usage)
|
||||
}
|
||||
fmt.Printf("\nOptions:\n")
|
||||
flags.PrintDefaults()
|
||||
}
|
||||
|
||||
if len(os.Args) < 2 {
|
||||
flags.Usage()
|
||||
os.Exit(1)
|
||||
}
|
||||
if err := flags.ParseFlags(os.Args[1:], true); err != nil {
|
||||
fmt.Printf("%v while parsing arguments (1)\n", err)
|
||||
flags.Usage()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
args := flags.Args()
|
||||
if len(args) < 1 {
|
||||
flags.Usage()
|
||||
os.Exit(1)
|
||||
return
|
||||
}
|
||||
cmd := args[0]
|
||||
|
||||
for _, command := range commands {
|
||||
for _, name := range command.names {
|
||||
if cmd == name {
|
||||
flags := makeFlags(cmd, mflag.ExitOnError)
|
||||
if command.addFlags != nil {
|
||||
command.addFlags(flags, &command)
|
||||
}
|
||||
flags.Usage = func() {
|
||||
fmt.Printf("Usage: oci-storage %s %s\n\n", cmd, command.optionsHelp)
|
||||
fmt.Printf("%s\n", command.usage)
|
||||
fmt.Printf("\nOptions:\n")
|
||||
flags.PrintDefaults()
|
||||
}
|
||||
if err := flags.ParseFlags(args[1:], false); err != nil {
|
||||
fmt.Printf("%v while parsing arguments (3)", err)
|
||||
flags.Usage()
|
||||
os.Exit(1)
|
||||
}
|
||||
args = flags.Args()
|
||||
if command.minArgs != 0 && len(args) < command.minArgs {
|
||||
fmt.Printf("%s: more arguments required.\n", cmd)
|
||||
flags.Usage()
|
||||
os.Exit(1)
|
||||
}
|
||||
if command.maxArgs != 0 && len(args) > command.maxArgs {
|
||||
fmt.Printf("%s: too many arguments (%s).\n", cmd, args)
|
||||
flags.Usage()
|
||||
os.Exit(1)
|
||||
}
|
||||
if debug {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
logrus.Debugf("RunRoot: %s", options.RunRoot)
|
||||
logrus.Debugf("GraphRoot: %s", options.GraphRoot)
|
||||
logrus.Debugf("GraphDriverName: %s", options.GraphDriverName)
|
||||
logrus.Debugf("GraphDriverOptions: %s", options.GraphDriverOptions)
|
||||
} else {
|
||||
logrus.SetLevel(logrus.ErrorLevel)
|
||||
}
|
||||
store, err := storage.GetStore(options)
|
||||
if err != nil {
|
||||
fmt.Printf("error initializing: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
os.Exit(command.action(flags, cmd, store, args))
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
fmt.Printf("%s: unrecognized command.\n", cmd)
|
||||
os.Exit(1)
|
||||
}
|
98
vendor/github.com/containers/storage/cmd/oci-storage/metadata.go
generated
vendored
98
vendor/github.com/containers/storage/cmd/oci-storage/metadata.go
generated
vendored
|
@ -1,98 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/mflag"
|
||||
)
|
||||
|
||||
var metadataQuiet = false
|
||||
|
||||
func metadata(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
if len(args) < 1 {
|
||||
return 1
|
||||
}
|
||||
metadataDict := make(map[string]string)
|
||||
missingAny := false
|
||||
for _, what := range args {
|
||||
if metadata, err := m.Metadata(what); err == nil {
|
||||
metadataDict[what] = strings.TrimSuffix(metadata, "\n")
|
||||
} else {
|
||||
missingAny = true
|
||||
}
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(metadataDict)
|
||||
} else {
|
||||
for _, what := range args {
|
||||
if metadataQuiet {
|
||||
fmt.Printf("%s\n", metadataDict[what])
|
||||
} else {
|
||||
fmt.Printf("%s: %s\n", what, metadataDict[what])
|
||||
}
|
||||
}
|
||||
}
|
||||
if missingAny {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func setMetadata(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
if len(args) < 1 {
|
||||
return 1
|
||||
}
|
||||
if paramMetadataFile == "" && paramMetadata == "" {
|
||||
fmt.Fprintf(os.Stderr, "no new metadata provided\n")
|
||||
return 1
|
||||
}
|
||||
if paramMetadataFile != "" {
|
||||
f, err := os.Open(paramMetadataFile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
b, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
paramMetadata = string(b)
|
||||
}
|
||||
if err := m.SetMetadata(args[0], paramMetadata); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// init registers the "metadata" and "set-metadata" subcommands.
func init() {
	commands = append(commands, command{
		names:       []string{"metadata"},
		optionsHelp: "[LayerOrImageOrContainerNameOrID [...]]",
		usage:       "Retrieve layer, image, or container metadata",
		minArgs:     1,
		action:      metadata,
		addFlags: func(flags *mflag.FlagSet, cmd *command) {
			// -q prints bare metadata values without the "id:" prefix.
			flags.BoolVar(&metadataQuiet, []string{"-quiet", "q"}, metadataQuiet, "Omit names and IDs")
			flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
		},
	})
	commands = append(commands, command{
		names:       []string{"set-metadata", "setmetadata"},
		optionsHelp: "[options [...]] layerOrImageOrContainerNameOrID",
		usage:       "Set layer, image, or container metadata",
		minArgs:     1,
		maxArgs:     1, // exactly one target may be updated per invocation
		action:      setMetadata,
		addFlags: func(flags *mflag.FlagSet, cmd *command) {
			// Metadata may come from a literal value (-m) or a file (-f);
			// setMetadata lets the file contents win when both are given.
			flags.StringVar(&paramMetadata, []string{"-metadata", "m"}, "", "Metadata")
			flags.StringVar(&paramMetadataFile, []string{"-metadata-file", "f"}, "", "Metadata File")
		},
	})
}
|
99
vendor/github.com/containers/storage/cmd/oci-storage/mount.go
generated
vendored
99
vendor/github.com/containers/storage/cmd/oci-storage/mount.go
generated
vendored
|
@ -1,99 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/mflag"
|
||||
)
|
||||
|
||||
// mountPointOrError is one mount attempt's outcome as reported by the
// "mount" command: the requested ID, the resulting mount point (empty on
// failure), and the error text (empty on success).
type mountPointOrError struct {
	ID         string `json:"id"`
	MountPoint string `json:"mountpoint"`
	Error      string `json:"error"`
}

// mountPointError is one unmount attempt's outcome as reported by the
// "unmount" command; Error is empty on success.
type mountPointError struct {
	ID    string `json:"id"`
	Error string `json:"error"`
}
|
||||
|
||||
func mount(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
moes := []mountPointOrError{}
|
||||
for _, arg := range args {
|
||||
result, err := m.Mount(arg, paramMountLabel)
|
||||
errText := ""
|
||||
if err != nil {
|
||||
errText = fmt.Sprintf("%v", err)
|
||||
}
|
||||
moes = append(moes, mountPointOrError{arg, result, errText})
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(moes)
|
||||
} else {
|
||||
for _, mountOrError := range moes {
|
||||
if mountOrError.Error != "" {
|
||||
fmt.Fprintf(os.Stderr, "%s while mounting %s\n", mountOrError.Error, mountOrError.ID)
|
||||
}
|
||||
fmt.Printf("%s\n", mountOrError.MountPoint)
|
||||
}
|
||||
}
|
||||
for _, mountOrErr := range moes {
|
||||
if mountOrErr.Error != "" {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func unmount(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
mes := []mountPointError{}
|
||||
errors := false
|
||||
for _, arg := range args {
|
||||
err := m.Unmount(arg)
|
||||
errText := ""
|
||||
if err != nil {
|
||||
errText = fmt.Sprintf("%v", err)
|
||||
errors = true
|
||||
}
|
||||
mes = append(mes, mountPointError{arg, errText})
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(mes)
|
||||
} else {
|
||||
for _, me := range mes {
|
||||
if me.Error != "" {
|
||||
fmt.Fprintf(os.Stderr, "%s while unmounting %s\n", me.Error, me.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
if errors {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// init registers the "mount" and "unmount" subcommands.
func init() {
	commands = append(commands, command{
		names:       []string{"mount"},
		optionsHelp: "[options [...]] LayerOrContainerNameOrID",
		usage:       "Mount a layer or container",
		minArgs:     1,
		action:      mount,
		addFlags: func(flags *mflag.FlagSet, cmd *command) {
			// -l sets the SELinux label applied when mounting.
			flags.StringVar(&paramMountLabel, []string{"-label", "l"}, "", "Mount Label")
			flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
		},
	})
	commands = append(commands, command{
		names:       []string{"unmount", "umount"},
		optionsHelp: "LayerOrContainerNameOrID",
		usage:       "Unmount a layer or container",
		minArgs:     1,
		action:      unmount,
		addFlags: func(flags *mflag.FlagSet, cmd *command) {
			flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
		},
	})
}
|
96
vendor/github.com/containers/storage/cmd/oci-storage/name.go
generated
vendored
96
vendor/github.com/containers/storage/cmd/oci-storage/name.go
generated
vendored
|
@ -1,96 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/opts"
|
||||
"github.com/containers/storage/pkg/mflag"
|
||||
)
|
||||
|
||||
func addNames(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
if len(args) < 1 {
|
||||
return 1
|
||||
}
|
||||
id, err := m.Lookup(args[0])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
oldnames, err := m.Names(id)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
newNames := []string{}
|
||||
if oldnames != nil {
|
||||
newNames = append(newNames, oldnames...)
|
||||
}
|
||||
if paramNames != nil {
|
||||
newNames = append(newNames, paramNames...)
|
||||
}
|
||||
if err := m.SetNames(id, newNames); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
names, err := m.Names(id)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(names)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func setNames(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
if len(args) < 1 {
|
||||
return 1
|
||||
}
|
||||
id, err := m.Lookup(args[0])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
if err := m.SetNames(id, paramNames); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
names, err := m.Names(id)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return 1
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(names)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// init registers the "add-names" and "set-names" subcommands.
func init() {
	commands = append(commands, command{
		names:       []string{"add-names", "addnames"},
		optionsHelp: "[options [...]] imageOrContainerNameOrID",
		usage:       "Add layer, image, or container name or names",
		minArgs:     1,
		action:      addNames,
		addFlags: func(flags *mflag.FlagSet, cmd *command) {
			// -n may be repeated; values accumulate into paramNames.
			flags.Var(opts.NewListOptsRef(&paramNames, nil), []string{"-name", "n"}, "New name")
			flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
		},
	})
	commands = append(commands, command{
		names:       []string{"set-names", "setnames"},
		optionsHelp: "[options [...]] imageOrContainerNameOrID",
		usage:       "Set layer, image, or container name or names",
		minArgs:     1,
		action:      setNames,
		addFlags: func(flags *mflag.FlagSet, cmd *command) {
			flags.Var(opts.NewListOptsRef(&paramNames, nil), []string{"-name", "n"}, "New name")
			flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
		},
	})
}
|
46
vendor/github.com/containers/storage/cmd/oci-storage/shutdown.go
generated
vendored
46
vendor/github.com/containers/storage/cmd/oci-storage/shutdown.go
generated
vendored
|
@ -1,46 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/mflag"
|
||||
)
|
||||
|
||||
var (
	// forceShutdown mirrors the -f/--force flag: unmount layers first.
	forceShutdown = false
)
|
||||
|
||||
func shutdown(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
_, err := m.Shutdown(forceShutdown)
|
||||
if jsonOutput {
|
||||
if err == nil {
|
||||
json.NewEncoder(os.Stdout).Encode(string(""))
|
||||
} else {
|
||||
json.NewEncoder(os.Stdout).Encode(err)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%s: %v\n", action, err)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// init registers the "shutdown" subcommand.
func init() {
	commands = append(commands, command{
		names:   []string{"shutdown"},
		usage:   "Shut down layer storage",
		minArgs: 0,
		action:  shutdown,
		addFlags: func(flags *mflag.FlagSet, cmd *command) {
			flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
			// -f unmounts any still-mounted layers before shutting down.
			flags.BoolVar(&forceShutdown, []string{"-force", "f"}, forceShutdown, "Unmount mounted layers first")
		},
	})
}
|
38
vendor/github.com/containers/storage/cmd/oci-storage/status.go
generated
vendored
38
vendor/github.com/containers/storage/cmd/oci-storage/status.go
generated
vendored
|
@ -1,38 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/mflag"
|
||||
)
|
||||
|
||||
func status(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
status, err := m.Status()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "status: %v\n", err)
|
||||
return 1
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(status)
|
||||
} else {
|
||||
for _, pair := range status {
|
||||
fmt.Fprintf(os.Stderr, "%s: %s\n", pair[0], pair[1])
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// init registers the "status" subcommand.
func init() {
	commands = append(commands, command{
		names:   []string{"status"},
		usage:   "Check on graph driver status",
		minArgs: 0,
		action:  status,
		addFlags: func(flags *mflag.FlagSet, cmd *command) {
			flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
		},
	})
}
|
88
vendor/github.com/containers/storage/cmd/oci-storage/tree.go
generated
vendored
88
vendor/github.com/containers/storage/cmd/oci-storage/tree.go
generated
vendored
|
@ -1,88 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Layout constants and box-drawing glyphs for rendering trees.
const treeIndentStep = 2                   // columns of indent per tree depth
const treeStemWidth = treeIndentStep - 1   // width of the stem after a branch glyph
const treeVertical = '\u2502'              // │ continuation of an earlier branch
const treeThisAndMore = "\u251c"           // ├ item with siblings below it
const treeJustThis = "\u2514"              // └ last item at this level
const treeStem = "\u2500"                  // ─ horizontal stem segment

// treeNode is one parent→child edge: left is the parent ID, right the
// child ID, and notes are extra annotation lines printed under the child.
type treeNode struct {
	left, right string
	notes       []string
}
|
||||
|
||||
// selectRoot picks a node to use as the root of the next tree to print:
// among the left-hand (parent) IDs that never appear as a child, it keeps
// the one with the fewest children.
// NOTE(review): ties and the initial pick depend on Go map iteration
// order, so the chosen root can vary between runs — confirm whether
// deterministic output is required before changing this.
func selectRoot(nodes []treeNode) string {
	children := make(map[string][]string) // parent ID -> child IDs
	areChildren := make(map[string]bool)  // IDs that appear as a child
	for _, node := range nodes {
		areChildren[node.right] = true
		if childlist, ok := children[node.left]; ok {
			children[node.left] = append(childlist, node.right)
		} else {
			children[node.left] = []string{node.right}
		}
	}
	favorite := ""
	for left, right := range children {
		// Skip nodes that are themselves children: only true roots qualify.
		if areChildren[left] {
			continue
		}
		if favorite == "" {
			favorite = left
		} else if len(right) < len(children[favorite]) {
			// Prefer the candidate root with the fewest direct children.
			favorite = left
		}
	}
	return favorite
}
|
||||
|
||||
func printSubTree(root string, nodes []treeNode, indent int, continued []int) []treeNode {
|
||||
leftovers := []treeNode{}
|
||||
children := []treeNode{}
|
||||
for _, node := range nodes {
|
||||
if node.left != root {
|
||||
leftovers = append(leftovers, node)
|
||||
continue
|
||||
}
|
||||
children = append(children, node)
|
||||
}
|
||||
for n, child := range children {
|
||||
istring := []rune(strings.Repeat(" ", indent))
|
||||
for _, column := range continued {
|
||||
istring[column] = treeVertical
|
||||
}
|
||||
subc := continued[:]
|
||||
header := treeJustThis
|
||||
noteHeader := " "
|
||||
if n < len(children)-1 {
|
||||
subc = append(subc, indent)
|
||||
header = treeThisAndMore
|
||||
noteHeader = string(treeVertical)
|
||||
}
|
||||
fmt.Printf("%s%s%s%s\n", string(istring), header, strings.Repeat(treeStem, treeStemWidth), child.right)
|
||||
for _, note := range child.notes {
|
||||
fmt.Printf("%s%s%s%s\n", string(istring), noteHeader, strings.Repeat(" ", treeStemWidth), note)
|
||||
}
|
||||
leftovers = printSubTree(child.right, leftovers, indent+treeIndentStep, subc)
|
||||
}
|
||||
return leftovers
|
||||
}
|
||||
|
||||
// printTree prints every tree described by nodes, repeatedly selecting a
// root and printing its subtree until all edges are consumed.
func printTree(nodes []treeNode) {
	for len(nodes) > 0 {
		root := selectRoot(nodes)
		fmt.Printf("%s\n", root)
		oldLength := len(nodes)
		nodes = printSubTree(root, nodes, 0, []int{})
		newLength := len(nodes)
		// If no edges were consumed (e.g. the remaining edges form a
		// cycle with no root), bail out instead of looping forever.
		if oldLength == newLength {
			break
		}
	}
}
|
25
vendor/github.com/containers/storage/cmd/oci-storage/tree_test.go
generated
vendored
25
vendor/github.com/containers/storage/cmd/oci-storage/tree_test.go
generated
vendored
|
@ -1,25 +0,0 @@
|
|||
package main
|
||||
|
||||
import "testing"
|
||||
|
||||
// TestTree is a smoke test: it renders a forest (two disjoint trees plus
// annotated nodes) and only checks that printTree does not panic; the
// output itself is not asserted.
func TestTree(*testing.T) {
	nodes := []treeNode{
		{"F", "H", []string{}},
		{"F", "I", []string{}},
		{"F", "J", []string{}},
		{"A", "B", []string{}},
		{"A", "C", []string{}},
		{"A", "K", []string{}},
		{"C", "F", []string{}},
		{"C", "G", []string{"beware", "the", "scary", "thing"}},
		{"C", "L", []string{}},
		{"B", "D", []string{}},
		{"B", "E", []string{}},
		{"B", "M", []string{}},
		{"K", "N", []string{}},
		{"W", "X", []string{}},
		{"Y", "Z", []string{}},
		{"X", "Y", []string{}},
	}
	printTree(nodes)
}
|
38
vendor/github.com/containers/storage/cmd/oci-storage/version.go
generated
vendored
38
vendor/github.com/containers/storage/cmd/oci-storage/version.go
generated
vendored
|
@ -1,38 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/mflag"
|
||||
)
|
||||
|
||||
func version(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
version, err := m.Version()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "version: %v\n", err)
|
||||
return 1
|
||||
}
|
||||
if jsonOutput {
|
||||
json.NewEncoder(os.Stdout).Encode(version)
|
||||
} else {
|
||||
for _, pair := range version {
|
||||
fmt.Fprintf(os.Stderr, "%s: %s\n", pair[0], pair[1])
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// init registers the "version" subcommand.
func init() {
	commands = append(commands, command{
		names:   []string{"version"},
		usage:   "Return oci-storage version information",
		minArgs: 0,
		action:  version,
		addFlags: func(flags *mflag.FlagSet, cmd *command) {
			flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
		},
	})
}
|
41
vendor/github.com/containers/storage/cmd/oci-storage/wipe.go
generated
vendored
41
vendor/github.com/containers/storage/cmd/oci-storage/wipe.go
generated
vendored
|
@ -1,41 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/mflag"
|
||||
)
|
||||
|
||||
func wipe(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
|
||||
err := m.Wipe()
|
||||
if jsonOutput {
|
||||
if err == nil {
|
||||
json.NewEncoder(os.Stdout).Encode(string(""))
|
||||
} else {
|
||||
json.NewEncoder(os.Stdout).Encode(err)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%s: %v\n", action, err)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// init registers the "wipe" subcommand.
func init() {
	commands = append(commands, command{
		names:   []string{"wipe"},
		usage:   "Wipe all layers, images, and containers",
		minArgs: 0,
		action:  wipe,
		addFlags: func(flags *mflag.FlagSet, cmd *command) {
			flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
		},
	})
}
|
6
vendor/github.com/containers/storage/docs/Makefile
generated
vendored
6
vendor/github.com/containers/storage/docs/Makefile
generated
vendored
|
@ -1,6 +0,0 @@
|
|||
# Renders every *.md page in this directory into a section-1 man page
# using go-md2man.  (Recipe lines must stay tab-indented.)
GOMD2MAN = go-md2man

# "docs" depends on one .1 file per .md file present.
docs: $(patsubst %.md,%.1,$(wildcard *.md))

%.1: %.md
	$(GOMD2MAN) -in $^ -out $@
|
25
vendor/github.com/containers/storage/docs/oci-storage-add-names.md
generated
vendored
25
vendor/github.com/containers/storage/docs/oci-storage-add-names.md
generated
vendored
|
@ -1,25 +0,0 @@
|
|||
## oci-storage-add-names 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage add-names - Add names to a layer/image/container
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **add-names** [*options* [...]] *layerOrImageOrContainerNameOrID*
|
||||
|
||||
## DESCRIPTION
|
||||
In addition to IDs, *layers*, *images*, and *containers* can have
|
||||
human-readable names assigned to them in *oci-storage*. The *add-names*
|
||||
command can be used to add one or more names to them.
|
||||
|
||||
## OPTIONS
|
||||
**-n | --name** *name*
|
||||
|
||||
Specifies a name to add to the layer, image, or container. If a specified name
|
||||
is already used by another layer, image, or container, it is removed from that
|
||||
other layer, image, or container.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage add-names -n my-awesome-container -n my-for-realsies-awesome-container f3be6c6134d0d980936b4c894f1613b69a62b79588fdeda744d0be3693bde8ec**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-set-names(1)
|
32
vendor/github.com/containers/storage/docs/oci-storage-applydiff.md
generated
vendored
32
vendor/github.com/containers/storage/docs/oci-storage-applydiff.md
generated
vendored
|
@ -1,32 +0,0 @@
|
|||
## oci-storage-apply-diff 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage apply-diff - Apply a layer diff to a layer
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **apply-diff** [*options* [...]] *layerNameOrID* [*referenceLayerNameOrID*]
|
||||
|
||||
## DESCRIPTION
|
||||
When a layer is first created, it contains no changes relative to its parent
|
||||
layer. The layer can either be mounted read-write and its contents modified
|
||||
directly, or contents can be added (or removed) by applying a layer diff. A
|
||||
layer diff takes the form of a (possibly compressed) tar archive with
|
||||
additional information present in its headers, and can be produced by running
|
||||
*oci-storage diff* or an equivalent.
|
||||
|
||||
Layer diffs are not typically applied manually. More often they are applied by
|
||||
a tool which is being used to import an entire image, such as **skopeo**.
|
||||
|
||||
## OPTIONS
|
||||
**-f | --file** *filename*
|
||||
|
||||
Specifies the name of a file from which the diff should be read. If this
|
||||
option is not used, the diff is read from standard input.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage apply-diff -f 71841c97e320d6cde.tar.gz layer1**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-changes(1)
|
||||
oci-storage-diff(1)
|
||||
oci-storage-diffsize(1)
|
21
vendor/github.com/containers/storage/docs/oci-storage-changes.md
generated
vendored
21
vendor/github.com/containers/storage/docs/oci-storage-changes.md
generated
vendored
|
@ -1,21 +0,0 @@
|
|||
## oci-storage-changes 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage changes - Produce a list of changes in a layer
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **changes** *layerNameOrID* [*referenceLayerNameOrID*]
|
||||
|
||||
## DESCRIPTION
|
||||
When a layer is first created, it contains no changes relative to its parent
|
||||
layer. Once the layer has been modified, the *oci-storage changes* command can be used to
|
||||
obtain a summary of which files have been added, deleted, or modified in the
|
||||
layer.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage changes f3be6c6134d0d980936b4c894f1613b69a62b79588fdeda744d0be3693bde8ec**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-applydiff(1)
|
||||
oci-storage-diff(1)
|
||||
oci-storage-diffsize(1)
|
18
vendor/github.com/containers/storage/docs/oci-storage-container.md
generated
vendored
18
vendor/github.com/containers/storage/docs/oci-storage-container.md
generated
vendored
|
@ -1,18 +0,0 @@
|
|||
## oci-storage-container 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage container - Examine a single container
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **container** *containerNameOrID*
|
||||
|
||||
## DESCRIPTION
|
||||
Retrieve information about a container: any names it has, which image was used
|
||||
to create it, any names that image has, and the ID of the container's layer.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage container f3be6c6134d0d980936b4c894f1613b69a62b79588fdeda744d0be3693bde8ec**
|
||||
**oci-storage container my-awesome-container**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-containers(1)
|
16
vendor/github.com/containers/storage/docs/oci-storage-containers.md
generated
vendored
16
vendor/github.com/containers/storage/docs/oci-storage-containers.md
generated
vendored
|
@ -1,16 +0,0 @@
|
|||
## oci-storage-containers 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage containers - List known containers
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **containers**
|
||||
|
||||
## DESCRIPTION
|
||||
Retrieves information about all known containers and lists their IDs and names.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage containers**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-container(1)
|
37
vendor/github.com/containers/storage/docs/oci-storage-create-container.md
generated
vendored
37
vendor/github.com/containers/storage/docs/oci-storage-create-container.md
generated
vendored
|
@ -1,37 +0,0 @@
|
|||
## oci-storage-create-container 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage create-container - Create a container
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **create-container** [*options*...] *imageNameOrID*
|
||||
|
||||
## DESCRIPTION
|
||||
Creates a container, using the specified image as the starting point for its
|
||||
root filesystem.
|
||||
|
||||
## OPTIONS
|
||||
**-n | --name** *name*
|
||||
|
||||
Sets an optional name for the container. If a name is already in use, an error
|
||||
is returned.
|
||||
|
||||
**-i | --id** *ID*
|
||||
|
||||
Sets the ID for the container. If none is specified, one is generated.
|
||||
|
||||
**-m | --metadata** *metadata-value*
|
||||
|
||||
Sets the metadata for the container to the specified value.
|
||||
|
||||
**-f | --metadata-file** *metadata-file*
|
||||
|
||||
Sets the metadata for the container to the contents of the specified file.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage create-container -f manifest.json -n new-container goodimage**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-create-image(1)
|
||||
oci-storage-create-layer(1)
|
||||
oci-storage-delete-container(1)
|
37
vendor/github.com/containers/storage/docs/oci-storage-create-image.md
generated
vendored
37
vendor/github.com/containers/storage/docs/oci-storage-create-image.md
generated
vendored
|
@ -1,37 +0,0 @@
|
|||
## oci-storage-create-image 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage create-image - Create an image
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **create-image** [*options*...] *topLayerNameOrID*
|
||||
|
||||
## DESCRIPTION
|
||||
Creates an image, referring to the specified layer as the one which should be
|
||||
used as the basis for containers which will be based on the image.
|
||||
|
||||
## OPTIONS
|
||||
**-n | --name** *name*
|
||||
|
||||
Sets an optional name for the image. If a name is already in use, an error is
|
||||
returned.
|
||||
|
||||
**-i | --id** *ID*
|
||||
|
||||
Sets the ID for the image. If none is specified, one is generated.
|
||||
|
||||
**-m | --metadata** *metadata-value*
|
||||
|
||||
Sets the metadata for the image to the specified value.
|
||||
|
||||
**-f | --metadata-file** *metadata-file*
|
||||
|
||||
Sets the metadata for the image to the contents of the specified file.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage create-image -f manifest.json -n new-image somelayer**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-create-container(1)
|
||||
oci-storage-create-layer(1)
|
||||
oci-storage-delete-image(1)
|
42
vendor/github.com/containers/storage/docs/oci-storage-create-layer.md
generated
vendored
42
vendor/github.com/containers/storage/docs/oci-storage-create-layer.md
generated
vendored
|
@ -1,42 +0,0 @@
|
|||
## oci-storage-create-layer 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage create-layer - Create a layer
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **create-layer** [*options* [...]] [*parentLayerNameOrID*]
|
||||
|
||||
## DESCRIPTION
|
||||
Creates a new layer which either has a specified layer as its parent, or if no
|
||||
parent is specified, is empty.
|
||||
|
||||
## OPTIONS
|
||||
**-n** *name*
|
||||
|
||||
Sets an optional name for the layer. If a name is already in use, an error is
|
||||
returned.
|
||||
|
||||
**-i | --id** *ID*
|
||||
|
||||
Sets the ID for the layer. If none is specified, one is generated.
|
||||
|
||||
**-m | --metadata** *metadata-value*
|
||||
|
||||
Sets the metadata for the layer to the specified value.
|
||||
|
||||
**-f | --metadata-file** *metadata-file*
|
||||
|
||||
Sets the metadata for the layer to the contents of the specified file.
|
||||
|
||||
**-l | --label** *mount-label*
|
||||
|
||||
Sets the label which should be assigned as an SELinux context when mounting the
|
||||
layer.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage create-layer -f manifest.json -n new-layer somelayer**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-create-container(1)
|
||||
oci-storage-create-image(1)
|
||||
oci-storage-delete-layer(1)
|
18
vendor/github.com/containers/storage/docs/oci-storage-delete-container.md
generated
vendored
18
vendor/github.com/containers/storage/docs/oci-storage-delete-container.md
generated
vendored
|
@ -1,18 +0,0 @@
|
|||
## oci-storage-delete-container 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage delete-container - Delete a container
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **delete-container** *containerNameOrID*
|
||||
|
||||
## DESCRIPTION
|
||||
Deletes a container and its layer.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage delete-container my-awesome-container**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-create-container(1)
|
||||
oci-storage-delete-image(1)
|
||||
oci-storage-delete-layer(1)
|
21
vendor/github.com/containers/storage/docs/oci-storage-delete-image.md
generated
vendored
21
vendor/github.com/containers/storage/docs/oci-storage-delete-image.md
generated
vendored
|
@ -1,21 +0,0 @@
|
|||
## oci-storage-delete-image 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage delete-image - Delete an image
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **delete-image** *imageNameOrID*
|
||||
|
||||
## DESCRIPTION
|
||||
Deletes an image if it is not currently being used by any containers. If the
|
||||
image's top layer is not being used by any other images, it will be removed.
|
||||
If that image's parent is then not being used by other images, it, too, will be
|
||||
removed, and the this will be repeated for each parent's parent.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage delete-image my-base-image**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-create-image(1)
|
||||
oci-storage-delete-container(1)
|
||||
oci-storage-delete-layer(1)
|
19
vendor/github.com/containers/storage/docs/oci-storage-delete-layer.md
generated
vendored
19
vendor/github.com/containers/storage/docs/oci-storage-delete-layer.md
generated
vendored
|
@ -1,19 +0,0 @@
|
|||
## oci-storage-delete-layer 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage delete-layer - Delete a layer
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **delete-layer** *layerNameOrID*
|
||||
|
||||
## DESCRIPTION
|
||||
Deletes a layer if it is not currently being used by any images or containers,
|
||||
and is not the parent of any other layers.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage delete-layer my-base-layer**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-create-layer(1)
|
||||
oci-storage-delete-image(1)
|
||||
oci-storage-delete-layer(1)
|
19
vendor/github.com/containers/storage/docs/oci-storage-delete.md
generated
vendored
19
vendor/github.com/containers/storage/docs/oci-storage-delete.md
generated
vendored
|
@ -1,19 +0,0 @@
|
|||
## oci-storage-delete 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage delete - Force deletion of a layer, image, or container
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **delete** *layerOrImageOrContainerNameOrID*
|
||||
|
||||
## DESCRIPTION
|
||||
Deletes a specified layer, image, or container, with no safety checking. This
|
||||
can corrupt data, and may be removed.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage delete my-base-layer**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-delete-container(1)
|
||||
oci-storage-delete-image(1)
|
||||
oci-storage-delete-layer(1)
|
32
vendor/github.com/containers/storage/docs/oci-storage-diff.md
generated
vendored
32
vendor/github.com/containers/storage/docs/oci-storage-diff.md
generated
vendored
|
@ -1,32 +0,0 @@
|
|||
## oci-storage-diff 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage diff - Generate a layer diff
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **diff** [*options* [...]] *layerNameOrID*
|
||||
|
||||
## DESCRIPTION
|
||||
Generates a layer diff representing the changes made in the specified layer.
|
||||
If the layer was populated using a layer diff, the result aims to be
|
||||
bit-for-bit identical with the one that was applied, including the type of
|
||||
compression which was applied.
|
||||
|
||||
## OPTIONS
|
||||
**-f | --file** *file*
|
||||
|
||||
Write the diff to the specified file instead of stdout.
|
||||
|
||||
**-c | --gzip**
|
||||
|
||||
Compress the diff using gzip compression. If the layer was populated by a
|
||||
layer diff, and that layer diff was compressed, this will be done
|
||||
automatically.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage diff my-base-layer**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-applydiff(1)
|
||||
oci-storage-changes(1)
|
||||
oci-storage-diffsize(1)
|
19
vendor/github.com/containers/storage/docs/oci-storage-diffsize.md
generated
vendored
19
vendor/github.com/containers/storage/docs/oci-storage-diffsize.md
generated
vendored
|
@ -1,19 +0,0 @@
|
|||
## oci-storage-diffsize 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage diffsize - Compute the size of a layer diff
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **diffsize** *layerNameOrID*
|
||||
|
||||
## DESCRIPTION
|
||||
Computes the expected size of the layer diff which would be generated for the
|
||||
specified layer.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage diffsize my-base-layer**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-applydiff(1)
|
||||
oci-storage-changes(1)
|
||||
oci-storage-diff(1)
|
31
vendor/github.com/containers/storage/docs/oci-storage-exists.md
generated
vendored
31
vendor/github.com/containers/storage/docs/oci-storage-exists.md
generated
vendored
|
@ -1,31 +0,0 @@
|
|||
## oci-storage-exists 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage exists - Check if a layer, image, or container exists
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **exists** [*options* [...]] *layerOrImageOrContainerNameOrID* [...]
|
||||
|
||||
## DESCRIPTION
|
||||
Checks if there are layers, images, or containers which have the specified
|
||||
names or IDs.
|
||||
|
||||
## OPTIONS
|
||||
**-c | --container**
|
||||
|
||||
Only succeed if the names or IDs are that of containers.
|
||||
|
||||
**-i | --image**
|
||||
|
||||
Only succeed if the names or IDs are that of images.
|
||||
|
||||
**-l | --layer**
|
||||
|
||||
Only succeed if the names or IDs are that of layers.
|
||||
|
||||
**-q | --quiet**
|
||||
|
||||
Suppress output.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage exists my-base-layer**
|
22
vendor/github.com/containers/storage/docs/oci-storage-get-container-data.md
generated
vendored
22
vendor/github.com/containers/storage/docs/oci-storage-get-container-data.md
generated
vendored
|
@ -1,22 +0,0 @@
|
|||
## oci-storage-get-container-data 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage get-container-data - Retrieve lookaside data for a container
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **get-container-data** [*options* [...]] *containerNameOrID* *dataName*
|
||||
|
||||
## DESCRIPTION
|
||||
Retrieves a piece of named data which is associated with a container.
|
||||
|
||||
## OPTIONS
|
||||
**-f | --file** *file*
|
||||
|
||||
Write the data to a file instead of stdout.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage get-container-data -f config.json my-container configuration**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-list-container-data(1)
|
||||
oci-storage-set-container-data(1)
|
17
vendor/github.com/containers/storage/docs/oci-storage-get-container-dir.md
generated
vendored
17
vendor/github.com/containers/storage/docs/oci-storage-get-container-dir.md
generated
vendored
|
@ -1,17 +0,0 @@
|
|||
## oci-storage-get-container-dir 1 "Sepember 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage get-container-dir - Find lookaside directory for a container
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **get-container-dir** [*options* [...]] *containerNameOrID*
|
||||
|
||||
## DESCRIPTION
|
||||
Prints the location of a directory which the caller can use to store lookaside
|
||||
information which should be cleaned up when the container is deleted.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage get-container-dir my-container**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-get-container-run-dir(1)
|
17
vendor/github.com/containers/storage/docs/oci-storage-get-container-run-dir.md
generated
vendored
17
vendor/github.com/containers/storage/docs/oci-storage-get-container-run-dir.md
generated
vendored
|
@ -1,17 +0,0 @@
|
|||
## oci-storage-get-container-run-dir 1 "Sepember 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage get-container-run-dir - Find runtime lookaside directory for a container
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **get-container-run-dir** [*options* [...]] *containerNameOrID*
|
||||
|
||||
## DESCRIPTION
|
||||
Prints the location of a directory which the caller can use to store lookaside
|
||||
information which should be cleaned up when the host is rebooted.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage get-container-run-dir my-container**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-get-container-dir(1)
|
22
vendor/github.com/containers/storage/docs/oci-storage-get-image-data.md
generated
vendored
22
vendor/github.com/containers/storage/docs/oci-storage-get-image-data.md
generated
vendored
|
@ -1,22 +0,0 @@
|
|||
## oci-storage-get-image-data 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage get-image-data - Retrieve lookaside data for an image
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **get-image-data** [*options* [...]] *imageNameOrID* *dataName*
|
||||
|
||||
## DESCRIPTION
|
||||
Retrieves a piece of named data which is associated with an image.
|
||||
|
||||
## OPTIONS
|
||||
**-f | --file** *file*
|
||||
|
||||
Write the data to a file instead of stdout.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage get-image-data -f manifest.json my-image manifest**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-list-image-data(1)
|
||||
oci-storage-set-image-data(1)
|
18
vendor/github.com/containers/storage/docs/oci-storage-image.md
generated
vendored
18
vendor/github.com/containers/storage/docs/oci-storage-image.md
generated
vendored
|
@ -1,18 +0,0 @@
|
|||
## oci-storage-image 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage image - Examine a single image
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **image** *imageNameOrID*
|
||||
|
||||
## DESCRIPTION
|
||||
Retrieve information about an image: its ID, any names it has, and the ID of
|
||||
its top layer.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage image 49bff34e4baf9378c01733d02276a731a4c4771ebeab305020c5303679f88bb8**
|
||||
**oci-storage image my-favorite-image**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-images(1)
|
16
vendor/github.com/containers/storage/docs/oci-storage-images.md
generated
vendored
16
vendor/github.com/containers/storage/docs/oci-storage-images.md
generated
vendored
|
@ -1,16 +0,0 @@
|
|||
## oci-storage-images 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage images - List known images
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **images**
|
||||
|
||||
## DESCRIPTION
|
||||
Retrieves information about all known images and lists their IDs and names.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage images**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-image(1)
|
23
vendor/github.com/containers/storage/docs/oci-storage-layers.md
generated
vendored
23
vendor/github.com/containers/storage/docs/oci-storage-layers.md
generated
vendored
|
@ -1,23 +0,0 @@
|
|||
## oci-storage-layers 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage layers - List known layers
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** [*options* [...]] **layers**
|
||||
|
||||
## DESCRIPTION
|
||||
Retrieves information about all known layers and lists their IDs and names, the
|
||||
IDs and names of any images which list those layers as their top layer, and the
|
||||
IDs and names of any containers for which the layer serves as the container's
|
||||
own layer.
|
||||
|
||||
## OPTIONS
|
||||
**-t | --tree**
|
||||
|
||||
Display results using a tree to show the hierarchy of parent-child
|
||||
relationships between layers.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage layers**
|
||||
**oci-storage layers -t**
|
17
vendor/github.com/containers/storage/docs/oci-storage-list-container-data.md
generated
vendored
17
vendor/github.com/containers/storage/docs/oci-storage-list-container-data.md
generated
vendored
|
@ -1,17 +0,0 @@
|
|||
## oci-storage-list-container-data 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage list-container-data - List lookaside data for a container
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **list-container-data** *containerNameOrID*
|
||||
|
||||
## DESCRIPTION
|
||||
List the pieces of named data which are associated with a container.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage list-container-data my-container**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-get-container-data(1)
|
||||
oci-storage-set-container-data(1)
|
17
vendor/github.com/containers/storage/docs/oci-storage-list-image-data.md
generated
vendored
17
vendor/github.com/containers/storage/docs/oci-storage-list-image-data.md
generated
vendored
|
@ -1,17 +0,0 @@
|
|||
## oci-storage-list-image-data 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage list-image-data - List lookaside data for an image
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **list-image-data** *imageNameOrID*
|
||||
|
||||
## DESCRIPTION
|
||||
List the pieces of named data which are associated with an image.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage list-image-data my-image**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-get-image-data(1)
|
||||
oci-storage-list-image-data(1)
|
22
vendor/github.com/containers/storage/docs/oci-storage-metadata.md
generated
vendored
22
vendor/github.com/containers/storage/docs/oci-storage-metadata.md
generated
vendored
|
@ -1,22 +0,0 @@
|
|||
## oci-storage-metadata 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage metadata - Retrieve metadata for a layer, image, or container
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **metadata** [*options* [...]] *layerOrImageOrContainerNameOrID*
|
||||
|
||||
## DESCRIPTION
|
||||
Outputs metadata associated with a layer, image, or container. Metadata is
|
||||
intended to be small, and is expected to be cached in memory.
|
||||
|
||||
## OPTIONS
|
||||
**-q | --quiet**
|
||||
|
||||
Don't print the ID or name of the item with which the metadata is associated.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage metadata -q my-image > my-image.txt**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-set-metadata(1)
|
22
vendor/github.com/containers/storage/docs/oci-storage-mount.md
generated
vendored
22
vendor/github.com/containers/storage/docs/oci-storage-mount.md
generated
vendored
|
@ -1,22 +0,0 @@
|
|||
## oci-storage-mount 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage mount - Mount a layer or a container's layer for manipulation
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **mount** [*options* [...]] *layerOrContainerNameOrID*
|
||||
|
||||
## DESCRIPTION
|
||||
Mounts a layer or a container's layer on the host's filesystem and prints the
|
||||
mountpoint.
|
||||
|
||||
## OPTIONS
|
||||
**-l | --label** *label*
|
||||
|
||||
Specify an SELinux context for the mounted layer.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage mount my-container**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-unmount(1)
|
22
vendor/github.com/containers/storage/docs/oci-storage-set-container-data.md
generated
vendored
22
vendor/github.com/containers/storage/docs/oci-storage-set-container-data.md
generated
vendored
|
@ -1,22 +0,0 @@
|
|||
## oci-storage-set-container-data 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage set-container-data - Set lookaside data for a container
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **set-container-data** [*options* [...]] *containerNameOrID* *dataName*
|
||||
|
||||
## DESCRIPTION
|
||||
Sets a piece of named data which is associated with a container.
|
||||
|
||||
## OPTIONS
|
||||
**-f | --file** *filename*
|
||||
|
||||
Read the data contents from a file instead of stdin.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage set-container-data -f ./config.json my-container configuration**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-get-container-data(1)
|
||||
oci-storage-list-container-data(1)
|
22
vendor/github.com/containers/storage/docs/oci-storage-set-image-data.md
generated
vendored
22
vendor/github.com/containers/storage/docs/oci-storage-set-image-data.md
generated
vendored
|
@ -1,22 +0,0 @@
|
|||
## oci-storage-set-image-data 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage set-image-data - Set lookaside data for an image
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **set-image-data** [*options* [...]] *imageNameOrID* *dataName*
|
||||
|
||||
## DESCRIPTION
|
||||
Sets a piece of named data which is associated with an image.
|
||||
|
||||
## OPTIONS
|
||||
**-f | --file** *filename*
|
||||
|
||||
Read the data contents from a file instead of stdin.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage set-image-data -f ./manifest.json my-image manifest**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-get-image-data(1)
|
||||
oci-storage-list-image-data(1)
|
26
vendor/github.com/containers/storage/docs/oci-storage-set-metadata.md
generated
vendored
26
vendor/github.com/containers/storage/docs/oci-storage-set-metadata.md
generated
vendored
|
@ -1,26 +0,0 @@
|
|||
## oci-storage-set-metadata 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage set-metadata - Set metadata for a layer, image, or container
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **set-metadata** [*options* [...]] *layerOrImageOrContainerNameOrID*
|
||||
|
||||
## DESCRIPTION
|
||||
Updates the metadata associated with a layer, image, or container. Metadata is
|
||||
intended to be small, and is expected to be cached in memory.
|
||||
|
||||
## OPTIONS
|
||||
**-f | --metadata-file** *filename*
|
||||
|
||||
Use the contents of the specified file as the metadata.
|
||||
|
||||
**-m | --metadata** *value*
|
||||
|
||||
Use the specified value as the metadata.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage set-metadata -m "compression: gzip" my-layer**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-metadata(1)
|
27
vendor/github.com/containers/storage/docs/oci-storage-set-names.md
generated
vendored
27
vendor/github.com/containers/storage/docs/oci-storage-set-names.md
generated
vendored
|
@ -1,27 +0,0 @@
|
|||
## oci-storage-set-names 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage set-names - Set names for a layer/image/container
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **set-names** [**-n** *name* [...]] *layerOrImageOrContainerNameOrID*
|
||||
|
||||
## DESCRIPTION
|
||||
In addition to IDs, *layers*, *images*, and *containers* can have
|
||||
human-readable names assigned to them in *oci-storage*. The *set-names*
|
||||
command can be used to reset the list of names for any of them.
|
||||
|
||||
## OPTIONS
|
||||
**-n | --name** *name*
|
||||
|
||||
Specifies a name to set on the layer, image, or container. If a specified name
|
||||
is already used by another layer, image, or container, it is removed from that
|
||||
other layer, image, or container. Any names which are currently assigned to
|
||||
this layer, image, or container, and which are not specified using this option,
|
||||
will be removed from the layer, image, or container.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage set-names -n my-one-and-only-name f3be6c6134d0d980936b4c894f1613b69a62b79588fdeda744d0be3693bde8ec**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-add-names(1)
|
20
vendor/github.com/containers/storage/docs/oci-storage-shutdown.md
generated
vendored
20
vendor/github.com/containers/storage/docs/oci-storage-shutdown.md
generated
vendored
|
@ -1,20 +0,0 @@
|
|||
## oci-storage-shutdown 1 "October 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage shutdown - Shut down layer storage
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **shutdown** [*options* [...]]
|
||||
|
||||
## DESCRIPTION
|
||||
Shuts down the layer storage driver, which may be using kernel resources.
|
||||
|
||||
## OPTIONS
|
||||
**-f | --force**
|
||||
|
||||
Attempt to unmount any mounted layers before attempting to shut down the
|
||||
driver. If this option is not specified, if any layers are mounted, shutdown
|
||||
will not be attempted.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage shutdown**
|
16
vendor/github.com/containers/storage/docs/oci-storage-status.md
generated
vendored
16
vendor/github.com/containers/storage/docs/oci-storage-status.md
generated
vendored
|
@ -1,16 +0,0 @@
|
|||
## oci-storage-status 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage status - Output status information from the storage library's driver
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **status**
|
||||
|
||||
## DESCRIPTION
|
||||
Queries the storage library's driver for status information.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage status**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-version(1)
|
17
vendor/github.com/containers/storage/docs/oci-storage-unmount.md
generated
vendored
17
vendor/github.com/containers/storage/docs/oci-storage-unmount.md
generated
vendored
|
@ -1,17 +0,0 @@
|
|||
## oci-storage-unmount 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage unmount - Unmount a layer or a container's layer
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **unmount** *layerOrContainerMountpointOrNameOrID*
|
||||
|
||||
## DESCRIPTION
|
||||
Unmounts a layer or a container's layer from the host's filesystem.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage unmount my-container**
|
||||
**oci-storage unmount /var/lib/oci-storage/mounts/my-container**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-mount(1)
|
16
vendor/github.com/containers/storage/docs/oci-storage-version.md
generated
vendored
16
vendor/github.com/containers/storage/docs/oci-storage-version.md
generated
vendored
|
@ -1,16 +0,0 @@
|
|||
## oci-storage-version 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage version - Output version information about the storage library
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **version**
|
||||
|
||||
## DESCRIPTION
|
||||
Outputs version information about the storage library and *oci-storage*.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage version**
|
||||
|
||||
## SEE ALSO
|
||||
oci-storage-status(1)
|
14
vendor/github.com/containers/storage/docs/oci-storage-wipe.md
generated
vendored
14
vendor/github.com/containers/storage/docs/oci-storage-wipe.md
generated
vendored
|
@ -1,14 +0,0 @@
|
|||
## oci-storage-wipe 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage wipe - Delete all containers, images, and layers
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** **wipe**
|
||||
|
||||
## DESCRIPTION
|
||||
Deletes all known containers, images, and layers. Depending on your use case,
|
||||
use with caution or abandon.
|
||||
|
||||
## EXAMPLE
|
||||
**oci-storage wipe**
|
131
vendor/github.com/containers/storage/docs/oci-storage.md
generated
vendored
131
vendor/github.com/containers/storage/docs/oci-storage.md
generated
vendored
|
@ -1,131 +0,0 @@
|
|||
## oci-storage 1 "August 2016"
|
||||
|
||||
## NAME
|
||||
oci-storage - Manage layer/image/container storage
|
||||
|
||||
## SYNOPSIS
|
||||
**oci-storage** [**subcommand**] [**--help**]
|
||||
|
||||
## DESCRIPTION
|
||||
The *oci-storage* command is a front-end for the *containers/storage* library.
|
||||
While it can be used to manage storage for filesystem layers, images, and
|
||||
containers directly, its main use cases are centered around troubleshooting and
|
||||
querying the state of storage which is being managed by other processes.
|
||||
|
||||
Notionally, a complete filesystem layer is composed of a container filesystem
|
||||
and some bookkeeping information. Other layers, *children* of that layer,
|
||||
default to sharing its contents, but any changes made to the contents of the
|
||||
children are not reflected in the *parent*. This arrangement is intended to
|
||||
save disk space: by storing the *child* layer only as a set of changes relative
|
||||
to its *parent*, the *parent*'s contents should not need to be duplicated for
|
||||
each of the *parent*'s *children*. Of course, each *child* can have its own
|
||||
*children*. The contents of *parent* layers should not be modified.
|
||||
|
||||
An *image* is a reference to a particular *layer*, along with some bookkeeping
|
||||
information. Presumably, the *image* points to a *layer* which has been
|
||||
modified, possibly in multiple steps, from some general-purpose *parent*, so
|
||||
that it is suitable for running an intended application. Multiple *images* can
|
||||
reference a single *layer*, while differing only in the additional bookkeeping
|
||||
information that they carry. The contents of *images* should be considered
|
||||
read-only.
|
||||
|
||||
A *container* is essentially a *layer* which is a *child* of a *layer* which is
|
||||
referred to by an *image* (put another way, a *container* is instantiated from
|
||||
an *image*), along with some bookkeeping information. They do not have
|
||||
*children* and their *layers* can not be directly referred to by *images*.
|
||||
This ensures that changes to the contents of a *container*'s layer do not
|
||||
affect other *images* or *layers*, so they are considered writeable.
|
||||
|
||||
All of *layers*, *images*, and *containers* can have metadata which
|
||||
*oci-storage* manages attached to them. Generally this metadata is not
|
||||
expected to be large, as it is cached in memory.
|
||||
|
||||
*Images* and *containers* can also have arbitrarily-named data items attached
|
||||
to them. Generally, this data can be larger than metadata, and is not kept in
|
||||
memory unless it is being retrieved or written.
|
||||
|
||||
It is expected that signatures which can be used to verify an *image*'s
|
||||
contents will be stored as data items for that *image*, along with any template
|
||||
configuration data which is recommended for use in *containers* which derive
|
||||
from the *image*. It is also expected that a *container*'s run-time
|
||||
configuration will be stored as data items.
|
||||
|
||||
## SUB-COMMANDS
|
||||
The *oci-storage* command's features are broken down into several subcommands:
|
||||
**oci-storage add-names(1)** Add layer, image, or container name or names
|
||||
**oci-storage applydiff(1)** Apply a diff to a layer
|
||||
**oci-storage changes(1)** Compare two layers
|
||||
**oci-storage container(1)** Examine a container
|
||||
**oci-storage containers(1)** List containers
|
||||
**oci-storage create-container(1)** Create a new container from an image
|
||||
**oci-storage create-image(1)** Create a new image using layers
|
||||
**oci-storage create-layer(1)** Create a new layer
|
||||
**oci-storage delete(1)** Delete a layer or image or container, with no safety checks
|
||||
**oci-storage delete-container(1)** Delete a container, with safety checks
|
||||
**oci-storage delete-image(1)** Delete an image, with safety checks
|
||||
**oci-storage delete-layer(1)** Delete a layer, with safety checks
|
||||
**oci-storage diff(1)** Compare two layers
|
||||
**oci-storage diffsize(1)** Compare two layers
|
||||
**oci-storage exists(1)** Check if a layer or image or container exists
|
||||
**oci-storage get-container-data(1)** Get data that is attached to a container
|
||||
**oci-storage get-image-data(1)** Get data that is attached to an image
|
||||
**oci-storage image(1)** Examine an image
|
||||
**oci-storage images(1)** List images
|
||||
**oci-storage layers(1)** List layers
|
||||
**oci-storage list-container-data(1)** List data items that are attached to a container
|
||||
**oci-storage list-image-data(1)** List data items that are attached to an image
|
||||
**oci-storage metadata(1)** Retrieve layer, image, or container metadata
|
||||
**oci-storage mount(1)** Mount a layer or container
|
||||
**oci-storage set-container-data(1)** Set data that is attached to a container
|
||||
**oci-storage set-image-data(1)** Set data that is attached to an image
|
||||
**oci-storage set-metadata(1)** Set layer, image, or container metadata
|
||||
**oci-storage set-names(1)** Set layer, image, or container name or names
|
||||
**oci-storage shutdown(1)** Shut down graph driver
|
||||
**oci-storage status(1)** Check on graph driver status
|
||||
**oci-storage unmount(1)** Unmount a layer or container
|
||||
**oci-storage version(1)** Return oci-storage version information
|
||||
**oci-storage wipe(1)** Wipe all layers, images, and containers
|
||||
|
||||
## OPTIONS
|
||||
**--help**
|
||||
|
||||
Print the list of available sub-commands. When a sub-command is specified,
|
||||
provide information about that command.
|
||||
|
||||
**--debug, -D**
|
||||
|
||||
Increases the amount of debugging information which is printed.
|
||||
|
||||
**--graph, -g=/var/lib/oci-storage**
|
||||
|
||||
Overrides the root of the storage tree, used for storing layer contents and
|
||||
information about layers, images, and containers.
|
||||
|
||||
**--run, -R=/var/run/oci-storage**
|
||||
|
||||
Overrides the root of the runtime state tree, currently used mainly for noting
|
||||
the location where a given layer is mounted (see **oci-storage mount**) so that
|
||||
it can be unmounted by path name as an alternative to unmounting by ID or name.
|
||||
|
||||
**--storage-driver, -s**
|
||||
|
||||
Specifies which storage driver to use. If not set, but *$STORAGE_DRIVER* is
|
||||
set in the environment, its value is used. If the storage tree has previously
|
||||
been initialized, neither needs to be provided. If the tree has not previously
|
||||
been initialized and neither is set, a hard-coded default is selected.
|
||||
|
||||
**--storage-opt=[]**
|
||||
|
||||
Set options which will be passed to the storage driver. If not set, but
|
||||
*$STORAGE_OPTS* is set in the environment, its value is treated as a
|
||||
comma-separated list and used instead. If the storage tree has previously been
|
||||
initialized, these need not be provided.
|
||||
|
||||
## EXAMPLES
|
||||
**oci-storage layers -t**
|
||||
|
||||
## BUGS
|
||||
This is still a work in progress, so some functionality may not yet be
|
||||
implemented, and some will be removed if it is found to be unnecessary. That
|
||||
said, if anything isn't working correctly, please report it to [the project's
|
||||
issue tracker] (https://github.com/containers/storage/issues).
|
801
vendor/github.com/containers/storage/drivers/aufs/aufs_test.go
generated
vendored
801
vendor/github.com/containers/storage/drivers/aufs/aufs_test.go
generated
vendored
|
@ -1,801 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package aufs
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/containers/storage/drivers"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/reexec"
|
||||
"github.com/containers/storage/pkg/stringid"
|
||||
)
|
||||
|
||||
var (
|
||||
tmpOuter = path.Join(os.TempDir(), "aufs-tests")
|
||||
tmp = path.Join(tmpOuter, "aufs")
|
||||
)
|
||||
|
||||
func init() {
|
||||
reexec.Init()
|
||||
}
|
||||
|
||||
func testInit(dir string, t testing.TB) graphdriver.Driver {
|
||||
d, err := Init(dir, nil, nil, nil)
|
||||
if err != nil {
|
||||
if err == graphdriver.ErrNotSupported {
|
||||
t.Skip(err)
|
||||
} else {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
func newDriver(t testing.TB) *Driver {
|
||||
if err := os.MkdirAll(tmp, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
d := testInit(tmp, t)
|
||||
return d.(*Driver)
|
||||
}
|
||||
|
||||
func TestNewDriver(t *testing.T) {
|
||||
if err := os.MkdirAll(tmp, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
d := testInit(tmp, t)
|
||||
defer os.RemoveAll(tmp)
|
||||
if d == nil {
|
||||
t.Fatalf("Driver should not be nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAufsString(t *testing.T) {
|
||||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if d.String() != "aufs" {
|
||||
t.Fatalf("Expected aufs got %s", d.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateDirStructure(t *testing.T) {
|
||||
newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
paths := []string{
|
||||
"mnt",
|
||||
"layers",
|
||||
"diff",
|
||||
}
|
||||
|
||||
for _, p := range paths {
|
||||
if _, err := os.Stat(path.Join(tmp, p)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We should be able to create two drivers with the same dir structure
|
||||
func TestNewDriverFromExistingDir(t *testing.T) {
|
||||
if err := os.MkdirAll(tmp, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
testInit(tmp, t)
|
||||
testInit(tmp, t)
|
||||
os.RemoveAll(tmp)
|
||||
}
|
||||
|
||||
func TestCreateNewDir(t *testing.T) {
|
||||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateNewDirStructure(t *testing.T) {
|
||||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
paths := []string{
|
||||
"mnt",
|
||||
"diff",
|
||||
"layers",
|
||||
}
|
||||
|
||||
for _, p := range paths {
|
||||
if _, err := os.Stat(path.Join(tmp, p, "1")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveImage(t *testing.T) {
|
||||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := d.Remove("1"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
paths := []string{
|
||||
"mnt",
|
||||
"diff",
|
||||
"layers",
|
||||
}
|
||||
|
||||
for _, p := range paths {
|
||||
if _, err := os.Stat(path.Join(tmp, p, "1")); err == nil {
|
||||
t.Fatalf("Error should not be nil because dirs with id 1 should be delted: %s", p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetWithoutParent(t *testing.T) {
|
||||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
diffPath, err := d.Get("1", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected := path.Join(tmp, "diff", "1")
|
||||
if diffPath != expected {
|
||||
t.Fatalf("Expected path %s got %s", expected, diffPath)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCleanupWithNoDirs(t *testing.T) {
|
||||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Cleanup(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCleanupWithDir(t *testing.T) {
|
||||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := d.Cleanup(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMountedFalseResponse(t *testing.T) {
|
||||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
response, err := d.mounted(d.getDiffPath("1"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if response != false {
|
||||
t.Fatalf("Response if dir id 1 is mounted should be false")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMountedTrueReponse(t *testing.T) {
|
||||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
defer d.Cleanup()
|
||||
|
||||
if err := d.Create("1", "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := d.Create("2", "1", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err := d.Get("2", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
response, err := d.mounted(d.pathCache["2"])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if response != true {
|
||||
t.Fatalf("Response if dir id 2 is mounted should be true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMountWithParent(t *testing.T) {
|
||||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := d.Create("2", "1", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := d.Cleanup(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
mntPath, err := d.Get("2", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if mntPath == "" {
|
||||
t.Fatal("mntPath should not be empty string")
|
||||
}
|
||||
|
||||
expected := path.Join(tmp, "mnt", "2")
|
||||
if mntPath != expected {
|
||||
t.Fatalf("Expected %s got %s", expected, mntPath)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveMountedDir(t *testing.T) {
|
||||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := d.Create("2", "1", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := d.Cleanup(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
mntPath, err := d.Get("2", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if mntPath == "" {
|
||||
t.Fatal("mntPath should not be empty string")
|
||||
}
|
||||
|
||||
mounted, err := d.mounted(d.pathCache["2"])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !mounted {
|
||||
t.Fatalf("Dir id 2 should be mounted")
|
||||
}
|
||||
|
||||
if err := d.Remove("2"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateWithInvalidParent(t *testing.T) {
|
||||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", "storage", "", nil); err == nil {
|
||||
t.Fatalf("Error should not be nil with parent does not exist")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetDiff(t *testing.T) {
|
||||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.CreateReadWrite("1", "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
diffPath, err := d.Get("1", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Add a file to the diff path with a fixed size
|
||||
size := int64(1024)
|
||||
|
||||
f, err := os.Create(path.Join(diffPath, "test_file"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := f.Truncate(size); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
f.Close()
|
||||
|
||||
a, err := d.Diff("1", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if a == nil {
|
||||
t.Fatalf("Archive should not be nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestChanges(t *testing.T) {
|
||||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.Create("1", "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := d.CreateReadWrite("2", "1", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := d.Cleanup(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
mntPoint, err := d.Get("2", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create a file to save in the mountpoint
|
||||
f, err := os.Create(path.Join(mntPoint, "test.txt"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := f.WriteString("testline"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := f.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
changes, err := d.Changes("2", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(changes) != 1 {
|
||||
t.Fatalf("Dir 2 should have one change from parent got %d", len(changes))
|
||||
}
|
||||
change := changes[0]
|
||||
|
||||
expectedPath := "/test.txt"
|
||||
if change.Path != expectedPath {
|
||||
t.Fatalf("Expected path %s got %s", expectedPath, change.Path)
|
||||
}
|
||||
|
||||
if change.Kind != archive.ChangeAdd {
|
||||
t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind)
|
||||
}
|
||||
|
||||
if err := d.CreateReadWrite("3", "2", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mntPoint, err = d.Get("3", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create a file to save in the mountpoint
|
||||
f, err = os.Create(path.Join(mntPoint, "test2.txt"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := f.WriteString("testline"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := f.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
changes, err = d.Changes("3", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(changes) != 1 {
|
||||
t.Fatalf("Dir 2 should have one change from parent got %d", len(changes))
|
||||
}
|
||||
change = changes[0]
|
||||
|
||||
expectedPath = "/test2.txt"
|
||||
if change.Path != expectedPath {
|
||||
t.Fatalf("Expected path %s got %s", expectedPath, change.Path)
|
||||
}
|
||||
|
||||
if change.Kind != archive.ChangeAdd {
|
||||
t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDiffSize(t *testing.T) {
|
||||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
if err := d.CreateReadWrite("1", "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
diffPath, err := d.Get("1", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Add a file to the diff path with a fixed size
|
||||
size := int64(1024)
|
||||
|
||||
f, err := os.Create(path.Join(diffPath, "test_file"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := f.Truncate(size); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
s, err := f.Stat()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
size = s.Size()
|
||||
if err := f.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
diffSize, err := d.DiffSize("1", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if diffSize != size {
|
||||
t.Fatalf("Expected size to be %d got %d", size, diffSize)
|
||||
}
|
||||
}
|
||||
|
||||
func TestChildDiffSize(t *testing.T) {
|
||||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
defer d.Cleanup()
|
||||
|
||||
if err := d.CreateReadWrite("1", "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
diffPath, err := d.Get("1", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Add a file to the diff path with a fixed size
|
||||
size := int64(1024)
|
||||
|
||||
f, err := os.Create(path.Join(diffPath, "test_file"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := f.Truncate(size); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
s, err := f.Stat()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
size = s.Size()
|
||||
if err := f.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
diffSize, err := d.DiffSize("1", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if diffSize != size {
|
||||
t.Fatalf("Expected size to be %d got %d", size, diffSize)
|
||||
}
|
||||
|
||||
if err := d.Create("2", "1", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
diffSize, err = d.DiffSize("2", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// The diff size for the child should be zero
|
||||
if diffSize != 0 {
|
||||
t.Fatalf("Expected size to be %d got %d", 0, diffSize)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExists(t *testing.T) {
|
||||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
defer d.Cleanup()
|
||||
|
||||
if err := d.Create("1", "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if d.Exists("none") {
|
||||
t.Fatal("id name should not exist in the driver")
|
||||
}
|
||||
|
||||
if !d.Exists("1") {
|
||||
t.Fatal("id 1 should exist in the driver")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStatus(t *testing.T) {
|
||||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
defer d.Cleanup()
|
||||
|
||||
if err := d.Create("1", "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
status := d.Status()
|
||||
if status == nil || len(status) == 0 {
|
||||
t.Fatal("Status should not be nil or empty")
|
||||
}
|
||||
rootDir := status[0]
|
||||
dirs := status[2]
|
||||
if rootDir[0] != "Root Dir" {
|
||||
t.Fatalf("Expected Root Dir got %s", rootDir[0])
|
||||
}
|
||||
if rootDir[1] != d.rootPath() {
|
||||
t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1])
|
||||
}
|
||||
if dirs[0] != "Dirs" {
|
||||
t.Fatalf("Expected Dirs got %s", dirs[0])
|
||||
}
|
||||
if dirs[1] != "1" {
|
||||
t.Fatalf("Expected 1 got %s", dirs[1])
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyDiff(t *testing.T) {
|
||||
d := newDriver(t)
|
||||
defer os.RemoveAll(tmp)
|
||||
defer d.Cleanup()
|
||||
|
||||
if err := d.CreateReadWrite("1", "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
diffPath, err := d.Get("1", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Add a file to the diff path with a fixed size
|
||||
size := int64(1024)
|
||||
|
||||
f, err := os.Create(path.Join(diffPath, "test_file"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := f.Truncate(size); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
f.Close()
|
||||
|
||||
diff, err := d.Diff("1", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := d.Create("2", "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := d.Create("3", "2", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := d.applyDiff("3", diff); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Ensure that the file is in the mount point for id 3
|
||||
|
||||
mountPoint, err := d.Get("3", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := os.Stat(path.Join(mountPoint, "test_file")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func hash(c string) string {
|
||||
h := sha256.New()
|
||||
fmt.Fprint(h, c)
|
||||
return hex.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
func testMountMoreThan42Layers(t *testing.T, mountPath string) {
|
||||
if err := os.MkdirAll(mountPath, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer os.RemoveAll(mountPath)
|
||||
d := testInit(mountPath, t).(*Driver)
|
||||
defer d.Cleanup()
|
||||
var last string
|
||||
var expected int
|
||||
|
||||
for i := 1; i < 127; i++ {
|
||||
expected++
|
||||
var (
|
||||
parent = fmt.Sprintf("%d", i-1)
|
||||
current = fmt.Sprintf("%d", i)
|
||||
)
|
||||
|
||||
if parent == "0" {
|
||||
parent = ""
|
||||
} else {
|
||||
parent = hash(parent)
|
||||
}
|
||||
current = hash(current)
|
||||
|
||||
if err := d.CreateReadWrite(current, parent, "", nil); err != nil {
|
||||
t.Logf("Current layer %d", i)
|
||||
t.Error(err)
|
||||
}
|
||||
point, err := d.Get(current, "")
|
||||
if err != nil {
|
||||
t.Logf("Current layer %d", i)
|
||||
t.Error(err)
|
||||
}
|
||||
f, err := os.Create(path.Join(point, current))
|
||||
if err != nil {
|
||||
t.Logf("Current layer %d", i)
|
||||
t.Error(err)
|
||||
}
|
||||
f.Close()
|
||||
|
||||
if i%10 == 0 {
|
||||
if err := os.Remove(path.Join(point, parent)); err != nil {
|
||||
t.Logf("Current layer %d", i)
|
||||
t.Error(err)
|
||||
}
|
||||
expected--
|
||||
}
|
||||
last = current
|
||||
}
|
||||
|
||||
// Perform the actual mount for the top most image
|
||||
point, err := d.Get(last, "")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
files, err := ioutil.ReadDir(point)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if len(files) != expected {
|
||||
t.Errorf("Expected %d got %d", expected, len(files))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMountMoreThan42Layers(t *testing.T) {
|
||||
os.RemoveAll(tmpOuter)
|
||||
testMountMoreThan42Layers(t, tmp)
|
||||
}
|
||||
|
||||
func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) {
|
||||
defer os.RemoveAll(tmpOuter)
|
||||
zeroes := "0"
|
||||
for {
|
||||
// This finds a mount path so that when combined into aufs mount options
|
||||
// 4096 byte boundary would be in between the paths or in permission
|
||||
// section. For '/tmp' it will use '/tmp/aufs-tests/00000000/aufs'
|
||||
mountPath := path.Join(tmpOuter, zeroes, "aufs")
|
||||
pathLength := 77 + len(mountPath)
|
||||
|
||||
if mod := 4095 % pathLength; mod == 0 || mod > pathLength-2 {
|
||||
t.Logf("Using path: %s", mountPath)
|
||||
testMountMoreThan42Layers(t, mountPath)
|
||||
return
|
||||
}
|
||||
zeroes += "0"
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkConcurrentAccess(b *testing.B) {
|
||||
b.StopTimer()
|
||||
b.ResetTimer()
|
||||
|
||||
d := newDriver(b)
|
||||
defer os.RemoveAll(tmp)
|
||||
defer d.Cleanup()
|
||||
|
||||
numConcurent := 256
|
||||
// create a bunch of ids
|
||||
var ids []string
|
||||
for i := 0; i < numConcurent; i++ {
|
||||
ids = append(ids, stringid.GenerateNonCryptoID())
|
||||
}
|
||||
|
||||
if err := d.Create(ids[0], "", "", nil); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
if err := d.Create(ids[1], ids[0], "", nil); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
parent := ids[1]
|
||||
ids = append(ids[2:])
|
||||
|
||||
chErr := make(chan error, numConcurent)
|
||||
var outerGroup sync.WaitGroup
|
||||
outerGroup.Add(len(ids))
|
||||
b.StartTimer()
|
||||
|
||||
// here's the actual bench
|
||||
for _, id := range ids {
|
||||
go func(id string) {
|
||||
defer outerGroup.Done()
|
||||
if err := d.Create(id, parent, "", nil); err != nil {
|
||||
b.Logf("Create %s failed", id)
|
||||
chErr <- err
|
||||
return
|
||||
}
|
||||
var innerGroup sync.WaitGroup
|
||||
for i := 0; i < b.N; i++ {
|
||||
innerGroup.Add(1)
|
||||
go func() {
|
||||
d.Get(id, "")
|
||||
d.Put(id)
|
||||
innerGroup.Done()
|
||||
}()
|
||||
}
|
||||
innerGroup.Wait()
|
||||
d.Remove(id)
|
||||
}(id)
|
||||
}
|
||||
|
||||
outerGroup.Wait()
|
||||
b.StopTimer()
|
||||
close(chErr)
|
||||
for err := range chErr {
|
||||
if err != nil {
|
||||
b.Log(err)
|
||||
b.Fail()
|
||||
}
|
||||
}
|
||||
}
|
63
vendor/github.com/containers/storage/drivers/btrfs/btrfs_test.go
generated
vendored
63
vendor/github.com/containers/storage/drivers/btrfs/btrfs_test.go
generated
vendored
|
@ -1,63 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package btrfs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"github.com/containers/storage/drivers/graphtest"
|
||||
)
|
||||
|
||||
// This avoids creating a new driver for each test if all tests are run
|
||||
// Make sure to put new tests between TestBtrfsSetup and TestBtrfsTeardown
|
||||
func TestBtrfsSetup(t *testing.T) {
|
||||
graphtest.GetDriver(t, "btrfs")
|
||||
}
|
||||
|
||||
func TestBtrfsCreateEmpty(t *testing.T) {
|
||||
graphtest.DriverTestCreateEmpty(t, "btrfs")
|
||||
}
|
||||
|
||||
func TestBtrfsCreateBase(t *testing.T) {
|
||||
graphtest.DriverTestCreateBase(t, "btrfs")
|
||||
}
|
||||
|
||||
func TestBtrfsCreateSnap(t *testing.T) {
|
||||
graphtest.DriverTestCreateSnap(t, "btrfs")
|
||||
}
|
||||
|
||||
func TestBtrfsSubvolDelete(t *testing.T) {
|
||||
d := graphtest.GetDriver(t, "btrfs")
|
||||
if err := d.CreateReadWrite("test", "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer graphtest.PutDriver(t)
|
||||
|
||||
dir, err := d.Get("test", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer d.Put("test")
|
||||
|
||||
if err := subvolCreate(dir, "subvoltest"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(path.Join(dir, "subvoltest")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := d.Remove("test"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(path.Join(dir, "subvoltest")); !os.IsNotExist(err) {
|
||||
t.Fatalf("expected not exist error on nested subvol, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBtrfsTeardown(t *testing.T) {
|
||||
graphtest.PutDriver(t)
|
||||
}
|
13
vendor/github.com/containers/storage/drivers/btrfs/version_test.go
generated
vendored
13
vendor/github.com/containers/storage/drivers/btrfs/version_test.go
generated
vendored
|
@ -1,13 +0,0 @@
|
|||
// +build linux,!btrfs_noversion
|
||||
|
||||
package btrfs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestLibVersion(t *testing.T) {
|
||||
if btrfsLibVersion() <= 0 {
|
||||
t.Errorf("expected output from btrfs lib version > 0")
|
||||
}
|
||||
}
|
110
vendor/github.com/containers/storage/drivers/devmapper/devmapper_test.go
generated
vendored
110
vendor/github.com/containers/storage/drivers/devmapper/devmapper_test.go
generated
vendored
|
@ -1,110 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package devmapper
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/containers/storage/drivers"
|
||||
"github.com/containers/storage/drivers/graphtest"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Reduce the size the the base fs and loopback for the tests
|
||||
defaultDataLoopbackSize = 300 * 1024 * 1024
|
||||
defaultMetaDataLoopbackSize = 200 * 1024 * 1024
|
||||
defaultBaseFsSize = 300 * 1024 * 1024
|
||||
defaultUdevSyncOverride = true
|
||||
if err := graphtest.InitLoopbacks(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// This avoids creating a new driver for each test if all tests are run
|
||||
// Make sure to put new tests between TestDevmapperSetup and TestDevmapperTeardown
|
||||
func TestDevmapperSetup(t *testing.T) {
|
||||
graphtest.GetDriver(t, "devicemapper")
|
||||
}
|
||||
|
||||
func TestDevmapperCreateEmpty(t *testing.T) {
|
||||
graphtest.DriverTestCreateEmpty(t, "devicemapper")
|
||||
}
|
||||
|
||||
func TestDevmapperCreateBase(t *testing.T) {
|
||||
graphtest.DriverTestCreateBase(t, "devicemapper")
|
||||
}
|
||||
|
||||
func TestDevmapperCreateSnap(t *testing.T) {
|
||||
graphtest.DriverTestCreateSnap(t, "devicemapper")
|
||||
}
|
||||
|
||||
func TestDevmapperTeardown(t *testing.T) {
|
||||
graphtest.PutDriver(t)
|
||||
}
|
||||
|
||||
func TestDevmapperReduceLoopBackSize(t *testing.T) {
|
||||
tenMB := int64(10 * 1024 * 1024)
|
||||
testChangeLoopBackSize(t, -tenMB, defaultDataLoopbackSize, defaultMetaDataLoopbackSize)
|
||||
}
|
||||
|
||||
func TestDevmapperIncreaseLoopBackSize(t *testing.T) {
|
||||
tenMB := int64(10 * 1024 * 1024)
|
||||
testChangeLoopBackSize(t, tenMB, defaultDataLoopbackSize+tenMB, defaultMetaDataLoopbackSize+tenMB)
|
||||
}
|
||||
|
||||
func testChangeLoopBackSize(t *testing.T, delta, expectDataSize, expectMetaDataSize int64) {
|
||||
driver := graphtest.GetDriver(t, "devicemapper").(*graphtest.Driver).Driver.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver)
|
||||
defer graphtest.PutDriver(t)
|
||||
// make sure data or metadata loopback size are the default size
|
||||
if s := driver.DeviceSet.Status(); s.Data.Total != uint64(defaultDataLoopbackSize) || s.Metadata.Total != uint64(defaultMetaDataLoopbackSize) {
|
||||
t.Fatalf("data or metadata loop back size is incorrect")
|
||||
}
|
||||
if err := driver.Cleanup(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
//Reload
|
||||
d, err := Init(driver.home, []string{
|
||||
fmt.Sprintf("dm.loopdatasize=%d", defaultDataLoopbackSize+delta),
|
||||
fmt.Sprintf("dm.loopmetadatasize=%d", defaultMetaDataLoopbackSize+delta),
|
||||
}, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating devicemapper driver: %v", err)
|
||||
}
|
||||
driver = d.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver)
|
||||
if s := driver.DeviceSet.Status(); s.Data.Total != uint64(expectDataSize) || s.Metadata.Total != uint64(expectMetaDataSize) {
|
||||
t.Fatalf("data or metadata loop back size is incorrect")
|
||||
}
|
||||
if err := driver.Cleanup(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure devices.Lock() has been release upon return from cleanupDeletedDevices() function
|
||||
func TestDevmapperLockReleasedDeviceDeletion(t *testing.T) {
|
||||
driver := graphtest.GetDriver(t, "devicemapper").(*graphtest.Driver).Driver.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver)
|
||||
defer graphtest.PutDriver(t)
|
||||
|
||||
// Call cleanupDeletedDevices() and after the call take and release
|
||||
// DeviceSet Lock. If lock has not been released, this will hang.
|
||||
driver.DeviceSet.cleanupDeletedDevices()
|
||||
|
||||
doneChan := make(chan bool)
|
||||
|
||||
go func() {
|
||||
driver.DeviceSet.Lock()
|
||||
defer driver.DeviceSet.Unlock()
|
||||
doneChan <- true
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-time.After(time.Second * 5):
|
||||
// Timer expired. That means lock was not released upon
|
||||
// function return and we are deadlocked. Release lock
|
||||
// here so that cleanup could succeed and fail the test.
|
||||
driver.DeviceSet.Unlock()
|
||||
t.Fatalf("Could not acquire devices lock after call to cleanupDeletedDevices()")
|
||||
case <-doneChan:
|
||||
}
|
||||
}
|
264
vendor/github.com/containers/storage/drivers/graphtest/graphbench_unix.go
generated
vendored
264
vendor/github.com/containers/storage/drivers/graphtest/graphbench_unix.go
generated
vendored
|
@ -1,264 +0,0 @@
|
|||
// +build linux freebsd
|
||||
|
||||
package graphtest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/containers/storage/pkg/stringid"
|
||||
)
|
||||
|
||||
// DriverBenchExists benchmarks calls to exist
|
||||
func DriverBenchExists(b *testing.B, drivername string, driveroptions ...string) {
|
||||
driver := GetDriver(b, drivername, driveroptions...)
|
||||
defer PutDriver(b)
|
||||
|
||||
base := stringid.GenerateRandomID()
|
||||
|
||||
if err := driver.Create(base, "", "", nil); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if !driver.Exists(base) {
|
||||
b.Fatal("Newly created image doesn't exist")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DriverBenchGetEmpty benchmarks calls to get on an empty layer
|
||||
func DriverBenchGetEmpty(b *testing.B, drivername string, driveroptions ...string) {
|
||||
driver := GetDriver(b, drivername, driveroptions...)
|
||||
defer PutDriver(b)
|
||||
|
||||
base := stringid.GenerateRandomID()
|
||||
|
||||
if err := driver.Create(base, "", "", nil); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := driver.Get(base, "")
|
||||
b.StopTimer()
|
||||
if err != nil {
|
||||
b.Fatalf("Error getting mount: %s", err)
|
||||
}
|
||||
if err := driver.Put(base); err != nil {
|
||||
b.Fatalf("Error putting mount: %s", err)
|
||||
}
|
||||
b.StartTimer()
|
||||
}
|
||||
}
|
||||
|
||||
// DriverBenchDiffBase benchmarks calls to diff on a root layer
|
||||
func DriverBenchDiffBase(b *testing.B, drivername string, driveroptions ...string) {
|
||||
driver := GetDriver(b, drivername, driveroptions...)
|
||||
defer PutDriver(b)
|
||||
|
||||
base := stringid.GenerateRandomID()
|
||||
|
||||
if err := driver.Create(base, "", "", nil); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
if err := addFiles(driver, base, 3); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
arch, err := driver.Diff(base, "")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
_, err = io.Copy(ioutil.Discard, arch)
|
||||
if err != nil {
|
||||
b.Fatalf("Error copying archive: %s", err)
|
||||
}
|
||||
arch.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// DriverBenchDiffN benchmarks calls to diff on two layers with
|
||||
// a provided number of files on the lower and upper layers.
|
||||
func DriverBenchDiffN(b *testing.B, bottom, top int, drivername string, driveroptions ...string) {
|
||||
driver := GetDriver(b, drivername, driveroptions...)
|
||||
defer PutDriver(b)
|
||||
base := stringid.GenerateRandomID()
|
||||
upper := stringid.GenerateRandomID()
|
||||
|
||||
if err := driver.Create(base, "", "", nil); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
if err := addManyFiles(driver, base, bottom, 3); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
if err := driver.Create(upper, base, "", nil); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
if err := addManyFiles(driver, upper, top, 6); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
arch, err := driver.Diff(upper, "")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
_, err = io.Copy(ioutil.Discard, arch)
|
||||
if err != nil {
|
||||
b.Fatalf("Error copying archive: %s", err)
|
||||
}
|
||||
arch.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// DriverBenchDiffApplyN benchmarks calls to diff and apply together
|
||||
func DriverBenchDiffApplyN(b *testing.B, fileCount int, drivername string, driveroptions ...string) {
|
||||
driver := GetDriver(b, drivername, driveroptions...)
|
||||
defer PutDriver(b)
|
||||
base := stringid.GenerateRandomID()
|
||||
upper := stringid.GenerateRandomID()
|
||||
|
||||
if err := driver.Create(base, "", "", nil); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
if err := addManyFiles(driver, base, fileCount, 3); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
if err := driver.Create(upper, base, "", nil); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
if err := addManyFiles(driver, upper, fileCount, 6); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
diffSize, err := driver.DiffSize(upper, "")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.ResetTimer()
|
||||
b.StopTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
diff := stringid.GenerateRandomID()
|
||||
if err := driver.Create(diff, base, "", nil); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
if err := checkManyFiles(driver, diff, fileCount, 3); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.StartTimer()
|
||||
|
||||
arch, err := driver.Diff(upper, "")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
applyDiffSize, err := driver.ApplyDiff(diff, "", arch)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.StopTimer()
|
||||
arch.Close()
|
||||
|
||||
if applyDiffSize != diffSize {
|
||||
// TODO: enforce this
|
||||
//b.Fatalf("Apply diff size different, got %d, expected %s", applyDiffSize, diffSize)
|
||||
}
|
||||
if err := checkManyFiles(driver, diff, fileCount, 6); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DriverBenchDeepLayerDiff benchmarks calls to diff on top of a given number of layers.
|
||||
func DriverBenchDeepLayerDiff(b *testing.B, layerCount int, drivername string, driveroptions ...string) {
|
||||
driver := GetDriver(b, drivername, driveroptions...)
|
||||
defer PutDriver(b)
|
||||
|
||||
base := stringid.GenerateRandomID()
|
||||
|
||||
if err := driver.Create(base, "", "", nil); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
if err := addFiles(driver, base, 50); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
topLayer, err := addManyLayers(driver, base, layerCount)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
arch, err := driver.Diff(topLayer, "")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
_, err = io.Copy(ioutil.Discard, arch)
|
||||
if err != nil {
|
||||
b.Fatalf("Error copying archive: %s", err)
|
||||
}
|
||||
arch.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// DriverBenchDeepLayerRead benchmarks calls to read a file under a given number of layers.
|
||||
func DriverBenchDeepLayerRead(b *testing.B, layerCount int, drivername string, driveroptions ...string) {
|
||||
driver := GetDriver(b, drivername, driveroptions...)
|
||||
defer PutDriver(b)
|
||||
|
||||
base := stringid.GenerateRandomID()
|
||||
|
||||
if err := driver.Create(base, "", "", nil); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
content := []byte("test content")
|
||||
if err := addFile(driver, base, "testfile.txt", content); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
topLayer, err := addManyLayers(driver, base, layerCount)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
root, err := driver.Get(topLayer, "")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer driver.Put(topLayer)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
// Read content
|
||||
c, err := ioutil.ReadFile(filepath.Join(root, "testfile.txt"))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.StopTimer()
|
||||
if bytes.Compare(c, content) != 0 {
|
||||
b.Fatalf("Wrong content in file %v, expected %v", c, content)
|
||||
}
|
||||
b.StartTimer()
|
||||
}
|
||||
}
|
350
vendor/github.com/containers/storage/drivers/graphtest/graphtest_unix.go
generated
vendored
350
vendor/github.com/containers/storage/drivers/graphtest/graphtest_unix.go
generated
vendored
|
@ -1,350 +0,0 @@
|
|||
// +build linux freebsd
|
||||
|
||||
package graphtest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"syscall"
|
||||
"testing"
|
||||
"unsafe"
|
||||
|
||||
"github.com/containers/storage/drivers"
|
||||
"github.com/containers/storage/pkg/stringid"
|
||||
"github.com/docker/go-units"
|
||||
)
|
||||
|
||||
var (
|
||||
drv *Driver
|
||||
)
|
||||
|
||||
// Driver conforms to graphdriver.Driver interface and
|
||||
// contains information such as root and reference count of the number of clients using it.
|
||||
// This helps in testing drivers added into the framework.
|
||||
type Driver struct {
|
||||
graphdriver.Driver
|
||||
root string
|
||||
refCount int
|
||||
}
|
||||
|
||||
func newDriver(t testing.TB, name string, options []string) *Driver {
|
||||
root, err := ioutil.TempDir("", "storage-graphtest-")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(root, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
d, err := graphdriver.GetDriver(name, root, options, nil, nil)
|
||||
if err != nil {
|
||||
t.Logf("graphdriver: %v\n", err)
|
||||
if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites || err == graphdriver.ErrIncompatibleFS {
|
||||
t.Skipf("Driver %s not supported", name)
|
||||
}
|
||||
t.Fatal(err)
|
||||
}
|
||||
return &Driver{d, root, 1}
|
||||
}
|
||||
|
||||
func cleanup(t testing.TB, d *Driver) {
|
||||
if err := drv.Cleanup(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
os.RemoveAll(d.root)
|
||||
}
|
||||
|
||||
// GetDriver create a new driver with given name or return an existing driver with the name updating the reference count.
|
||||
func GetDriver(t testing.TB, name string, options ...string) graphdriver.Driver {
|
||||
if drv == nil {
|
||||
drv = newDriver(t, name, options)
|
||||
} else {
|
||||
drv.refCount++
|
||||
}
|
||||
return drv
|
||||
}
|
||||
|
||||
// PutDriver removes the driver if it is no longer used and updates the reference count.
|
||||
func PutDriver(t testing.TB) {
|
||||
if drv == nil {
|
||||
t.Skip("No driver to put!")
|
||||
}
|
||||
drv.refCount--
|
||||
if drv.refCount == 0 {
|
||||
cleanup(t, drv)
|
||||
drv = nil
|
||||
}
|
||||
}
|
||||
|
||||
// DriverTestCreateEmpty creates a new image and verifies it is empty and the right metadata
|
||||
func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...string) {
|
||||
driver := GetDriver(t, drivername, driverOptions...)
|
||||
defer PutDriver(t)
|
||||
|
||||
if err := driver.Create("empty", "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := driver.Remove("empty"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
if !driver.Exists("empty") {
|
||||
t.Fatal("Newly created image doesn't exist")
|
||||
}
|
||||
|
||||
dir, err := driver.Get("empty", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
verifyFile(t, dir, 0755|os.ModeDir, 0, 0)
|
||||
|
||||
// Verify that the directory is empty
|
||||
fis, err := readDir(dir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(fis) != 0 {
|
||||
t.Fatal("New directory not empty")
|
||||
}
|
||||
|
||||
driver.Put("empty")
|
||||
}
|
||||
|
||||
// DriverTestCreateBase create a base driver and verify.
|
||||
func DriverTestCreateBase(t testing.TB, drivername string, driverOptions ...string) {
|
||||
driver := GetDriver(t, drivername, driverOptions...)
|
||||
defer PutDriver(t)
|
||||
|
||||
createBase(t, driver, "Base")
|
||||
defer func() {
|
||||
if err := driver.Remove("Base"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
verifyBase(t, driver, "Base")
|
||||
}
|
||||
|
||||
// DriverTestCreateSnap Create a driver and snap and verify.
|
||||
func DriverTestCreateSnap(t testing.TB, drivername string, driverOptions ...string) {
|
||||
driver := GetDriver(t, drivername, driverOptions...)
|
||||
defer PutDriver(t)
|
||||
|
||||
createBase(t, driver, "Base")
|
||||
|
||||
defer func() {
|
||||
if err := driver.Remove("Base"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := driver.Create("Snap", "Base", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := driver.Remove("Snap"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
verifyBase(t, driver, "Snap")
|
||||
}
|
||||
|
||||
// DriverTestDeepLayerRead reads a file from a lower layer under a given number of layers
|
||||
func DriverTestDeepLayerRead(t testing.TB, layerCount int, drivername string, driverOptions ...string) {
|
||||
driver := GetDriver(t, drivername, driverOptions...)
|
||||
defer PutDriver(t)
|
||||
|
||||
base := stringid.GenerateRandomID()
|
||||
|
||||
if err := driver.Create(base, "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
content := []byte("test content")
|
||||
if err := addFile(driver, base, "testfile.txt", content); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
topLayer, err := addManyLayers(driver, base, layerCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = checkManyLayers(driver, topLayer, layerCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := checkFile(driver, topLayer, "testfile.txt", content); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// DriverTestDiffApply tests diffing and applying produces the same layer
|
||||
func DriverTestDiffApply(t testing.TB, fileCount int, drivername string, driverOptions ...string) {
|
||||
driver := GetDriver(t, drivername, driverOptions...)
|
||||
defer PutDriver(t)
|
||||
base := stringid.GenerateRandomID()
|
||||
upper := stringid.GenerateRandomID()
|
||||
deleteFile := "file-remove.txt"
|
||||
deleteFileContent := []byte("This file should get removed in upper!")
|
||||
|
||||
if err := driver.Create(base, "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := addManyFiles(driver, base, fileCount, 3); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := addFile(driver, base, deleteFile, deleteFileContent); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := driver.Create(upper, base, "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := addManyFiles(driver, upper, fileCount, 6); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := removeFile(driver, upper, deleteFile); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
diffSize, err := driver.DiffSize(upper, "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
diff := stringid.GenerateRandomID()
|
||||
if err := driver.Create(diff, base, "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := checkManyFiles(driver, diff, fileCount, 3); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := checkFile(driver, diff, deleteFile, deleteFileContent); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
arch, err := driver.Diff(upper, base)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
if _, err := buf.ReadFrom(arch); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := arch.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
applyDiffSize, err := driver.ApplyDiff(diff, base, bytes.NewReader(buf.Bytes()))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if applyDiffSize != diffSize {
|
||||
t.Fatalf("Apply diff size different, got %d, expected %d", applyDiffSize, diffSize)
|
||||
}
|
||||
|
||||
if err := checkManyFiles(driver, diff, fileCount, 6); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := checkFileRemoved(driver, diff, deleteFile); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// DriverTestChanges tests computed changes on a layer matches changes made
|
||||
func DriverTestChanges(t testing.TB, drivername string, driverOptions ...string) {
|
||||
driver := GetDriver(t, drivername, driverOptions...)
|
||||
defer PutDriver(t)
|
||||
base := stringid.GenerateRandomID()
|
||||
upper := stringid.GenerateRandomID()
|
||||
|
||||
if err := driver.Create(base, "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := addManyFiles(driver, base, 20, 3); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := driver.Create(upper, base, "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expectedChanges, err := changeManyFiles(driver, upper, 20, 6)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
changes, err := driver.Changes(upper, base)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err = checkChanges(expectedChanges, changes); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func writeRandomFile(path string, size uint64) error {
|
||||
buf := make([]int64, size/8)
|
||||
|
||||
r := rand.NewSource(0)
|
||||
for i := range buf {
|
||||
buf[i] = r.Int63()
|
||||
}
|
||||
|
||||
// Cast to []byte
|
||||
header := *(*reflect.SliceHeader)(unsafe.Pointer(&buf))
|
||||
header.Len *= 8
|
||||
header.Cap *= 8
|
||||
data := *(*[]byte)(unsafe.Pointer(&header))
|
||||
|
||||
return ioutil.WriteFile(path, data, 0700)
|
||||
}
|
||||
|
||||
// DriverTestSetQuota Create a driver and test setting quota.
|
||||
func DriverTestSetQuota(t *testing.T, drivername string) {
|
||||
driver := GetDriver(t, drivername)
|
||||
defer PutDriver(t)
|
||||
|
||||
createBase(t, driver, "Base")
|
||||
storageOpt := make(map[string]string, 1)
|
||||
storageOpt["size"] = "50M"
|
||||
if err := driver.Create("zfsTest", "Base", "", storageOpt); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
mountPath, err := driver.Get("zfsTest", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
quota := uint64(50 * units.MiB)
|
||||
err = writeRandomFile(path.Join(mountPath, "file"), quota*2)
|
||||
if pathError, ok := err.(*os.PathError); ok && pathError.Err != syscall.EDQUOT {
|
||||
t.Fatalf("expect write() to fail with %v, got %v", syscall.EDQUOT, err)
|
||||
}
|
||||
|
||||
}
|
1
vendor/github.com/containers/storage/drivers/graphtest/graphtest_windows.go
generated
vendored
1
vendor/github.com/containers/storage/drivers/graphtest/graphtest_windows.go
generated
vendored
|
@ -1 +0,0 @@
|
|||
package graphtest
|
327
vendor/github.com/containers/storage/drivers/graphtest/testutil.go
generated
vendored
327
vendor/github.com/containers/storage/drivers/graphtest/testutil.go
generated
vendored
|
@ -1,327 +0,0 @@
|
|||
package graphtest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
|
||||
"github.com/containers/storage/drivers"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/stringid"
|
||||
)
|
||||
|
||||
func randomContent(size int, seed int64) []byte {
|
||||
s := rand.NewSource(seed)
|
||||
content := make([]byte, size)
|
||||
|
||||
for i := 0; i < len(content); i += 7 {
|
||||
val := s.Int63()
|
||||
for j := 0; i+j < len(content) && j < 7; j++ {
|
||||
content[i+j] = byte(val)
|
||||
val >>= 8
|
||||
}
|
||||
}
|
||||
|
||||
return content
|
||||
}
|
||||
|
||||
func addFiles(drv graphdriver.Driver, layer string, seed int64) error {
|
||||
root, err := drv.Get(layer, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer drv.Put(layer)
|
||||
|
||||
if err := ioutil.WriteFile(path.Join(root, "file-a"), randomContent(64, seed), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.MkdirAll(path.Join(root, "dir-b"), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ioutil.WriteFile(path.Join(root, "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ioutil.WriteFile(path.Join(root, "file-c"), randomContent(128*128, seed+2), 0755)
|
||||
}
|
||||
|
||||
func checkFile(drv graphdriver.Driver, layer, filename string, content []byte) error {
|
||||
root, err := drv.Get(layer, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer drv.Put(layer)
|
||||
|
||||
fileContent, err := ioutil.ReadFile(path.Join(root, filename))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if bytes.Compare(fileContent, content) != 0 {
|
||||
return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func addFile(drv graphdriver.Driver, layer, filename string, content []byte) error {
|
||||
root, err := drv.Get(layer, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer drv.Put(layer)
|
||||
|
||||
return ioutil.WriteFile(path.Join(root, filename), content, 0755)
|
||||
}
|
||||
|
||||
func removeFile(drv graphdriver.Driver, layer, filename string) error {
|
||||
root, err := drv.Get(layer, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer drv.Put(layer)
|
||||
|
||||
return os.Remove(path.Join(root, filename))
|
||||
}
|
||||
|
||||
func checkFileRemoved(drv graphdriver.Driver, layer, filename string) error {
|
||||
root, err := drv.Get(layer, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer drv.Put(layer)
|
||||
|
||||
if _, err := os.Stat(path.Join(root, filename)); err == nil {
|
||||
return fmt.Errorf("file still exists: %s", path.Join(root, filename))
|
||||
} else if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func addManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) error {
|
||||
root, err := drv.Get(layer, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer drv.Put(layer)
|
||||
|
||||
for i := 0; i < count; i += 100 {
|
||||
dir := path.Join(root, fmt.Sprintf("directory-%d", i))
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
for j := 0; i+j < count && j < 100; j++ {
|
||||
file := path.Join(dir, fmt.Sprintf("file-%d", i+j))
|
||||
if err := ioutil.WriteFile(file, randomContent(64, seed+int64(i+j)), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func changeManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) ([]archive.Change, error) {
|
||||
root, err := drv.Get(layer, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer drv.Put(layer)
|
||||
|
||||
changes := []archive.Change{}
|
||||
for i := 0; i < count; i += 100 {
|
||||
archiveRoot := fmt.Sprintf("/directory-%d", i)
|
||||
if err := os.MkdirAll(path.Join(root, archiveRoot), 0755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for j := 0; i+j < count && j < 100; j++ {
|
||||
if j == 0 {
|
||||
changes = append(changes, archive.Change{
|
||||
Path: archiveRoot,
|
||||
Kind: archive.ChangeModify,
|
||||
})
|
||||
}
|
||||
var change archive.Change
|
||||
switch j % 3 {
|
||||
// Update file
|
||||
case 0:
|
||||
change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j))
|
||||
change.Kind = archive.ChangeModify
|
||||
if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Add file
|
||||
case 1:
|
||||
change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j))
|
||||
change.Kind = archive.ChangeAdd
|
||||
if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Remove file
|
||||
case 2:
|
||||
change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j))
|
||||
change.Kind = archive.ChangeDelete
|
||||
if err := os.Remove(path.Join(root, change.Path)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
changes = append(changes, change)
|
||||
}
|
||||
}
|
||||
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
func checkManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) error {
|
||||
root, err := drv.Get(layer, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer drv.Put(layer)
|
||||
|
||||
for i := 0; i < count; i += 100 {
|
||||
dir := path.Join(root, fmt.Sprintf("directory-%d", i))
|
||||
for j := 0; i+j < count && j < 100; j++ {
|
||||
file := path.Join(dir, fmt.Sprintf("file-%d", i+j))
|
||||
fileContent, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
content := randomContent(64, seed+int64(i+j))
|
||||
|
||||
if bytes.Compare(fileContent, content) != 0 {
|
||||
return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type changeList []archive.Change
|
||||
|
||||
func (c changeList) Less(i, j int) bool {
|
||||
if c[i].Path == c[j].Path {
|
||||
return c[i].Kind < c[j].Kind
|
||||
}
|
||||
return c[i].Path < c[j].Path
|
||||
}
|
||||
func (c changeList) Len() int { return len(c) }
|
||||
func (c changeList) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
|
||||
|
||||
func checkChanges(expected, actual []archive.Change) error {
|
||||
if len(expected) != len(actual) {
|
||||
return fmt.Errorf("unexpected number of changes, expected %d, got %d", len(expected), len(actual))
|
||||
}
|
||||
sort.Sort(changeList(expected))
|
||||
sort.Sort(changeList(actual))
|
||||
|
||||
for i := range expected {
|
||||
if expected[i] != actual[i] {
|
||||
return fmt.Errorf("unexpected change, expecting %v, got %v", expected[i], actual[i])
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func addLayerFiles(drv graphdriver.Driver, layer, parent string, i int) error {
|
||||
root, err := drv.Get(layer, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer drv.Put(layer)
|
||||
|
||||
if err := ioutil.WriteFile(path.Join(root, "top-id"), []byte(layer), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
layerDir := path.Join(root, fmt.Sprintf("layer-%d", i))
|
||||
if err := os.MkdirAll(layerDir, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ioutil.WriteFile(path.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ioutil.WriteFile(path.Join(layerDir, "parent-id"), []byte(parent), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func addManyLayers(drv graphdriver.Driver, baseLayer string, count int) (string, error) {
|
||||
lastLayer := baseLayer
|
||||
for i := 1; i <= count; i++ {
|
||||
nextLayer := stringid.GenerateRandomID()
|
||||
if err := drv.Create(nextLayer, lastLayer, "", nil); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := addLayerFiles(drv, nextLayer, lastLayer, i); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
lastLayer = nextLayer
|
||||
|
||||
}
|
||||
return lastLayer, nil
|
||||
}
|
||||
|
||||
func checkManyLayers(drv graphdriver.Driver, layer string, count int) error {
|
||||
root, err := drv.Get(layer, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer drv.Put(layer)
|
||||
|
||||
layerIDBytes, err := ioutil.ReadFile(path.Join(root, "top-id"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if bytes.Compare(layerIDBytes, []byte(layer)) != 0 {
|
||||
return fmt.Errorf("mismatched file content %v, expecting %v", layerIDBytes, []byte(layer))
|
||||
}
|
||||
|
||||
for i := count; i > 0; i-- {
|
||||
layerDir := path.Join(root, fmt.Sprintf("layer-%d", i))
|
||||
|
||||
thisLayerIDBytes, err := ioutil.ReadFile(path.Join(layerDir, "layer-id"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bytes.Compare(thisLayerIDBytes, layerIDBytes) != 0 {
|
||||
return fmt.Errorf("mismatched file content %v, expecting %v", thisLayerIDBytes, layerIDBytes)
|
||||
}
|
||||
layerIDBytes, err = ioutil.ReadFile(path.Join(layerDir, "parent-id"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// readDir reads a directory just like ioutil.ReadDir()
|
||||
// then hides specific files (currently "lost+found")
|
||||
// so the tests don't "see" it
|
||||
func readDir(dir string) ([]os.FileInfo, error) {
|
||||
a, err := ioutil.ReadDir(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
b := a[:0]
|
||||
for _, x := range a {
|
||||
if x.Name() != "lost+found" { // ext4 always have this dir
|
||||
b = append(b, x)
|
||||
}
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
|
143
vendor/github.com/containers/storage/drivers/graphtest/testutil_unix.go
generated
vendored
143
vendor/github.com/containers/storage/drivers/graphtest/testutil_unix.go
generated
vendored
|
@ -1,143 +0,0 @@
|
|||
// +build linux freebsd
|
||||
|
||||
package graphtest
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/containers/storage/drivers"
|
||||
)
|
||||
|
||||
// InitLoopbacks ensures that the loopback devices are properly created within
|
||||
// the system running the device mapper tests.
|
||||
func InitLoopbacks() error {
|
||||
statT, err := getBaseLoopStats()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// create at least 8 loopback files, ya, that is a good number
|
||||
for i := 0; i < 8; i++ {
|
||||
loopPath := fmt.Sprintf("/dev/loop%d", i)
|
||||
// only create new loopback files if they don't exist
|
||||
if _, err := os.Stat(loopPath); err != nil {
|
||||
if mkerr := syscall.Mknod(loopPath,
|
||||
uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil {
|
||||
return mkerr
|
||||
}
|
||||
os.Chown(loopPath, int(statT.Uid), int(statT.Gid))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getBaseLoopStats inspects /dev/loop0 to collect uid,gid, and mode for the
|
||||
// loop0 device on the system. If it does not exist we assume 0,0,0660 for the
|
||||
// stat data
|
||||
func getBaseLoopStats() (*syscall.Stat_t, error) {
|
||||
loop0, err := os.Stat("/dev/loop0")
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return &syscall.Stat_t{
|
||||
Uid: 0,
|
||||
Gid: 0,
|
||||
Mode: 0660,
|
||||
}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return loop0.Sys().(*syscall.Stat_t), nil
|
||||
}
|
||||
|
||||
func verifyFile(t testing.TB, path string, mode os.FileMode, uid, gid uint32) {
|
||||
fi, err := os.Stat(path)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if fi.Mode()&os.ModeType != mode&os.ModeType {
|
||||
t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType)
|
||||
}
|
||||
|
||||
if fi.Mode()&os.ModePerm != mode&os.ModePerm {
|
||||
t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm)
|
||||
}
|
||||
|
||||
if fi.Mode()&os.ModeSticky != mode&os.ModeSticky {
|
||||
t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky)
|
||||
}
|
||||
|
||||
if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid {
|
||||
t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid)
|
||||
}
|
||||
|
||||
if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid {
|
||||
t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid)
|
||||
}
|
||||
|
||||
if stat, ok := fi.Sys().(*syscall.Stat_t); ok {
|
||||
if stat.Uid != uid {
|
||||
t.Fatalf("%s no owned by uid %d", path, uid)
|
||||
}
|
||||
if stat.Gid != gid {
|
||||
t.Fatalf("%s not owned by gid %d", path, gid)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func createBase(t testing.TB, driver graphdriver.Driver, name string) {
|
||||
// We need to be able to set any perms
|
||||
oldmask := syscall.Umask(0)
|
||||
defer syscall.Umask(oldmask)
|
||||
|
||||
if err := driver.CreateReadWrite(name, "", "", nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dir, err := driver.Get(name, "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer driver.Put(name)
|
||||
|
||||
subdir := path.Join(dir, "a subdir")
|
||||
if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.Chown(subdir, 1, 2); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
file := path.Join(dir, "a file")
|
||||
if err := ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func verifyBase(t testing.TB, driver graphdriver.Driver, name string) {
|
||||
dir, err := driver.Get(name, "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer driver.Put(name)
|
||||
|
||||
subdir := path.Join(dir, "a subdir")
|
||||
verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2)
|
||||
|
||||
file := path.Join(dir, "a file")
|
||||
verifyFile(t, file, 0222|os.ModeSetuid, 0, 0)
|
||||
|
||||
fis, err := readDir(dir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(fis) != 2 {
|
||||
t.Fatal("Unexpected files in base image")
|
||||
}
|
||||
|
||||
}
|
108
vendor/github.com/containers/storage/drivers/overlay/overlay_test.go
generated
vendored
108
vendor/github.com/containers/storage/drivers/overlay/overlay_test.go
generated
vendored
|
@ -1,108 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package overlay
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/containers/storage/drivers"
|
||||
"github.com/containers/storage/drivers/graphtest"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/reexec"
|
||||
)
|
||||
|
||||
const driverName = "overlay"
|
||||
|
||||
func init() {
|
||||
// Do not sure chroot to speed run time and allow archive
|
||||
// errors or hangs to be debugged directly from the test process.
|
||||
untar = archive.UntarUncompressed
|
||||
graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer
|
||||
|
||||
reexec.Init()
|
||||
}
|
||||
|
||||
func cdMountFrom(dir, device, target, mType, label string) error {
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
os.Chdir(dir)
|
||||
defer os.Chdir(wd)
|
||||
|
||||
return syscall.Mount(device, target, mType, 0, label)
|
||||
}
|
||||
|
||||
// This avoids creating a new driver for each test if all tests are run
|
||||
// Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown
|
||||
func TestOverlaySetup(t *testing.T) {
|
||||
graphtest.GetDriver(t, driverName)
|
||||
}
|
||||
|
||||
func TestOverlayCreateEmpty(t *testing.T) {
|
||||
graphtest.DriverTestCreateEmpty(t, driverName)
|
||||
}
|
||||
|
||||
func TestOverlayCreateBase(t *testing.T) {
|
||||
graphtest.DriverTestCreateBase(t, driverName)
|
||||
}
|
||||
|
||||
func TestOverlayCreateSnap(t *testing.T) {
|
||||
graphtest.DriverTestCreateSnap(t, driverName)
|
||||
}
|
||||
|
||||
func TestOverlay128LayerRead(t *testing.T) {
|
||||
graphtest.DriverTestDeepLayerRead(t, 128, driverName)
|
||||
}
|
||||
|
||||
func TestOverlayDiffApply10Files(t *testing.T) {
|
||||
graphtest.DriverTestDiffApply(t, 10, driverName)
|
||||
}
|
||||
|
||||
func TestOverlayChanges(t *testing.T) {
|
||||
graphtest.DriverTestChanges(t, driverName)
|
||||
}
|
||||
|
||||
func TestOverlayTeardown(t *testing.T) {
|
||||
graphtest.PutDriver(t)
|
||||
}
|
||||
|
||||
// Benchmarks should always setup new driver
|
||||
|
||||
func BenchmarkExists(b *testing.B) {
|
||||
graphtest.DriverBenchExists(b, driverName)
|
||||
}
|
||||
|
||||
func BenchmarkGetEmpty(b *testing.B) {
|
||||
graphtest.DriverBenchGetEmpty(b, driverName)
|
||||
}
|
||||
|
||||
func BenchmarkDiffBase(b *testing.B) {
|
||||
graphtest.DriverBenchDiffBase(b, driverName)
|
||||
}
|
||||
|
||||
func BenchmarkDiffSmallUpper(b *testing.B) {
|
||||
graphtest.DriverBenchDiffN(b, 10, 10, driverName)
|
||||
}
|
||||
|
||||
func BenchmarkDiff10KFileUpper(b *testing.B) {
|
||||
graphtest.DriverBenchDiffN(b, 10, 10000, driverName)
|
||||
}
|
||||
|
||||
func BenchmarkDiff10KFilesBottom(b *testing.B) {
|
||||
graphtest.DriverBenchDiffN(b, 10000, 10, driverName)
|
||||
}
|
||||
|
||||
func BenchmarkDiffApply100(b *testing.B) {
|
||||
graphtest.DriverBenchDiffApplyN(b, 100, driverName)
|
||||
}
|
||||
|
||||
func BenchmarkDiff20Layers(b *testing.B) {
|
||||
graphtest.DriverBenchDeepLayerDiff(b, 20, driverName)
|
||||
}
|
||||
|
||||
func BenchmarkRead20Layers(b *testing.B) {
|
||||
graphtest.DriverBenchDeepLayerRead(b, 20, driverName)
|
||||
}
|
37
vendor/github.com/containers/storage/drivers/vfs/vfs_test.go
generated
vendored
37
vendor/github.com/containers/storage/drivers/vfs/vfs_test.go
generated
vendored
|
@ -1,37 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/containers/storage/drivers/graphtest"
|
||||
|
||||
"github.com/containers/storage/pkg/reexec"
|
||||
)
|
||||
|
||||
func init() {
|
||||
reexec.Init()
|
||||
}
|
||||
|
||||
// This avoids creating a new driver for each test if all tests are run
|
||||
// Make sure to put new tests between TestVfsSetup and TestVfsTeardown
|
||||
func TestVfsSetup(t *testing.T) {
|
||||
graphtest.GetDriver(t, "vfs")
|
||||
}
|
||||
|
||||
func TestVfsCreateEmpty(t *testing.T) {
|
||||
graphtest.DriverTestCreateEmpty(t, "vfs")
|
||||
}
|
||||
|
||||
func TestVfsCreateBase(t *testing.T) {
|
||||
graphtest.DriverTestCreateBase(t, "vfs")
|
||||
}
|
||||
|
||||
func TestVfsCreateSnap(t *testing.T) {
|
||||
graphtest.DriverTestCreateSnap(t, "vfs")
|
||||
}
|
||||
|
||||
func TestVfsTeardown(t *testing.T) {
|
||||
graphtest.PutDriver(t)
|
||||
}
|
18
vendor/github.com/containers/storage/drivers/windows/windows_windows_test.go
generated
vendored
18
vendor/github.com/containers/storage/drivers/windows/windows_windows_test.go
generated
vendored
|
@ -1,18 +0,0 @@
|
|||
package windows
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestAddAceToSddlDacl(t *testing.T) {
|
||||
cases := [][3]string{
|
||||
{"D:", "(A;;;)", "D:(A;;;)"},
|
||||
{"D:(A;;;)", "(A;;;)", "D:(A;;;)"},
|
||||
{"O:D:(A;;;stuff)", "(A;;;new)", "O:D:(A;;;new)(A;;;stuff)"},
|
||||
{"O:D:(D;;;no)(A;;;stuff)", "(A;;;new)", "O:D:(D;;;no)(A;;;new)(A;;;stuff)"},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
if newSddl, worked := addAceToSddlDacl(c[0], c[1]); !worked || newSddl != c[2] {
|
||||
t.Errorf("%s + %s == %s, expected %s (%v)", c[0], c[1], newSddl, c[2], worked)
|
||||
}
|
||||
}
|
||||
}
|
2
vendor/github.com/containers/storage/drivers/zfs/MAINTAINERS
generated
vendored
2
vendor/github.com/containers/storage/drivers/zfs/MAINTAINERS
generated
vendored
|
@ -1,2 +0,0 @@
|
|||
Jörg Thalheim <joerg@higgsboson.tk> (@Mic92)
|
||||
Arthur Gautier <baloo@gandi.net> (@baloose)
|
35
vendor/github.com/containers/storage/drivers/zfs/zfs_test.go
generated
vendored
35
vendor/github.com/containers/storage/drivers/zfs/zfs_test.go
generated
vendored
|
@ -1,35 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package zfs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/containers/storage/drivers/graphtest"
|
||||
)
|
||||
|
||||
// This avoids creating a new driver for each test if all tests are run
|
||||
// Make sure to put new tests between TestZfsSetup and TestZfsTeardown
|
||||
func TestZfsSetup(t *testing.T) {
|
||||
graphtest.GetDriver(t, "zfs")
|
||||
}
|
||||
|
||||
func TestZfsCreateEmpty(t *testing.T) {
|
||||
graphtest.DriverTestCreateEmpty(t, "zfs")
|
||||
}
|
||||
|
||||
func TestZfsCreateBase(t *testing.T) {
|
||||
graphtest.DriverTestCreateBase(t, "zfs")
|
||||
}
|
||||
|
||||
func TestZfsCreateSnap(t *testing.T) {
|
||||
graphtest.DriverTestCreateSnap(t, "zfs")
|
||||
}
|
||||
|
||||
func TestZfsSetQuota(t *testing.T) {
|
||||
graphtest.DriverTestSetQuota(t, "zfs")
|
||||
}
|
||||
|
||||
func TestZfsTeardown(t *testing.T) {
|
||||
graphtest.PutDriver(t)
|
||||
}
|
35
vendor/github.com/containers/storage/hack/Jenkins/W2L/postbuild.sh
generated
vendored
35
vendor/github.com/containers/storage/hack/Jenkins/W2L/postbuild.sh
generated
vendored
|
@ -1,35 +0,0 @@
|
|||
set +x
|
||||
set +e
|
||||
|
||||
echo ""
|
||||
echo ""
|
||||
echo "---"
|
||||
echo "Now starting POST-BUILD steps"
|
||||
echo "---"
|
||||
echo ""
|
||||
|
||||
echo INFO: Pointing to $DOCKER_HOST
|
||||
|
||||
if [ ! $(docker ps -aq | wc -l) -eq 0 ]; then
|
||||
echo INFO: Removing containers...
|
||||
! docker rm -vf $(docker ps -aq)
|
||||
fi
|
||||
|
||||
# Remove all images which don't have docker or debian in the name
|
||||
if [ ! $(docker images | sed -n '1!p' | grep -v 'docker' | grep -v 'debian' | awk '{ print $3 }' | wc -l) -eq 0 ]; then
|
||||
echo INFO: Removing images...
|
||||
! docker rmi -f $(docker images | sed -n '1!p' | grep -v 'docker' | grep -v 'debian' | awk '{ print $3 }')
|
||||
fi
|
||||
|
||||
# Kill off any instances of git, go and docker, just in case
|
||||
! taskkill -F -IM git.exe -T >& /dev/null
|
||||
! taskkill -F -IM go.exe -T >& /dev/null
|
||||
! taskkill -F -IM docker.exe -T >& /dev/null
|
||||
|
||||
# Remove everything
|
||||
! cd /c/jenkins/gopath/src/github.com/docker/docker
|
||||
! rm -rfd * >& /dev/null
|
||||
! rm -rfd .* >& /dev/null
|
||||
|
||||
echo INFO: Cleanup complete
|
||||
exit 0
|
309
vendor/github.com/containers/storage/hack/Jenkins/W2L/setup.sh
generated
vendored
309
vendor/github.com/containers/storage/hack/Jenkins/W2L/setup.sh
generated
vendored
|
@ -1,309 +0,0 @@
|
|||
# Jenkins CI script for Windows to Linux CI.
|
||||
# Heavily modified by John Howard (@jhowardmsft) December 2015 to try to make it more reliable.
|
||||
set +xe
|
||||
SCRIPT_VER="Wed Apr 20 18:30:19 UTC 2016"
|
||||
|
||||
# TODO to make (even) more resilient:
|
||||
# - Wait for daemon to be running before executing docker commands
|
||||
# - Check if jq is installed
|
||||
# - Make sure bash is v4.3 or later. Can't do until all Azure nodes on the latest version
|
||||
# - Make sure we are not running as local system. Can't do until all Azure nodes are updated.
|
||||
# - Error if docker versions are not equal. Can't do until all Azure nodes are updated
|
||||
# - Error if go versions are not equal. Can't do until all Azure nodes are updated.
|
||||
# - Error if running 32-bit posix tools. Probably can take from bash --version and check contains "x86_64"
|
||||
# - Warn if the CI directory cannot be deleted afterwards. Otherwise turdlets are left behind
|
||||
# - Use %systemdrive% ($SYSTEMDRIVE) rather than hard code to c: for TEMP
|
||||
# - Consider cross builing the Windows binary and copy across. That's a bit of a heavy lift. Only reason
|
||||
# for doing that is that it mirrors the actual release process for docker.exe which is cross-built.
|
||||
# However, should absolutely not be a problem if built natively, so nit-picking.
|
||||
# - Tidy up of images and containers. Either here, or in the teardown script.
|
||||
|
||||
ec=0
|
||||
uniques=1
|
||||
echo INFO: Started at `date`. Script version $SCRIPT_VER
|
||||
|
||||
|
||||
# !README!
|
||||
# There are two daemons running on the remote Linux host:
|
||||
# - outer: specified by DOCKER_HOST, this is the daemon that will build and run the inner docker daemon
|
||||
# from the sources matching the PR.
|
||||
# - inner: runs on the host network, on a port number similar to that of DOCKER_HOST but the last two digits are inverted
|
||||
# (2357 if DOCKER_HOST had port 2375; and 2367 if DOCKER_HOST had port 2376).
|
||||
# The windows integration tests are run against this inner daemon.
|
||||
|
||||
# get the ip, inner and outer ports.
|
||||
ip="${DOCKER_HOST#*://}"
|
||||
port_outer="${ip#*:}"
|
||||
# inner port is like outer port with last two digits inverted.
|
||||
port_inner=$(echo "$port_outer" | sed -E 's/(.)(.)$/\2\1/')
|
||||
ip="${ip%%:*}"
|
||||
|
||||
echo "INFO: IP=$ip PORT_OUTER=$port_outer PORT_INNER=$port_inner"
|
||||
|
||||
# If TLS is enabled
|
||||
if [ -n "$DOCKER_TLS_VERIFY" ]; then
|
||||
protocol=https
|
||||
if [ -z "$DOCKER_MACHINE_NAME" ]; then
|
||||
ec=1
|
||||
echo "ERROR: DOCKER_MACHINE_NAME is undefined"
|
||||
fi
|
||||
certs=$(echo ~/.docker/machine/machines/$DOCKER_MACHINE_NAME)
|
||||
curlopts="--cacert $certs/ca.pem --cert $certs/cert.pem --key $certs/key.pem"
|
||||
run_extra_args="-v tlscerts:/etc/docker"
|
||||
daemon_extra_args="--tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem"
|
||||
else
|
||||
protocol=http
|
||||
fi
|
||||
|
||||
# Save for use by make.sh and scripts it invokes
|
||||
export MAIN_DOCKER_HOST="tcp://$ip:$port_inner"
|
||||
|
||||
# Verify we can get the remote node to respond to _ping
|
||||
if [ $ec -eq 0 ]; then
|
||||
reply=`curl -s $curlopts $protocol://$ip:$port_outer/_ping`
|
||||
if [ "$reply" != "OK" ]; then
|
||||
ec=1
|
||||
echo "ERROR: Failed to get an 'OK' response from the docker daemon on the Linux node"
|
||||
echo " at $ip:$port_outer when called with an http request for '_ping'. This implies that"
|
||||
echo " either the daemon has crashed/is not running, or the Linux node is unavailable."
|
||||
echo
|
||||
echo " A regular ping to the remote Linux node is below. It should reply. If not, the"
|
||||
echo " machine cannot be reached at all and may have crashed. If it does reply, it is"
|
||||
echo " likely a case of the Linux daemon not running or having crashed, which requires"
|
||||
echo " further investigation."
|
||||
echo
|
||||
echo " Try re-running this CI job, or ask on #docker-dev or #docker-maintainers"
|
||||
echo " for someone to perform further diagnostics, or take this node out of rotation."
|
||||
echo
|
||||
ping $ip
|
||||
else
|
||||
echo "INFO: The Linux nodes outer daemon replied to a ping. Good!"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Get the version from the remote node. Note this may fail if jq is not installed.
|
||||
# That's probably worth checking to make sure, just in case.
|
||||
if [ $ec -eq 0 ]; then
|
||||
remoteVersion=`curl -s $curlopts $protocol://$ip:$port_outer/version | jq -c '.Version'`
|
||||
echo "INFO: Remote daemon is running docker version $remoteVersion"
|
||||
fi
|
||||
|
||||
# Compare versions. We should really fail if result is no 1. Output at end of script.
|
||||
if [ $ec -eq 0 ]; then
|
||||
uniques=`docker version | grep Version | /usr/bin/sort -u | wc -l`
|
||||
fi
|
||||
|
||||
# Make sure we are in repo
|
||||
if [ $ec -eq 0 ]; then
|
||||
if [ ! -d hack ]; then
|
||||
echo "ERROR: Are you sure this is being launched from a the root of docker repository?"
|
||||
echo " If this is a Windows CI machine, it should be c:\jenkins\gopath\src\github.com\docker\docker."
|
||||
echo " Current directory is `pwd`"
|
||||
ec=1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Are we in split binary mode?
|
||||
if [ `grep DOCKER_CLIENTONLY Makefile | wc -l` -gt 0 ]; then
|
||||
splitBinary=0
|
||||
echo "INFO: Running in single binary mode"
|
||||
else
|
||||
splitBinary=1
|
||||
echo "INFO: Running in split binary mode"
|
||||
fi
|
||||
|
||||
|
||||
# Get the commit has and verify we have something
|
||||
if [ $ec -eq 0 ]; then
|
||||
export COMMITHASH=$(git rev-parse --short HEAD)
|
||||
echo INFO: Commmit hash is $COMMITHASH
|
||||
if [ -z $COMMITHASH ]; then
|
||||
echo "ERROR: Failed to get commit hash. Are you sure this is a docker repository?"
|
||||
ec=1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Redirect to a temporary location. Check is here for local runs from Jenkins machines just in case not
|
||||
# in the right directory where the repo is cloned. We also redirect TEMP to not use the environment
|
||||
# TEMP as when running as a standard user (not local system), it otherwise exposes a bug in posix tar which
|
||||
# will cause CI to fail from Windows to Linux. Obviously it's not best practice to ever run as local system...
|
||||
if [ $ec -eq 0 ]; then
|
||||
export TEMP=/c/CI/CI-$COMMITHASH
|
||||
export TMP=$TEMP
|
||||
/usr/bin/mkdir -p $TEMP # Make sure Linux mkdir for -p
|
||||
fi
|
||||
|
||||
# Tidy up time
|
||||
if [ $ec -eq 0 ]; then
|
||||
echo INFO: Deleting pre-existing containers and images...
|
||||
|
||||
# Force remove all containers based on a previously built image with this commit
|
||||
! docker rm -f $(docker ps -aq --filter "ancestor=docker:$COMMITHASH") &>/dev/null
|
||||
|
||||
# Force remove any container with this commithash as a name
|
||||
! docker rm -f $(docker ps -aq --filter "name=docker-$COMMITHASH") &>/dev/null
|
||||
|
||||
# This SHOULD never happen, but just in case, also blow away any containers
|
||||
# that might be around.
|
||||
! if [ ! $(docker ps -aq | wc -l) -eq 0 ]; then
|
||||
echo WARN: There were some leftover containers. Cleaning them up.
|
||||
! docker rm -f $(docker ps -aq)
|
||||
fi
|
||||
|
||||
# Force remove the image if it exists
|
||||
! docker rmi -f "docker-$COMMITHASH" &>/dev/null
|
||||
fi
|
||||
|
||||
# Provide the docker version for debugging purposes. If these fail, game over.
|
||||
# as the Linux box isn't responding for some reason.
|
||||
if [ $ec -eq 0 ]; then
|
||||
echo INFO: Docker version and info of the outer daemon on the Linux node
|
||||
echo
|
||||
docker version
|
||||
ec=$?
|
||||
if [ 0 -ne $ec ]; then
|
||||
echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?"
|
||||
fi
|
||||
echo
|
||||
fi
|
||||
|
||||
# Same as above, but docker info
|
||||
if [ $ec -eq 0 ]; then
|
||||
echo
|
||||
docker info
|
||||
ec=$?
|
||||
if [ 0 -ne $ec ]; then
|
||||
echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?"
|
||||
fi
|
||||
echo
|
||||
fi
|
||||
|
||||
# build the daemon image
|
||||
if [ $ec -eq 0 ]; then
|
||||
echo "INFO: Running docker build on Linux host at $DOCKER_HOST"
|
||||
if [ $splitBinary -eq 0 ]; then
|
||||
set -x
|
||||
docker build --rm --force-rm --build-arg APT_MIRROR=cdn-fastly.deb.debian.org -t "docker:$COMMITHASH" .
|
||||
cat <<EOF | docker build --rm --force-rm -t "docker:$COMMITHASH" -
|
||||
FROM docker:$COMMITHASH
|
||||
RUN hack/make.sh binary
|
||||
RUN cp bundles/latest/binary/docker /bin/docker
|
||||
CMD docker daemon -D -H tcp://0.0.0.0:$port_inner $daemon_extra_args
|
||||
EOF
|
||||
else
|
||||
set -x
|
||||
docker build --rm --force-rm --build-arg APT_MIRROR=cdn-fastly.deb.debian.org -t "docker:$COMMITHASH" .
|
||||
cat <<EOF | docker build --rm --force-rm -t "docker:$COMMITHASH" -
|
||||
FROM docker:$COMMITHASH
|
||||
RUN hack/make.sh binary
|
||||
RUN cp bundles/latest/binary-daemon/dockerd /bin/dockerd
|
||||
CMD dockerd -D -H tcp://0.0.0.0:$port_inner $daemon_extra_args
|
||||
EOF
|
||||
|
||||
fi
|
||||
ec=$?
|
||||
set +x
|
||||
if [ 0 -ne $ec ]; then
|
||||
echo "ERROR: docker build failed"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Start the docker-in-docker daemon from the image we just built
|
||||
if [ $ec -eq 0 ]; then
|
||||
echo "INFO: Starting build of a Linux daemon to test against, and starting it..."
|
||||
set -x
|
||||
# aufs in aufs is faster than vfs in aufs
|
||||
docker run -d $run_extra_args -e DOCKER_GRAPHDRIVER=aufs --pid host --privileged --name "docker-$COMMITHASH" --net host "docker:$COMMITHASH"
|
||||
ec=$?
|
||||
set +x
|
||||
if [ 0 -ne $ec ]; then
|
||||
echo "ERROR: Failed to compile and start the linux daemon"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Build locally.
|
||||
if [ $ec -eq 0 ]; then
|
||||
echo "INFO: Starting local build of Windows binary..."
|
||||
set -x
|
||||
export TIMEOUT="120m"
|
||||
export DOCKER_HOST="tcp://$ip:$port_inner"
|
||||
# This can be removed
|
||||
export DOCKER_TEST_HOST="tcp://$ip:$port_inner"
|
||||
unset DOCKER_CLIENTONLY
|
||||
export DOCKER_REMOTE_DAEMON=1
|
||||
hack/make.sh binary
|
||||
ec=$?
|
||||
set +x
|
||||
if [ 0 -ne $ec ]; then
|
||||
echo "ERROR: Build of binary on Windows failed"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Make a local copy of the built binary and ensure that is first in our path
|
||||
if [ $ec -eq 0 ]; then
|
||||
VERSION=$(< ./VERSION)
|
||||
if [ $splitBinary -eq 0 ]; then
|
||||
cp bundles/$VERSION/binary/docker.exe $TEMP
|
||||
else
|
||||
cp bundles/$VERSION/binary-client/docker.exe $TEMP
|
||||
fi
|
||||
ec=$?
|
||||
if [ 0 -ne $ec ]; then
|
||||
echo "ERROR: Failed to copy built binary to $TEMP"
|
||||
fi
|
||||
export PATH=$TEMP:$PATH
|
||||
fi
|
||||
|
||||
# Run the integration tests
|
||||
if [ $ec -eq 0 ]; then
|
||||
echo "INFO: Running Integration tests..."
|
||||
set -x
|
||||
export DOCKER_TEST_TLS_VERIFY="$DOCKER_TLS_VERIFY"
|
||||
export DOCKER_TEST_CERT_PATH="$DOCKER_CERT_PATH"
|
||||
#export TESTFLAGS='-check.vv'
|
||||
hack/make.sh test-integration-cli
|
||||
ec=$?
|
||||
set +x
|
||||
if [ 0 -ne $ec ]; then
|
||||
echo "ERROR: CLI test failed."
|
||||
# Next line is useful, but very long winded if included
|
||||
docker -H=$MAIN_DOCKER_HOST logs --tail 100 "docker-$COMMITHASH"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Tidy up any temporary files from the CI run
|
||||
if [ ! -z $COMMITHASH ]; then
|
||||
rm -rf $TEMP
|
||||
fi
|
||||
|
||||
# CI Integrity check - ensure we are using the same version of go as present in the Dockerfile
|
||||
GOVER_DOCKERFILE=`grep 'ENV GO_VERSION' Dockerfile | awk '{print $3}'`
|
||||
GOVER_INSTALLED=`go version | awk '{print $3}'`
|
||||
if [ "${GOVER_INSTALLED:2}" != "$GOVER_DOCKERFILE" ]; then
|
||||
#ec=1 # Uncomment to make CI fail once all nodes are updated.
|
||||
echo
|
||||
echo "---------------------------------------------------------------------------"
|
||||
echo "WARN: CI should be using go version $GOVER_DOCKERFILE, but is using ${GOVER_INSTALLED:2}"
|
||||
echo " Please ping #docker-maintainers on IRC to get this CI server updated."
|
||||
echo "---------------------------------------------------------------------------"
|
||||
echo
|
||||
fi
|
||||
|
||||
# Check the Linux box is running a matching version of docker
|
||||
if [ "$uniques" -ne 1 ]; then
|
||||
ec=0 # Uncomment to make CI fail once all nodes are updated.
|
||||
echo
|
||||
echo "---------------------------------------------------------------------------"
|
||||
echo "ERROR: This CI node is not running the same version of docker as the daemon."
|
||||
echo " This is a CI configuration issue."
|
||||
echo "---------------------------------------------------------------------------"
|
||||
echo
|
||||
fi
|
||||
|
||||
# Tell the user how we did.
|
||||
if [ $ec -eq 0 ]; then
|
||||
echo INFO: Completed successfully at `date`.
|
||||
else
|
||||
echo ERROR: Failed with exitcode $ec at `date`.
|
||||
fi
|
||||
exit $ec
|
33
vendor/github.com/containers/storage/hack/dind
generated
vendored
33
vendor/github.com/containers/storage/hack/dind
generated
vendored
|
@ -1,33 +0,0 @@
|
|||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
# DinD: a wrapper script which allows docker to be run inside a docker container.
|
||||
# Original version by Jerome Petazzoni <jerome@docker.com>
|
||||
# See the blog post: https://blog.docker.com/2013/09/docker-can-now-run-within-docker/
|
||||
#
|
||||
# This script should be executed inside a docker container in privileged mode
|
||||
# ('docker run --privileged', introduced in docker 0.6).
|
||||
|
||||
# Usage: dind CMD [ARG...]
|
||||
|
||||
# apparmor sucks and Docker needs to know that it's in a container (c) @tianon
|
||||
export container=docker
|
||||
|
||||
if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
|
||||
mount -t securityfs none /sys/kernel/security || {
|
||||
echo >&2 'Could not mount /sys/kernel/security.'
|
||||
echo >&2 'AppArmor detection and --privileged mode might break.'
|
||||
}
|
||||
fi
|
||||
|
||||
# Mount /tmp (conditionally)
|
||||
if ! mountpoint -q /tmp; then
|
||||
mount -t tmpfs none /tmp
|
||||
fi
|
||||
|
||||
if [ $# -gt 0 ]; then
|
||||
exec "$@"
|
||||
fi
|
||||
|
||||
echo >&2 'ERROR: No command specified.'
|
||||
echo >&2 'You probably want to run hack/make.sh, or maybe a shell?'
|
15
vendor/github.com/containers/storage/hack/generate-authors.sh
generated
vendored
15
vendor/github.com/containers/storage/hack/generate-authors.sh
generated
vendored
|
@ -1,15 +0,0 @@
|
|||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.."
|
||||
|
||||
# see also ".mailmap" for how email addresses and names are deduplicated
|
||||
|
||||
{
|
||||
cat <<-'EOH'
|
||||
# This file lists all individuals having contributed content to the repository.
|
||||
# For how it is generated, see `hack/generate-authors.sh`.
|
||||
EOH
|
||||
echo
|
||||
git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
|
||||
} > AUTHORS
|
517
vendor/github.com/containers/storage/hack/install.sh
generated
vendored
517
vendor/github.com/containers/storage/hack/install.sh
generated
vendored
|
@ -1,517 +0,0 @@
|
|||
#!/bin/sh
|
||||
set -e
|
||||
#
|
||||
# This script is meant for quick & easy install via:
|
||||
# 'curl -sSL https://get.docker.com/ | sh'
|
||||
# or:
|
||||
# 'wget -qO- https://get.docker.com/ | sh'
|
||||
#
|
||||
# For test builds (ie. release candidates):
|
||||
# 'curl -fsSL https://test.docker.com/ | sh'
|
||||
# or:
|
||||
# 'wget -qO- https://test.docker.com/ | sh'
|
||||
#
|
||||
# For experimental builds:
|
||||
# 'curl -fsSL https://experimental.docker.com/ | sh'
|
||||
# or:
|
||||
# 'wget -qO- https://experimental.docker.com/ | sh'
|
||||
#
|
||||
# Docker Maintainers:
|
||||
# To update this script on https://get.docker.com,
|
||||
# use hack/release.sh during a normal release,
|
||||
# or the following one-liner for script hotfixes:
|
||||
# aws s3 cp --acl public-read hack/install.sh s3://get.docker.com/index
|
||||
#
|
||||
|
||||
url="https://get.docker.com/"
|
||||
apt_url="https://apt.dockerproject.org"
|
||||
yum_url="https://yum.dockerproject.org"
|
||||
gpg_fingerprint="58118E89F3A912897C070ADBF76221572C52609D"
|
||||
|
||||
key_servers="
|
||||
ha.pool.sks-keyservers.net
|
||||
pgp.mit.edu
|
||||
keyserver.ubuntu.com
|
||||
"
|
||||
|
||||
command_exists() {
|
||||
command -v "$@" > /dev/null 2>&1
|
||||
}
|
||||
|
||||
echo_docker_as_nonroot() {
|
||||
if command_exists docker && [ -e /var/run/docker.sock ]; then
|
||||
(
|
||||
set -x
|
||||
$sh_c 'docker version'
|
||||
) || true
|
||||
fi
|
||||
your_user=your-user
|
||||
[ "$user" != 'root' ] && your_user="$user"
|
||||
# intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output
|
||||
cat <<-EOF
|
||||
|
||||
If you would like to use Docker as a non-root user, you should now consider
|
||||
adding your user to the "docker" group with something like:
|
||||
|
||||
sudo usermod -aG docker $your_user
|
||||
|
||||
Remember that you will have to log out and back in for this to take effect!
|
||||
|
||||
EOF
|
||||
}
|
||||
|
||||
# Check if this is a forked Linux distro
|
||||
check_forked() {
|
||||
|
||||
# Check for lsb_release command existence, it usually exists in forked distros
|
||||
if command_exists lsb_release; then
|
||||
# Check if the `-u` option is supported
|
||||
set +e
|
||||
lsb_release -a -u > /dev/null 2>&1
|
||||
lsb_release_exit_code=$?
|
||||
set -e
|
||||
|
||||
# Check if the command has exited successfully, it means we're in a forked distro
|
||||
if [ "$lsb_release_exit_code" = "0" ]; then
|
||||
# Print info about current distro
|
||||
cat <<-EOF
|
||||
You're using '$lsb_dist' version '$dist_version'.
|
||||
EOF
|
||||
|
||||
# Get the upstream release info
|
||||
lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[[:space:]]')
|
||||
dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[[:space:]]')
|
||||
|
||||
# Print info about upstream distro
|
||||
cat <<-EOF
|
||||
Upstream release is '$lsb_dist' version '$dist_version'.
|
||||
EOF
|
||||
else
|
||||
if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ]; then
|
||||
# We're Debian and don't even know it!
|
||||
lsb_dist=debian
|
||||
dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')"
|
||||
case "$dist_version" in
|
||||
8|'Kali Linux 2')
|
||||
dist_version="jessie"
|
||||
;;
|
||||
7)
|
||||
dist_version="wheezy"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
rpm_import_repository_key() {
|
||||
local key=$1; shift
|
||||
local tmpdir=$(mktemp -d)
|
||||
chmod 600 "$tmpdir"
|
||||
for key_server in $key_servers ; do
|
||||
gpg --homedir "$tmpdir" --keyserver "$key_server" --recv-keys "$key" && break
|
||||
done
|
||||
gpg --homedir "$tmpdir" -k "$key" >/dev/null
|
||||
gpg --homedir "$tmpdir" --export --armor "$key" > "$tmpdir"/repo.key
|
||||
rpm --import "$tmpdir"/repo.key
|
||||
rm -rf "$tmpdir"
|
||||
}
|
||||
|
||||
semverParse() {
|
||||
major="${1%%.*}"
|
||||
minor="${1#$major.}"
|
||||
minor="${minor%%.*}"
|
||||
patch="${1#$major.$minor.}"
|
||||
patch="${patch%%[-.]*}"
|
||||
}
|
||||
|
||||
do_install() {
|
||||
case "$(uname -m)" in
|
||||
*64)
|
||||
;;
|
||||
*)
|
||||
cat >&2 <<-'EOF'
|
||||
Error: you are not using a 64bit platform.
|
||||
Docker currently only supports 64bit platforms.
|
||||
EOF
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
if command_exists docker; then
|
||||
version="$(docker -v | awk -F '[ ,]+' '{ print $3 }')"
|
||||
MAJOR_W=1
|
||||
MINOR_W=10
|
||||
|
||||
semverParse $version
|
||||
|
||||
shouldWarn=0
|
||||
if [ $major -lt $MAJOR_W ]; then
|
||||
shouldWarn=1
|
||||
fi
|
||||
|
||||
if [ $major -le $MAJOR_W ] && [ $minor -lt $MINOR_W ]; then
|
||||
shouldWarn=1
|
||||
fi
|
||||
|
||||
cat >&2 <<-'EOF'
|
||||
Warning: the "docker" command appears to already exist on this system.
|
||||
|
||||
If you already have Docker installed, this script can cause trouble, which is
|
||||
why we're displaying this warning and provide the opportunity to cancel the
|
||||
installation.
|
||||
|
||||
If you installed the current Docker package using this script and are using it
|
||||
EOF
|
||||
|
||||
if [ $shouldWarn -eq 1 ]; then
|
||||
cat >&2 <<-'EOF'
|
||||
again to update Docker, we urge you to migrate your image store before upgrading
|
||||
to v1.10+.
|
||||
|
||||
You can find instructions for this here:
|
||||
https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration
|
||||
EOF
|
||||
else
|
||||
cat >&2 <<-'EOF'
|
||||
again to update Docker, you can safely ignore this message.
|
||||
EOF
|
||||
fi
|
||||
|
||||
cat >&2 <<-'EOF'
|
||||
|
||||
You may press Ctrl+C now to abort this script.
|
||||
EOF
|
||||
( set -x; sleep 20 )
|
||||
fi
|
||||
|
||||
user="$(id -un 2>/dev/null || true)"
|
||||
|
||||
sh_c='sh -c'
|
||||
if [ "$user" != 'root' ]; then
|
||||
if command_exists sudo; then
|
||||
sh_c='sudo -E sh -c'
|
||||
elif command_exists su; then
|
||||
sh_c='su -c'
|
||||
else
|
||||
cat >&2 <<-'EOF'
|
||||
Error: this installer needs the ability to run commands as root.
|
||||
We are unable to find either "sudo" or "su" available to make this happen.
|
||||
EOF
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
curl=''
|
||||
if command_exists curl; then
|
||||
curl='curl -sSL'
|
||||
elif command_exists wget; then
|
||||
curl='wget -qO-'
|
||||
elif command_exists busybox && busybox --list-modules | grep -q wget; then
|
||||
curl='busybox wget -qO-'
|
||||
fi
|
||||
|
||||
# check to see which repo they are trying to install from
|
||||
if [ -z "$repo" ]; then
|
||||
repo='main'
|
||||
if [ "https://test.docker.com/" = "$url" ]; then
|
||||
repo='testing'
|
||||
elif [ "https://experimental.docker.com/" = "$url" ]; then
|
||||
repo='experimental'
|
||||
fi
|
||||
fi
|
||||
|
||||
# perform some very rudimentary platform detection
|
||||
lsb_dist=''
|
||||
dist_version=''
|
||||
if command_exists lsb_release; then
|
||||
lsb_dist="$(lsb_release -si)"
|
||||
fi
|
||||
if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then
|
||||
lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")"
|
||||
fi
|
||||
if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then
|
||||
lsb_dist='debian'
|
||||
fi
|
||||
if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then
|
||||
lsb_dist='fedora'
|
||||
fi
|
||||
if [ -z "$lsb_dist" ] && [ -r /etc/oracle-release ]; then
|
||||
lsb_dist='oracleserver'
|
||||
fi
|
||||
if [ -z "$lsb_dist" ] && [ -r /etc/centos-release ]; then
|
||||
lsb_dist='centos'
|
||||
fi
|
||||
if [ -z "$lsb_dist" ] && [ -r /etc/redhat-release ]; then
|
||||
lsb_dist='redhat'
|
||||
fi
|
||||
if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then
|
||||
lsb_dist="$(. /etc/os-release && echo "$ID")"
|
||||
fi
|
||||
|
||||
lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
|
||||
|
||||
# Special case redhatenterpriseserver
|
||||
if [ "${lsb_dist}" = "redhatenterpriseserver" ]; then
|
||||
# Set it to redhat, it will be changed to centos below anyways
|
||||
lsb_dist='redhat'
|
||||
fi
|
||||
|
||||
case "$lsb_dist" in
|
||||
|
||||
ubuntu)
|
||||
if command_exists lsb_release; then
|
||||
dist_version="$(lsb_release --codename | cut -f2)"
|
||||
fi
|
||||
if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then
|
||||
dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")"
|
||||
fi
|
||||
;;
|
||||
|
||||
debian)
|
||||
dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')"
|
||||
case "$dist_version" in
|
||||
8)
|
||||
dist_version="jessie"
|
||||
;;
|
||||
7)
|
||||
dist_version="wheezy"
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
|
||||
oracleserver)
|
||||
# need to switch lsb_dist to match yum repo URL
|
||||
lsb_dist="oraclelinux"
|
||||
dist_version="$(rpm -q --whatprovides redhat-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//')"
|
||||
;;
|
||||
|
||||
fedora|centos|redhat)
|
||||
dist_version="$(rpm -q --whatprovides ${lsb_dist}-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//' | sort | tail -1)"
|
||||
;;
|
||||
|
||||
*)
|
||||
if command_exists lsb_release; then
|
||||
dist_version="$(lsb_release --codename | cut -f2)"
|
||||
fi
|
||||
if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
|
||||
dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
|
||||
fi
|
||||
;;
|
||||
|
||||
|
||||
esac
|
||||
|
||||
# Check if this is a forked Linux distro
|
||||
check_forked
|
||||
|
||||
# Run setup for each distro accordingly
|
||||
case "$lsb_dist" in
|
||||
amzn)
|
||||
(
|
||||
set -x
|
||||
$sh_c 'sleep 3; yum -y -q install docker'
|
||||
)
|
||||
echo_docker_as_nonroot
|
||||
exit 0
|
||||
;;
|
||||
|
||||
'opensuse project'|opensuse)
|
||||
echo 'Going to perform the following operations:'
|
||||
if [ "$repo" != 'main' ]; then
|
||||
echo ' * add repository obs://Virtualization:containers'
|
||||
fi
|
||||
echo ' * install Docker'
|
||||
$sh_c 'echo "Press CTRL-C to abort"; sleep 3'
|
||||
|
||||
if [ "$repo" != 'main' ]; then
|
||||
# install experimental packages from OBS://Virtualization:containers
|
||||
(
|
||||
set -x
|
||||
zypper -n ar -f obs://Virtualization:containers Virtualization:containers
|
||||
rpm_import_repository_key 55A0B34D49501BB7CA474F5AA193FBB572174FC2
|
||||
)
|
||||
fi
|
||||
(
|
||||
set -x
|
||||
zypper -n install docker
|
||||
)
|
||||
echo_docker_as_nonroot
|
||||
exit 0
|
||||
;;
|
||||
'suse linux'|sle[sd])
|
||||
echo 'Going to perform the following operations:'
|
||||
if [ "$repo" != 'main' ]; then
|
||||
echo ' * add repository obs://Virtualization:containers'
|
||||
echo ' * install experimental Docker using packages NOT supported by SUSE'
|
||||
else
|
||||
echo ' * add the "Containers" module'
|
||||
echo ' * install Docker using packages supported by SUSE'
|
||||
fi
|
||||
$sh_c 'echo "Press CTRL-C to abort"; sleep 3'
|
||||
|
||||
if [ "$repo" != 'main' ]; then
|
||||
# install experimental packages from OBS://Virtualization:containers
|
||||
echo >&2 'Warning: installing experimental packages from OBS, these packages are NOT supported by SUSE'
|
||||
(
|
||||
set -x
|
||||
zypper -n ar -f obs://Virtualization:containers/SLE_12 Virtualization:containers
|
||||
rpm_import_repository_key 55A0B34D49501BB7CA474F5AA193FBB572174FC2
|
||||
)
|
||||
else
|
||||
# Add the containers module
|
||||
# Note well-1: the SLE machine must already be registered against SUSE Customer Center
|
||||
# Note well-2: the `-r ""` is required to workaround a known issue of SUSEConnect
|
||||
(
|
||||
set -x
|
||||
SUSEConnect -p sle-module-containers/12/x86_64 -r ""
|
||||
)
|
||||
fi
|
||||
(
|
||||
set -x
|
||||
zypper -n install docker
|
||||
)
|
||||
echo_docker_as_nonroot
|
||||
exit 0
|
||||
;;
|
||||
|
||||
ubuntu|debian)
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
did_apt_get_update=
|
||||
apt_get_update() {
|
||||
if [ -z "$did_apt_get_update" ]; then
|
||||
( set -x; $sh_c 'sleep 3; apt-get update' )
|
||||
did_apt_get_update=1
|
||||
fi
|
||||
}
|
||||
|
||||
# aufs is preferred over devicemapper; try to ensure the driver is available.
|
||||
if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then
|
||||
if uname -r | grep -q -- '-generic' && dpkg -l 'linux-image-*-generic' | grep -qE '^ii|^hi' 2>/dev/null; then
|
||||
kern_extras="linux-image-extra-$(uname -r) linux-image-extra-virtual"
|
||||
|
||||
apt_get_update
|
||||
( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true
|
||||
|
||||
if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then
|
||||
echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)'
|
||||
echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!'
|
||||
( set -x; sleep 10 )
|
||||
fi
|
||||
else
|
||||
echo >&2 'Warning: current kernel is not supported by the linux-image-extra-virtual'
|
||||
echo >&2 ' package. We have no AUFS support. Consider installing the packages'
|
||||
echo >&2 ' linux-image-virtual kernel and linux-image-extra-virtual for AUFS support.'
|
||||
( set -x; sleep 10 )
|
||||
fi
|
||||
fi
|
||||
|
||||
# install apparmor utils if they're missing and apparmor is enabled in the kernel
|
||||
# otherwise Docker will fail to start
|
||||
if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then
|
||||
if command -v apparmor_parser >/dev/null 2>&1; then
|
||||
echo 'apparmor is enabled in the kernel and apparmor utils were already installed'
|
||||
else
|
||||
echo 'apparmor is enabled in the kernel, but apparmor_parser missing'
|
||||
apt_get_update
|
||||
( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' )
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -e /usr/lib/apt/methods/https ]; then
|
||||
apt_get_update
|
||||
( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https ca-certificates' )
|
||||
fi
|
||||
if [ -z "$curl" ]; then
|
||||
apt_get_update
|
||||
( set -x; $sh_c 'sleep 3; apt-get install -y -q curl ca-certificates' )
|
||||
curl='curl -sSL'
|
||||
fi
|
||||
(
|
||||
set -x
|
||||
for key_server in $key_servers ; do
|
||||
$sh_c "apt-key adv --keyserver hkp://${key_server}:80 --recv-keys ${gpg_fingerprint}" && break
|
||||
done
|
||||
$sh_c "apt-key adv -k ${gpg_fingerprint} >/dev/null"
|
||||
$sh_c "mkdir -p /etc/apt/sources.list.d"
|
||||
$sh_c "echo deb \[arch=$(dpkg --print-architecture)\] ${apt_url}/repo ${lsb_dist}-${dist_version} ${repo} > /etc/apt/sources.list.d/docker.list"
|
||||
$sh_c 'sleep 3; apt-get update; apt-get install -y -q docker-engine'
|
||||
)
|
||||
echo_docker_as_nonroot
|
||||
exit 0
|
||||
;;
|
||||
|
||||
fedora|centos|redhat|oraclelinux)
|
||||
if [ "${lsb_dist}" = "redhat" ]; then
|
||||
# we use the centos repository for both redhat and centos releases
|
||||
lsb_dist='centos'
|
||||
fi
|
||||
$sh_c "cat >/etc/yum.repos.d/docker-${repo}.repo" <<-EOF
|
||||
[docker-${repo}-repo]
|
||||
name=Docker ${repo} Repository
|
||||
baseurl=${yum_url}/repo/${repo}/${lsb_dist}/${dist_version}
|
||||
enabled=1
|
||||
gpgcheck=1
|
||||
gpgkey=${yum_url}/gpg
|
||||
EOF
|
||||
if [ "$lsb_dist" = "fedora" ] && [ "$dist_version" -ge "22" ]; then
|
||||
(
|
||||
set -x
|
||||
$sh_c 'sleep 3; dnf -y -q install docker-engine'
|
||||
)
|
||||
else
|
||||
(
|
||||
set -x
|
||||
$sh_c 'sleep 3; yum -y -q install docker-engine'
|
||||
)
|
||||
fi
|
||||
echo_docker_as_nonroot
|
||||
exit 0
|
||||
;;
|
||||
gentoo)
|
||||
if [ "$url" = "https://test.docker.com/" ]; then
|
||||
# intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output
|
||||
cat >&2 <<-'EOF'
|
||||
|
||||
You appear to be trying to install the latest nightly build in Gentoo.'
|
||||
The portage tree should contain the latest stable release of Docker, but'
|
||||
if you want something more recent, you can always use the live ebuild'
|
||||
provided in the "docker" overlay available via layman. For more'
|
||||
instructions, please see the following URL:'
|
||||
|
||||
https://github.com/tianon/docker-overlay#using-this-overlay'
|
||||
|
||||
After adding the "docker" overlay, you should be able to:'
|
||||
|
||||
emerge -av =app-emulation/docker-9999'
|
||||
|
||||
EOF
|
||||
exit 1
|
||||
fi
|
||||
|
||||
(
|
||||
set -x
|
||||
$sh_c 'sleep 3; emerge app-emulation/docker'
|
||||
)
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
|
||||
# intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output
|
||||
cat >&2 <<-'EOF'
|
||||
|
||||
Either your platform is not easily detectable, is not supported by this
|
||||
installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have
|
||||
a package for Docker. Please visit the following URL for more detailed
|
||||
installation instructions:
|
||||
|
||||
https://docs.docker.com/engine/installation/
|
||||
|
||||
EOF
|
||||
exit 1
|
||||
}
|
||||
|
||||
# wrapped up in a function so that we have some protection against only getting
|
||||
# half the file during "curl | sh"
|
||||
do_install
|
257
vendor/github.com/containers/storage/hack/make.sh
generated
vendored
257
vendor/github.com/containers/storage/hack/make.sh
generated
vendored
|
@ -1,257 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
# This script builds various binary artifacts from a checkout of the storage
|
||||
# source code.
|
||||
#
|
||||
# Requirements:
|
||||
# - The current directory should be a checkout of the storage source code
|
||||
# (https://github.com/containers/storage). Whatever version is checked out will
|
||||
# be built.
|
||||
# - The VERSION file, at the root of the repository, should exist, and
|
||||
# will be used as the oci-storage binary version and package version.
|
||||
# - The hash of the git commit will also be included in the oci-storage binary,
|
||||
# with the suffix -unsupported if the repository isn't clean.
|
||||
# - The right way to call this script is to invoke "make" from
|
||||
# your checkout of the storage repository.
|
||||
|
||||
set -o pipefail
|
||||
|
||||
export PATH=/usr/local/go/bin:${PATH}
|
||||
export PKG='github.com/containers/storage'
|
||||
export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
export MAKEDIR="$SCRIPTDIR/make"
|
||||
export PKG_CONFIG=${PKG_CONFIG:-pkg-config}
|
||||
|
||||
: ${TEST_REPEAT:=0}
|
||||
|
||||
# List of bundles to create when no argument is passed
|
||||
DEFAULT_BUNDLES=(
|
||||
validate-dco
|
||||
validate-gofmt
|
||||
validate-lint
|
||||
validate-pkg
|
||||
validate-test
|
||||
validate-toml
|
||||
validate-vet
|
||||
|
||||
binary
|
||||
|
||||
test-unit
|
||||
|
||||
gccgo
|
||||
cross
|
||||
)
|
||||
|
||||
VERSION=$(< ./VERSION)
|
||||
if command -v git &> /dev/null && [ -d .git ] && git rev-parse &> /dev/null; then
|
||||
GITCOMMIT=$(git rev-parse --short HEAD)
|
||||
if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
|
||||
GITCOMMIT="$GITCOMMIT-unsupported"
|
||||
echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
|
||||
echo "# GITCOMMIT = $GITCOMMIT"
|
||||
echo "# The version you are building is listed as unsupported because"
|
||||
echo "# there are some files in the git repository that are in an uncommited state."
|
||||
echo "# Commit these changes, or add to .gitignore to remove the -unsupported from the version."
|
||||
echo "# Here is the current list:"
|
||||
git status --porcelain --untracked-files=no
|
||||
echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
|
||||
fi
|
||||
! BUILDTIME=$(date --rfc-3339 ns 2> /dev/null | sed -e 's/ /T/') &> /dev/null
|
||||
if [ -z $BUILDTIME ]; then
|
||||
# If using bash 3.1 which doesn't support --rfc-3389, eg Windows CI
|
||||
BUILDTIME=$(date -u)
|
||||
fi
|
||||
elif [ -n "$GITCOMMIT" ]; then
|
||||
:
|
||||
else
|
||||
echo >&2 'error: .git directory missing and GITCOMMIT not specified'
|
||||
echo >&2 ' Please either build with the .git directory accessible, or specify the'
|
||||
echo >&2 ' exact (--short) commit hash you are building using GITCOMMIT for'
|
||||
echo >&2 ' future accountability in diagnosing build issues. Thanks!'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
export GOPATH="${GOPATH:-/go}"
|
||||
|
||||
if [ "$(go env GOOS)" = 'solaris' ]; then
|
||||
# sys/unix is installed outside the standard library on solaris
|
||||
# TODO need to allow for version change, need to get version from go
|
||||
export GOPATH="${GOPATH}:/usr/lib/gocode/1.6.2"
|
||||
fi
|
||||
|
||||
if [ ! "$GOPATH" ]; then
|
||||
echo >&2 'error: missing GOPATH; please see https://golang.org/doc/code.html#GOPATH'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$EXPERIMENTAL" ]; then
|
||||
echo >&2 '# WARNING! EXPERIMENTAL is set: building experimental features'
|
||||
echo >&2
|
||||
BUILDTAGS+=" experimental"
|
||||
fi
|
||||
|
||||
# test whether "btrfs/version.h" exists and apply btrfs_noversion appropriately
|
||||
if \
|
||||
command -v gcc &> /dev/null \
|
||||
&& ! gcc -E - -o /dev/null &> /dev/null <<<'#include <btrfs/version.h>' \
|
||||
; then
|
||||
BUILDTAGS+=' btrfs_noversion'
|
||||
fi
|
||||
|
||||
# test whether "libdevmapper.h" is new enough to support deferred remove
|
||||
# functionality.
|
||||
if \
|
||||
command -v gcc &> /dev/null \
|
||||
&& ! ( echo -e '#include <libdevmapper.h>\nint main() { dm_task_deferred_remove(NULL); }'| gcc -xc - -o /dev/null -ldevmapper &> /dev/null ) \
|
||||
; then
|
||||
BUILDTAGS+=' libdm_no_deferred_remove'
|
||||
fi
|
||||
|
||||
# Use these flags when compiling the tests and final binary
|
||||
source "$SCRIPTDIR/make/.go-autogen"
|
||||
if [ -z "$DEBUG" ]; then
|
||||
LDFLAGS='-w'
|
||||
fi
|
||||
|
||||
BUILDFLAGS=( $BUILDFLAGS "${ORIG_BUILDFLAGS[@]}" )
|
||||
|
||||
if [ "$(uname -s)" = 'FreeBSD' ]; then
|
||||
# Tell cgo the compiler is Clang, not GCC
|
||||
# https://code.google.com/p/go/source/browse/src/cmd/cgo/gcc.go?spec=svne77e74371f2340ee08622ce602e9f7b15f29d8d3&r=e6794866ebeba2bf8818b9261b54e2eef1c9e588#752
|
||||
export CC=clang
|
||||
|
||||
# "-extld clang" is a workaround for
|
||||
# https://code.google.com/p/go/issues/detail?id=6845
|
||||
LDFLAGS="$LDFLAGS -extld clang"
|
||||
fi
|
||||
|
||||
HAVE_GO_TEST_COVER=
|
||||
if \
|
||||
go help testflag | grep -- -cover > /dev/null \
|
||||
&& go tool -n cover > /dev/null 2>&1 \
|
||||
; then
|
||||
HAVE_GO_TEST_COVER=1
|
||||
fi
|
||||
TIMEOUT=5m
|
||||
|
||||
# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'.
|
||||
# You can use this to select certain tests to run, eg.
|
||||
#
|
||||
# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit
|
||||
#
|
||||
# For integration-cli test, we use [gocheck](https://labix.org/gocheck), if you want
|
||||
# to run certain tests on your local host, you should run with command:
|
||||
#
|
||||
# TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration-cli
|
||||
#
|
||||
go_test_dir() {
|
||||
dir=$1
|
||||
coverpkg=$2
|
||||
testcover=()
|
||||
testcoverprofile=()
|
||||
testbinary="$DEST/test.main"
|
||||
if [ "$HAVE_GO_TEST_COVER" ]; then
|
||||
# if our current go install has -cover, we want to use it :)
|
||||
mkdir -p "$DEST/coverprofiles"
|
||||
coverprofile="storage${dir#.}"
|
||||
coverprofile="$ABS_DEST/coverprofiles/${coverprofile//\//-}"
|
||||
testcover=( -test.cover )
|
||||
testcoverprofile=( -test.coverprofile "$coverprofile" $coverpkg )
|
||||
fi
|
||||
(
|
||||
echo '+ go test' $TESTFLAGS "${PKG}${dir#.}"
|
||||
cd "$dir"
|
||||
export DEST="$ABS_DEST" # we're in a subshell, so this is safe -- our integration-cli tests need DEST, and "cd" screws it up
|
||||
go test -c -o "$testbinary" ${testcover[@]} -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}"
|
||||
i=0
|
||||
while ((++i)); do
|
||||
test_env "$testbinary" ${testcoverprofile[@]} $TESTFLAGS
|
||||
if [ $i -gt "$TEST_REPEAT" ]; then
|
||||
break
|
||||
fi
|
||||
echo "Repeating test ($i)"
|
||||
done
|
||||
)
|
||||
}
|
||||
test_env() {
|
||||
# use "env -i" to tightly control the environment variables that bleed into the tests
|
||||
env -i \
|
||||
DEST="$DEST" \
|
||||
GOPATH="$GOPATH" \
|
||||
GOTRACEBACK=all \
|
||||
HOME="$ABS_DEST/fake-HOME" \
|
||||
PATH="${GOPATH}/bin:/usr/local/go/bin:$PATH" \
|
||||
TEMP="$TEMP" \
|
||||
"$@"
|
||||
}
|
||||
|
||||
# a helper to provide ".exe" when it's appropriate
|
||||
binary_extension() {
|
||||
echo -n $(go env GOEXE)
|
||||
}
|
||||
|
||||
hash_files() {
|
||||
while [ $# -gt 0 ]; do
|
||||
f="$1"
|
||||
shift
|
||||
dir="$(dirname "$f")"
|
||||
base="$(basename "$f")"
|
||||
for hashAlgo in md5 sha256; do
|
||||
if command -v "${hashAlgo}sum" &> /dev/null; then
|
||||
(
|
||||
# subshell and cd so that we get output files like:
|
||||
# $HASH oci-storage-$VERSION
|
||||
# instead of:
|
||||
# $HASH /go/src/github.com/.../$VERSION/binary/oci-storage-$VERSION
|
||||
cd "$dir"
|
||||
"${hashAlgo}sum" "$base" > "$base.$hashAlgo"
|
||||
)
|
||||
fi
|
||||
done
|
||||
done
|
||||
}
|
||||
|
||||
bundle() {
|
||||
local bundle="$1"; shift
|
||||
echo "---> Making bundle: $(basename "$bundle") (in $DEST)"
|
||||
source "$SCRIPTDIR/make/$bundle" "$@"
|
||||
}
|
||||
|
||||
main() {
|
||||
# We want this to fail if the bundles already exist and cannot be removed.
|
||||
# This is to avoid mixing bundles from different versions of the code.
|
||||
mkdir -p bundles
|
||||
if [ -e "bundles/$VERSION" ] && [ -z "$KEEPBUNDLE" ]; then
|
||||
echo "bundles/$VERSION already exists. Removing."
|
||||
rm -fr "bundles/$VERSION" && mkdir "bundles/$VERSION" || exit 1
|
||||
echo
|
||||
fi
|
||||
|
||||
if [ "$(go env GOHOSTOS)" != 'windows' ]; then
|
||||
# Windows and symlinks don't get along well
|
||||
|
||||
rm -f bundles/latest
|
||||
ln -s "$VERSION" bundles/latest
|
||||
fi
|
||||
|
||||
if [ $# -lt 1 ]; then
|
||||
bundles=(${DEFAULT_BUNDLES[@]})
|
||||
else
|
||||
bundles=($@)
|
||||
fi
|
||||
|
||||
for bundle in ${bundles[@]}; do
|
||||
export DEST="bundles/$VERSION/$(basename "$bundle")"
|
||||
# Cygdrive paths don't play well with go build -o.
|
||||
if [[ "$(uname -s)" == CYGWIN* ]]; then
|
||||
export DEST="$(cygpath -mw "$DEST")"
|
||||
fi
|
||||
mkdir -p "$DEST"
|
||||
ABS_DEST="$(cd "$DEST" && pwd -P)"
|
||||
bundle "$bundle"
|
||||
echo
|
||||
done
|
||||
}
|
||||
|
||||
main "$@"
|
64
vendor/github.com/containers/storage/hack/make/.binary
generated
vendored
64
vendor/github.com/containers/storage/hack/make/.binary
generated
vendored
|
@ -1,64 +0,0 @@
|
|||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
BINARY_NAME="$BINARY_SHORT_NAME-$VERSION"
|
||||
BINARY_EXTENSION="$(binary_extension)"
|
||||
BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION"
|
||||
|
||||
source "${MAKEDIR}/.go-autogen"
|
||||
|
||||
(
|
||||
export GOGC=${DOCKER_BUILD_GOGC:-1000}
|
||||
|
||||
if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then
|
||||
# must be cross-compiling!
|
||||
case "$(go env GOOS)/$(go env GOARCH)" in
|
||||
windows/amd64)
|
||||
export CC=x86_64-w64-mingw32-gcc
|
||||
export CGO_ENABLED=1
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
if [ "$(go env GOOS)" == "linux" ] ; then
|
||||
case "$(go env GOARCH)" in
|
||||
arm*|386)
|
||||
# linking for Linux on arm or x86 needs external linking to avoid
|
||||
# https://github.com/golang/go/issues/9510 until we move to Go 1.6
|
||||
if [ "$IAMSTATIC" == "true" ] ; then
|
||||
export EXTLDFLAGS_STATIC="$EXTLDFLAGS_STATIC -zmuldefs"
|
||||
export LDFLAGS_STATIC_DOCKER="$LDFLAGS_STATIC -extldflags \"$EXTLDFLAGS_STATIC\""
|
||||
|
||||
else
|
||||
export LDFLAGS="$LDFLAGS -extldflags -zmuldefs"
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
if [ "$IAMSTATIC" == "true" ] && [ "$(go env GOHOSTOS)" == "linux" ]; then
|
||||
if [ "${GOOS}/${GOARCH}" == "darwin/amd64" ]; then
|
||||
export CGO_ENABLED=1
|
||||
export CC=o64-clang
|
||||
export LDFLAGS='-linkmode external -s'
|
||||
export LDFLAGS_STATIC_DOCKER='-extld='${CC}
|
||||
else
|
||||
export BUILDFLAGS=( "${BUILDFLAGS[@]/pkcs11 /}" ) # we cannot dlopen in pkcs11 in a static binary
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "Building: $DEST/$BINARY_FULLNAME"
|
||||
go build \
|
||||
-o "$DEST/$BINARY_FULLNAME" \
|
||||
"${BUILDFLAGS[@]}" ${BUILDTAGS:+-tags "${BUILDTAGS}"} \
|
||||
-ldflags "
|
||||
$LDFLAGS
|
||||
$LDFLAGS_STATIC_DOCKER
|
||||
" \
|
||||
$SOURCE_PATH
|
||||
)
|
||||
|
||||
echo "Created binary: $DEST/$BINARY_FULLNAME"
|
||||
ln -sf "$BINARY_FULLNAME" "$DEST/$BINARY_SHORT_NAME$BINARY_EXTENSION"
|
||||
|
||||
hash_files "$DEST/$BINARY_FULLNAME"
|
5
vendor/github.com/containers/storage/hack/make/.binary-setup
generated
vendored
5
vendor/github.com/containers/storage/hack/make/.binary-setup
generated
vendored
|
@ -1,5 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
DOCKER_CLIENT_BINARY_NAME='docker'
|
||||
DOCKER_DAEMON_BINARY_NAME='dockerd'
|
||||
DOCKER_PROXY_BINARY_NAME='docker-proxy'
|
1
vendor/github.com/containers/storage/hack/make/.build-deb/compat
generated
vendored
1
vendor/github.com/containers/storage/hack/make/.build-deb/compat
generated
vendored
|
@ -1 +0,0 @@
|
|||
9
|
29
vendor/github.com/containers/storage/hack/make/.build-deb/control
generated
vendored
29
vendor/github.com/containers/storage/hack/make/.build-deb/control
generated
vendored
|
@ -1,29 +0,0 @@
|
|||
Source: docker-engine
|
||||
Section: admin
|
||||
Priority: optional
|
||||
Maintainer: Docker <support@docker.com>
|
||||
Standards-Version: 3.9.6
|
||||
Homepage: https://dockerproject.org
|
||||
Vcs-Browser: https://github.com/docker/docker
|
||||
Vcs-Git: git://github.com/docker/docker.git
|
||||
|
||||
Package: docker-engine
|
||||
Architecture: linux-any
|
||||
Depends: iptables, ${misc:Depends}, ${perl:Depends}, ${shlibs:Depends}
|
||||
Recommends: aufs-tools,
|
||||
ca-certificates,
|
||||
cgroupfs-mount | cgroup-lite,
|
||||
git,
|
||||
xz-utils,
|
||||
${apparmor:Recommends}
|
||||
Conflicts: docker (<< 1.5~), docker.io, lxc-docker, lxc-docker-virtual-package, docker-engine-cs
|
||||
Description: Docker: the open-source application container engine
|
||||
Docker is an open source project to build, ship and run any application as a
|
||||
lightweight container
|
||||
.
|
||||
Docker containers are both hardware-agnostic and platform-agnostic. This means
|
||||
they can run anywhere, from your laptop to the largest EC2 compute instance and
|
||||
everything in between - and they don't require you to use a particular
|
||||
language, framework or packaging system. That makes them great building blocks
|
||||
for deploying and scaling web apps, databases, and backend services without
|
||||
depending on a particular stack or provider.
|
|
@ -1 +0,0 @@
|
|||
contrib/completion/bash/docker
|
12
vendor/github.com/containers/storage/hack/make/.build-deb/docker-engine.install
generated
vendored
12
vendor/github.com/containers/storage/hack/make/.build-deb/docker-engine.install
generated
vendored
|
@ -1,12 +0,0 @@
|
|||
#contrib/syntax/vim/doc/* /usr/share/vim/vimfiles/doc/
|
||||
#contrib/syntax/vim/ftdetect/* /usr/share/vim/vimfiles/ftdetect/
|
||||
#contrib/syntax/vim/syntax/* /usr/share/vim/vimfiles/syntax/
|
||||
contrib/*-integration usr/share/docker-engine/contrib/
|
||||
contrib/check-config.sh usr/share/docker-engine/contrib/
|
||||
contrib/completion/fish/docker.fish usr/share/fish/vendor_completions.d/
|
||||
contrib/completion/zsh/_docker usr/share/zsh/vendor-completions/
|
||||
contrib/init/systemd/docker.service lib/systemd/system/
|
||||
contrib/init/systemd/docker.socket lib/systemd/system/
|
||||
contrib/mk* usr/share/docker-engine/contrib/
|
||||
contrib/nuke-graph-directory.sh usr/share/docker-engine/contrib/
|
||||
contrib/syntax/nano/Dockerfile.nanorc usr/share/nano/
|
1
vendor/github.com/containers/storage/hack/make/.build-deb/docker-engine.manpages
generated
vendored
1
vendor/github.com/containers/storage/hack/make/.build-deb/docker-engine.manpages
generated
vendored
|
@ -1 +0,0 @@
|
|||
man/man*/*
|
20
vendor/github.com/containers/storage/hack/make/.build-deb/docker-engine.postinst
generated
vendored
20
vendor/github.com/containers/storage/hack/make/.build-deb/docker-engine.postinst
generated
vendored
|
@ -1,20 +0,0 @@
|
|||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
case "$1" in
|
||||
configure)
|
||||
if [ -z "$2" ]; then
|
||||
if ! getent group docker > /dev/null; then
|
||||
groupadd --system docker
|
||||
fi
|
||||
fi
|
||||
;;
|
||||
abort-*)
|
||||
# How'd we get here??
|
||||
exit 1
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
#DEBHELPER#
|
1
vendor/github.com/containers/storage/hack/make/.build-deb/docs
generated
vendored
1
vendor/github.com/containers/storage/hack/make/.build-deb/docs
generated
vendored
|
@ -1 +0,0 @@
|
|||
README.md
|
54
vendor/github.com/containers/storage/hack/make/.build-deb/rules
generated
vendored
54
vendor/github.com/containers/storage/hack/make/.build-deb/rules
generated
vendored
|
@ -1,54 +0,0 @@
|
|||
#!/usr/bin/make -f
|
||||
|
||||
VERSION = $(shell cat VERSION)
|
||||
SYSTEMD_VERSION := $(shell dpkg-query -W -f='$${Version}\n' systemd | cut -d- -f1)
|
||||
SYSTEMD_GT_227 := $(shell [ '$(SYSTEMD_VERSION)' ] && [ '$(SYSTEMD_VERSION)' -gt 227 ] && echo true )
|
||||
|
||||
override_dh_gencontrol:
|
||||
# if we're on Ubuntu, we need to Recommends: apparmor
|
||||
echo 'apparmor:Recommends=$(shell dpkg-vendor --is Ubuntu && echo apparmor)' >> debian/docker-engine.substvars
|
||||
dh_gencontrol
|
||||
|
||||
override_dh_auto_build:
|
||||
./hack/make.sh dynbinary
|
||||
# ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here
|
||||
|
||||
override_dh_auto_test:
|
||||
./bundles/$(VERSION)/dynbinary-daemon/dockerd -v
|
||||
./bundles/$(VERSION)/dynbinary-client/docker -v
|
||||
|
||||
override_dh_strip:
|
||||
# Go has lots of problems with stripping, so just don't
|
||||
|
||||
override_dh_auto_install:
|
||||
mkdir -p debian/docker-engine/usr/bin
|
||||
cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-client/docker)" debian/docker-engine/usr/bin/docker
|
||||
cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-daemon/dockerd)" debian/docker-engine/usr/bin/dockerd
|
||||
cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-daemon/docker-proxy)" debian/docker-engine/usr/bin/docker-proxy
|
||||
cp -aT /usr/local/bin/containerd debian/docker-engine/usr/bin/docker-containerd
|
||||
cp -aT /usr/local/bin/containerd-shim debian/docker-engine/usr/bin/docker-containerd-shim
|
||||
cp -aT /usr/local/bin/ctr debian/docker-engine/usr/bin/docker-containerd-ctr
|
||||
cp -aT /usr/local/sbin/runc debian/docker-engine/usr/bin/docker-runc
|
||||
mkdir -p debian/docker-engine/usr/lib/docker
|
||||
|
||||
override_dh_installinit:
|
||||
# use "docker" as our service name, not "docker-engine"
|
||||
dh_installinit --name=docker
|
||||
ifeq (true, $(SYSTEMD_GT_227))
|
||||
$(warning "Setting TasksMax=infinity")
|
||||
sed -i -- 's/#TasksMax=infinity/TasksMax=infinity/' debian/docker-engine/lib/systemd/system/docker.service
|
||||
endif
|
||||
|
||||
override_dh_installudev:
|
||||
# match our existing priority
|
||||
dh_installudev --priority=z80
|
||||
|
||||
override_dh_install:
|
||||
dh_install
|
||||
dh_apparmor --profile-name=docker-engine -pdocker-engine
|
||||
|
||||
override_dh_shlibdeps:
|
||||
dh_shlibdeps --dpkg-shlibdeps-params=--ignore-missing-info
|
||||
|
||||
%:
|
||||
dh $@ --with=bash-completion $(shell command -v dh_systemd_enable > /dev/null 2>&1 && echo --with=systemd)
|
109
vendor/github.com/containers/storage/hack/make/.build-rpm/docker-engine-selinux.spec
generated
vendored
109
vendor/github.com/containers/storage/hack/make/.build-rpm/docker-engine-selinux.spec
generated
vendored
|
@ -1,109 +0,0 @@
|
|||
# Some bits borrowed from the openstack-selinux package
|
||||
Name: docker-engine-selinux
|
||||
Version: %{_version}
|
||||
Release: %{_release}%{?dist}
|
||||
Summary: SELinux Policies for the open-source application container engine
|
||||
BuildArch: noarch
|
||||
Group: Tools/Docker
|
||||
|
||||
License: GPLv2
|
||||
Source: %{name}.tar.gz
|
||||
|
||||
URL: https://dockerproject.org
|
||||
Vendor: Docker
|
||||
Packager: Docker <support@docker.com>
|
||||
|
||||
# Version of SELinux we were using
|
||||
%if 0%{?fedora} == 20
|
||||
%global selinux_policyver 3.12.1-197
|
||||
%endif # fedora 20
|
||||
%if 0%{?fedora} == 21
|
||||
%global selinux_policyver 3.13.1-105
|
||||
%endif # fedora 21
|
||||
%if 0%{?fedora} >= 22
|
||||
%global selinux_policyver 3.13.1-128
|
||||
%endif # fedora 22
|
||||
%if 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7
|
||||
%global selinux_policyver 3.13.1-23
|
||||
%endif # centos,rhel,oraclelinux 7
|
||||
|
||||
%global selinuxtype targeted
|
||||
%global moduletype services
|
||||
%global modulenames docker
|
||||
|
||||
Requires(post): selinux-policy-base >= %{selinux_policyver}, selinux-policy-targeted >= %{selinux_policyver}, policycoreutils, policycoreutils-python libselinux-utils
|
||||
BuildRequires: selinux-policy selinux-policy-devel
|
||||
|
||||
# conflicting packages
|
||||
Conflicts: docker-selinux
|
||||
|
||||
# Usage: _format var format
|
||||
# Expand 'modulenames' into various formats as needed
|
||||
# Format must contain '$x' somewhere to do anything useful
|
||||
%global _format() export %1=""; for x in %{modulenames}; do %1+=%2; %1+=" "; done;
|
||||
|
||||
# Relabel files
|
||||
%global relabel_files() \
|
||||
/sbin/restorecon -R %{_bindir}/docker %{_localstatedir}/run/docker.sock %{_localstatedir}/run/docker.pid %{_sysconfdir}/docker %{_localstatedir}/log/docker %{_localstatedir}/log/lxc %{_localstatedir}/lock/lxc %{_usr}/lib/systemd/system/docker.service /root/.docker &> /dev/null || : \
|
||||
|
||||
%description
|
||||
SELinux policy modules for use with Docker
|
||||
|
||||
%prep
|
||||
%if 0%{?centos} <= 6
|
||||
%setup -n %{name}
|
||||
%else
|
||||
%autosetup -n %{name}
|
||||
%endif
|
||||
|
||||
%build
|
||||
make SHARE="%{_datadir}" TARGETS="%{modulenames}"
|
||||
|
||||
%install
|
||||
|
||||
# Install SELinux interfaces
|
||||
%_format INTERFACES $x.if
|
||||
install -d %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype}
|
||||
install -p -m 644 $INTERFACES %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype}
|
||||
|
||||
# Install policy modules
|
||||
%_format MODULES $x.pp.bz2
|
||||
install -d %{buildroot}%{_datadir}/selinux/packages
|
||||
install -m 0644 $MODULES %{buildroot}%{_datadir}/selinux/packages
|
||||
|
||||
%post
|
||||
#
|
||||
# Install all modules in a single transaction
|
||||
#
|
||||
if [ $1 -eq 1 ]; then
|
||||
%{_sbindir}/setsebool -P -N virt_use_nfs=1 virt_sandbox_use_all_caps=1
|
||||
fi
|
||||
%_format MODULES %{_datadir}/selinux/packages/$x.pp.bz2
|
||||
%{_sbindir}/semodule -n -s %{selinuxtype} -i $MODULES
|
||||
if %{_sbindir}/selinuxenabled ; then
|
||||
%{_sbindir}/load_policy
|
||||
%relabel_files
|
||||
if [ $1 -eq 1 ]; then
|
||||
restorecon -R %{_sharedstatedir}/docker
|
||||
fi
|
||||
fi
|
||||
|
||||
%postun
|
||||
if [ $1 -eq 0 ]; then
|
||||
%{_sbindir}/semodule -n -r %{modulenames} &> /dev/null || :
|
||||
if %{_sbindir}/selinuxenabled ; then
|
||||
%{_sbindir}/load_policy
|
||||
%relabel_files
|
||||
fi
|
||||
fi
|
||||
|
||||
%files
|
||||
%doc LICENSE
|
||||
%defattr(-,root,root,0755)
|
||||
%attr(0644,root,root) %{_datadir}/selinux/packages/*.pp.bz2
|
||||
%attr(0644,root,root) %{_datadir}/selinux/devel/include/%{moduletype}/*.if
|
||||
|
||||
%changelog
|
||||
* Tue Dec 1 2015 Jessica Frazelle <acidburn@docker.com> 1.9.1-1
|
||||
- add licence to rpm
|
||||
- add selinux-policy and docker-engine-selinux rpm
|
244
vendor/github.com/containers/storage/hack/make/.build-rpm/docker-engine.spec
generated
vendored
244
vendor/github.com/containers/storage/hack/make/.build-rpm/docker-engine.spec
generated
vendored
|
@ -1,244 +0,0 @@
|
|||
Name: docker-engine
|
||||
Version: %{_version}
|
||||
Release: %{_release}%{?dist}
|
||||
Summary: The open-source application container engine
|
||||
Group: Tools/Docker
|
||||
|
||||
License: ASL 2.0
|
||||
Source: %{name}.tar.gz
|
||||
|
||||
URL: https://dockerproject.org
|
||||
Vendor: Docker
|
||||
Packager: Docker <support@docker.com>
|
||||
|
||||
# is_systemd conditional
|
||||
%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1210
|
||||
%global is_systemd 1
|
||||
%endif
|
||||
|
||||
# required packages for build
|
||||
# most are already in the container (see contrib/builder/rpm/ARCH/generate.sh)
|
||||
# only require systemd on those systems
|
||||
%if 0%{?is_systemd}
|
||||
%if 0%{?suse_version} >= 1210
|
||||
BuildRequires: systemd-rpm-macros
|
||||
%{?systemd_requires}
|
||||
%else
|
||||
BuildRequires: pkgconfig(systemd)
|
||||
Requires: systemd-units
|
||||
BuildRequires: pkgconfig(libsystemd-journal)
|
||||
%endif
|
||||
%else
|
||||
Requires(post): chkconfig
|
||||
Requires(preun): chkconfig
|
||||
# This is for /sbin/service
|
||||
Requires(preun): initscripts
|
||||
%endif
|
||||
|
||||
# required packages on install
|
||||
Requires: /bin/sh
|
||||
Requires: iptables
|
||||
%if !0%{?suse_version}
|
||||
Requires: libcgroup
|
||||
%else
|
||||
Requires: libcgroup1
|
||||
%endif
|
||||
Requires: tar
|
||||
Requires: xz
|
||||
%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7
|
||||
# Resolves: rhbz#1165615
|
||||
Requires: device-mapper-libs >= 1.02.90-1
|
||||
%endif
|
||||
%if 0%{?oraclelinux} >= 6
|
||||
# Require Oracle Unbreakable Enterprise Kernel R4 and newer device-mapper
|
||||
Requires: kernel-uek >= 4.1
|
||||
Requires: device-mapper >= 1.02.90-2
|
||||
%endif
|
||||
|
||||
# docker-selinux conditional
|
||||
%if 0%{?fedora} >= 20 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7
|
||||
%global with_selinux 1
|
||||
%endif
|
||||
|
||||
# DWZ problem with multiple golang binary, see bug
|
||||
# https://bugzilla.redhat.com/show_bug.cgi?id=995136#c12
|
||||
%if 0%{?fedora} >= 20 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7
|
||||
%global _dwz_low_mem_die_limit 0
|
||||
%endif
|
||||
|
||||
# start if with_selinux
|
||||
%if 0%{?with_selinux}
|
||||
# Version of SELinux we were using
|
||||
%if 0%{?fedora} == 20
|
||||
%global selinux_policyver 3.12.1-197
|
||||
%endif # fedora 20
|
||||
%if 0%{?fedora} == 21
|
||||
%global selinux_policyver 3.13.1-105
|
||||
%endif # fedora 21
|
||||
%if 0%{?fedora} >= 22
|
||||
%global selinux_policyver 3.13.1-128
|
||||
%endif # fedora 22
|
||||
%if 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7
|
||||
%global selinux_policyver 3.13.1-23
|
||||
%endif # centos,oraclelinux 7
|
||||
%endif # with_selinux
|
||||
|
||||
# RE: rhbz#1195804 - ensure min NVR for selinux-policy
|
||||
%if 0%{?with_selinux}
|
||||
Requires: selinux-policy >= %{selinux_policyver}
|
||||
Requires(pre): %{name}-selinux >= %{version}-%{release}
|
||||
%endif # with_selinux
|
||||
|
||||
# conflicting packages
|
||||
Conflicts: docker
|
||||
Conflicts: docker-io
|
||||
Conflicts: docker-engine-cs
|
||||
|
||||
%description
|
||||
Docker is an open source project to build, ship and run any application as a
|
||||
lightweight container.
|
||||
|
||||
Docker containers are both hardware-agnostic and platform-agnostic. This means
|
||||
they can run anywhere, from your laptop to the largest EC2 compute instance and
|
||||
everything in between - and they don't require you to use a particular
|
||||
language, framework or packaging system. That makes them great building blocks
|
||||
for deploying and scaling web apps, databases, and backend services without
|
||||
depending on a particular stack or provider.
|
||||
|
||||
%prep
|
||||
%if 0%{?centos} <= 6 || 0%{?oraclelinux} <=6
|
||||
%setup -n %{name}
|
||||
%else
|
||||
%autosetup -n %{name}
|
||||
%endif
|
||||
|
||||
%build
|
||||
export DOCKER_GITCOMMIT=%{_gitcommit}
|
||||
./hack/make.sh dynbinary
|
||||
# ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here
|
||||
|
||||
%check
|
||||
./bundles/%{_origversion}/dynbinary-client/docker -v
|
||||
./bundles/%{_origversion}/dynbinary-daemon/dockerd -v
|
||||
|
||||
%install
|
||||
# install binary
|
||||
install -d $RPM_BUILD_ROOT/%{_bindir}
|
||||
install -p -m 755 bundles/%{_origversion}/dynbinary-client/docker-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/docker
|
||||
install -p -m 755 bundles/%{_origversion}/dynbinary-daemon/dockerd-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/dockerd
|
||||
install -p -m 755 bundles/%{_origversion}/dynbinary-daemon/docker-proxy-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/docker-proxy
|
||||
|
||||
# install containerd
|
||||
install -p -m 755 /usr/local/bin/containerd $RPM_BUILD_ROOT/%{_bindir}/docker-containerd
|
||||
install -p -m 755 /usr/local/bin/containerd-shim $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-shim
|
||||
install -p -m 755 /usr/local/bin/ctr $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-ctr
|
||||
|
||||
# install runc
|
||||
install -p -m 755 /usr/local/sbin/runc $RPM_BUILD_ROOT/%{_bindir}/docker-runc
|
||||
|
||||
# install udev rules
|
||||
install -d $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d
|
||||
install -p -m 644 contrib/udev/80-docker.rules $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d/80-docker.rules
|
||||
|
||||
# add init scripts
|
||||
install -d $RPM_BUILD_ROOT/etc/sysconfig
|
||||
install -d $RPM_BUILD_ROOT/%{_initddir}
|
||||
|
||||
|
||||
%if 0%{?is_systemd}
|
||||
install -d $RPM_BUILD_ROOT/%{_unitdir}
|
||||
install -p -m 644 contrib/init/systemd/docker.service $RPM_BUILD_ROOT/%{_unitdir}/docker.service
|
||||
install -p -m 644 contrib/init/systemd/docker.socket $RPM_BUILD_ROOT/%{_unitdir}/docker.socket
|
||||
%else
|
||||
install -p -m 644 contrib/init/sysvinit-redhat/docker.sysconfig $RPM_BUILD_ROOT/etc/sysconfig/docker
|
||||
install -p -m 755 contrib/init/sysvinit-redhat/docker $RPM_BUILD_ROOT/%{_initddir}/docker
|
||||
%endif
|
||||
# add bash, zsh, and fish completions
|
||||
install -d $RPM_BUILD_ROOT/usr/share/bash-completion/completions
|
||||
install -d $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions
|
||||
install -d $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d
|
||||
install -p -m 644 contrib/completion/bash/docker $RPM_BUILD_ROOT/usr/share/bash-completion/completions/docker
|
||||
install -p -m 644 contrib/completion/zsh/_docker $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions/_docker
|
||||
install -p -m 644 contrib/completion/fish/docker.fish $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d/docker.fish
|
||||
|
||||
# install manpages
|
||||
install -d %{buildroot}%{_mandir}/man1
|
||||
install -p -m 644 man/man1/*.1 $RPM_BUILD_ROOT/%{_mandir}/man1
|
||||
install -d %{buildroot}%{_mandir}/man5
|
||||
install -p -m 644 man/man5/*.5 $RPM_BUILD_ROOT/%{_mandir}/man5
|
||||
install -d %{buildroot}%{_mandir}/man8
|
||||
install -p -m 644 man/man8/*.8 $RPM_BUILD_ROOT/%{_mandir}/man8
|
||||
|
||||
# add vimfiles
|
||||
install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc
|
||||
install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect
|
||||
install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax
|
||||
install -p -m 644 contrib/syntax/vim/doc/dockerfile.txt $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc/dockerfile.txt
|
||||
install -p -m 644 contrib/syntax/vim/ftdetect/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect/dockerfile.vim
|
||||
install -p -m 644 contrib/syntax/vim/syntax/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax/dockerfile.vim
|
||||
|
||||
# add nano
|
||||
install -d $RPM_BUILD_ROOT/usr/share/nano
|
||||
install -p -m 644 contrib/syntax/nano/Dockerfile.nanorc $RPM_BUILD_ROOT/usr/share/nano/Dockerfile.nanorc
|
||||
|
||||
# list files owned by the package here
|
||||
%files
|
||||
%doc AUTHORS CHANGELOG.md CONTRIBUTING.md LICENSE MAINTAINERS NOTICE README.md
|
||||
/%{_bindir}/docker
|
||||
/%{_bindir}/dockerd
|
||||
/%{_bindir}/docker-containerd
|
||||
/%{_bindir}/docker-containerd-shim
|
||||
/%{_bindir}/docker-containerd-ctr
|
||||
/%{_bindir}/docker-proxy
|
||||
/%{_bindir}/docker-runc
|
||||
/%{_sysconfdir}/udev/rules.d/80-docker.rules
|
||||
%if 0%{?is_systemd}
|
||||
/%{_unitdir}/docker.service
|
||||
/%{_unitdir}/docker.socket
|
||||
%else
|
||||
%config(noreplace,missingok) /etc/sysconfig/docker
|
||||
/%{_initddir}/docker
|
||||
%endif
|
||||
/usr/share/bash-completion/completions/docker
|
||||
/usr/share/zsh/vendor-completions/_docker
|
||||
/usr/share/fish/vendor_completions.d/docker.fish
|
||||
%doc
|
||||
/%{_mandir}/man1/*
|
||||
/%{_mandir}/man5/*
|
||||
/%{_mandir}/man8/*
|
||||
/usr/share/vim/vimfiles/doc/dockerfile.txt
|
||||
/usr/share/vim/vimfiles/ftdetect/dockerfile.vim
|
||||
/usr/share/vim/vimfiles/syntax/dockerfile.vim
|
||||
/usr/share/nano/Dockerfile.nanorc
|
||||
|
||||
%post
|
||||
%if 0%{?is_systemd}
|
||||
%systemd_post docker
|
||||
%else
|
||||
# This adds the proper /etc/rc*.d links for the script
|
||||
/sbin/chkconfig --add docker
|
||||
%endif
|
||||
if ! getent group docker > /dev/null; then
|
||||
groupadd --system docker
|
||||
fi
|
||||
|
||||
%preun
|
||||
%if 0%{?is_systemd}
|
||||
%systemd_preun docker
|
||||
%else
|
||||
if [ $1 -eq 0 ] ; then
|
||||
/sbin/service docker stop >/dev/null 2>&1
|
||||
/sbin/chkconfig --del docker
|
||||
fi
|
||||
%endif
|
||||
|
||||
%postun
|
||||
%if 0%{?is_systemd}
|
||||
%systemd_postun_with_restart docker
|
||||
%else
|
||||
if [ "$1" -ge "1" ] ; then
|
||||
/sbin/service docker condrestart >/dev/null 2>&1 || :
|
||||
fi
|
||||
%endif
|
||||
|
||||
%changelog
|
66
vendor/github.com/containers/storage/hack/make/.detect-daemon-osarch
generated
vendored
66
vendor/github.com/containers/storage/hack/make/.detect-daemon-osarch
generated
vendored
|
@ -1,66 +0,0 @@
|
|||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
docker-version-osarch() {
|
||||
local target="$1" # "Client" or "Server"
|
||||
local fmtStr="{{.${target}.Os}}/{{.${target}.Arch}}"
|
||||
if docker version -f "$fmtStr" 2>/dev/null; then
|
||||
# if "docker version -f" works, let's just use that!
|
||||
return
|
||||
fi
|
||||
docker version | awk '
|
||||
$1 ~ /^(Client|Server):$/ { section = 0 }
|
||||
$1 == "'"$target"':" { section = 1; next }
|
||||
section && $1 == "OS/Arch:" { print $2 }
|
||||
|
||||
# old versions of Docker
|
||||
$1 == "OS/Arch" && $2 == "('"${target,,}"'):" { print $3 }
|
||||
'
|
||||
}
|
||||
|
||||
# Retrieve OS/ARCH of docker daemon, eg. linux/amd64
|
||||
export DOCKER_ENGINE_OSARCH="$(docker-version-osarch 'Server')"
|
||||
export DOCKER_ENGINE_GOOS="${DOCKER_ENGINE_OSARCH%/*}"
|
||||
export DOCKER_ENGINE_GOARCH="${DOCKER_ENGINE_OSARCH##*/}"
|
||||
DOCKER_ENGINE_GOARCH=${DOCKER_ENGINE_GOARCH:=amd64}
|
||||
|
||||
# and the client, just in case
|
||||
export DOCKER_CLIENT_OSARCH="$(docker-version-osarch 'Client')"
|
||||
export DOCKER_CLIENT_GOOS="${DOCKER_CLIENT_OSARCH%/*}"
|
||||
export DOCKER_CLIENT_GOARCH="${DOCKER_CLIENT_OSARCH##*/}"
|
||||
DOCKER_CLIENT_GOARCH=${DOCKER_CLIENT_GOARCH:=amd64}
|
||||
|
||||
# Retrieve the architecture used in contrib/builder/(deb|rpm)/$PACKAGE_ARCH/
|
||||
PACKAGE_ARCH='amd64'
|
||||
case "${DOCKER_ENGINE_GOARCH:-$DOCKER_CLIENT_GOARCH}" in
|
||||
arm)
|
||||
PACKAGE_ARCH='armhf'
|
||||
;;
|
||||
arm64)
|
||||
PACKAGE_ARCH='aarch64'
|
||||
;;
|
||||
amd64|ppc64le|s390x)
|
||||
PACKAGE_ARCH="${DOCKER_ENGINE_GOARCH:-$DOCKER_CLIENT_GOARCH}"
|
||||
;;
|
||||
*)
|
||||
echo >&2 "warning: not sure how to convert '$DOCKER_ENGINE_GOARCH' to a 'Docker' arch, assuming '$PACKAGE_ARCH'"
|
||||
;;
|
||||
esac
|
||||
export PACKAGE_ARCH
|
||||
|
||||
DOCKERFILE='Dockerfile'
|
||||
TEST_IMAGE_NAMESPACE=
|
||||
case "$PACKAGE_ARCH" in
|
||||
amd64)
|
||||
case "${DOCKER_ENGINE_GOOS:-$DOCKER_CLIENT_GOOS}" in
|
||||
windows)
|
||||
DOCKERFILE='Dockerfile.windows'
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
DOCKERFILE="Dockerfile.$PACKAGE_ARCH"
|
||||
TEST_IMAGE_NAMESPACE="$PACKAGE_ARCH"
|
||||
;;
|
||||
esac
|
||||
export DOCKERFILE TEST_IMAGE_NAMESPACE
|
23
vendor/github.com/containers/storage/hack/make/.ensure-emptyfs
generated
vendored
23
vendor/github.com/containers/storage/hack/make/.ensure-emptyfs
generated
vendored
|
@ -1,23 +0,0 @@
|
|||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
if ! docker inspect emptyfs &> /dev/null; then
|
||||
# let's build a "docker save" tarball for "emptyfs"
|
||||
# see https://github.com/docker/docker/pull/5262
|
||||
# and also https://github.com/docker/docker/issues/4242
|
||||
dir="$DEST/emptyfs"
|
||||
mkdir -p "$dir"
|
||||
(
|
||||
cd "$dir"
|
||||
echo '{"emptyfs":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > repositories
|
||||
mkdir -p 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158
|
||||
(
|
||||
cd 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158
|
||||
echo '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' > json
|
||||
echo '1.0' > VERSION
|
||||
tar -cf layer.tar --files-from /dev/null
|
||||
)
|
||||
)
|
||||
( set -x; tar -cC "$dir" . | docker load )
|
||||
rm -rf "$dir"
|
||||
fi
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue