From 9b88295f692a31cf772f185e5dba642559c81212 Mon Sep 17 00:00:00 2001
From: Nalin Dahyabhai
Date: Tue, 22 Nov 2016 14:32:10 -0500
Subject: [PATCH] Update containers/storage and containers/image

Update the versions of containers/storage and containers/image, and add
new dependencies that they pull in.

Signed-off-by: Nalin Dahyabhai
---
 hack/.vendor-helpers.sh | 2 +-
 hack/vendor.sh | 14 +-
 server/image_pull.go | 10 +-
 .../containers/image/copy/compression.go | 61 +
 .../github.com/containers/image/copy/copy.go | 568 ++
 .../image/directory/directory_dest.go | 63 +-
 .../image/directory/directory_src.go | 16 +-
 .../image/directory/directory_transport.go | 13 +-
 .../image/docker/daemon/daemon_dest.go | 312 ++
 .../image/docker/daemon/daemon_src.go | 361 +++
 .../image/docker/daemon/daemon_transport.go | 179 ++
 .../image/docker/daemon/daemon_types.go | 53 +
 .../containers/image/docker/docker_client.go | 269 +-
 .../containers/image/docker/docker_image.go | 6 +-
 .../image/docker/docker_image_dest.go | 139 +-
 .../image/docker/docker_image_src.go | 101 +-
 .../image/docker/docker_transport.go | 6 +-
 .../containers/image/docker/lookaside.go | 5 +-
 .../docker/policyconfiguration/naming.go | 2 +-
 .../containers/image/docker/reference/doc.go | 6 +
 .../image/docker/reference/reference.go | 220 ++
 .../containers/image/image/docker_list.go | 64 +
 .../containers/image/image/docker_schema1.go | 327 ++
 .../containers/image/image/docker_schema2.go | 299 ++
 .../containers/image/image/image.go | 365 ---
 .../containers/image/image/manifest.go | 121 +
 .../containers/image/image/memory.go | 74 +
 .../github.com/containers/image/image/oci.go | 167 ++
 .../containers/image/image/sourced.go | 90 +
 .../containers/image/image/unparsed.go | 83 +
 .../containers/image/manifest/manifest.go | 35 +-
 .../containers/image/oci/layout/oci_dest.go | 85 +-
 .../containers/image/oci/layout/oci_src.go | 94 +
 .../image/oci/layout/oci_transport.go | 22 +-
 .../containers/image/openshift/openshift.go | 91 +-
 .../image/openshift/openshift_transport.go | 14 +-
 .../containers/image/signature/docker.go | 61 +
 .../containers/image/signature/json.go | 107 +
 .../containers/image/signature/mechanism.go | 124 +
 .../image/signature/policy_config.go | 732 +++++
 .../containers/image/signature/policy_eval.go | 287 ++
 .../image/signature/policy_eval_baselayer.go | 18 +
 .../image/signature/policy_eval_signedby.go | 138 +
 .../image/signature/policy_eval_simple.go | 28 +
 .../image/signature/policy_reference_match.go | 101 +
 .../image/signature/policy_types.go | 152 +
 .../containers/image/signature/signature.go | 192 ++
 .../containers/image/storage/storage_image.go | 570 ++++
 .../image/storage/storage_reference.go | 127 +
 .../image/storage/storage_transport.go | 285 ++
 .../containers/image/transports/transports.go | 6 +
 .../containers/image/types/types.go | 135 +-
 .../src/github.com/containers/storage/LICENSE | 191 ++
 .../containers/storage/drivers/aufs/aufs.go | 586 ++++
 .../containers/storage/drivers/aufs/dirs.go | 64 +
 .../containers/storage/drivers/aufs/mount.go | 21 +
 .../storage/drivers/aufs/mount_linux.go | 7 +
 .../storage/drivers/aufs/mount_unsupported.go | 12 +
 .../containers/storage/drivers/btrfs/btrfs.go | 520 ++++
 .../drivers/btrfs/dummy_unsupported.go | 3 +
 .../storage/drivers/btrfs/version.go | 26 +
 .../storage/drivers/btrfs/version_none.go | 14 +
 .../containers/storage/drivers/counter.go | 67 +
 .../storage/drivers/devmapper/deviceset.go | 2661 +++++++++++++++++
 .../drivers/devmapper/devmapper_doc.go | 106 +
 .../storage/drivers/devmapper/driver.go | 226 ++
 .../storage/drivers/devmapper/mount.go | 89 +
 .../containers/storage/drivers/driver.go | 243 ++
 .../storage/drivers/driver_freebsd.go | 19 +
 .../storage/drivers/driver_linux.go | 133 +
 .../storage/drivers/driver_solaris.go | 65 +
 .../storage/drivers/driver_unsupported.go | 15 +
 .../storage/drivers/driver_windows.go | 14 +
 .../containers/storage/drivers/fsdiff.go | 157 +
 .../storage/drivers/overlay/copy.go | 169 ++
 .../storage/drivers/overlay/overlay.go | 450 +++
 .../drivers/overlay/overlay_unsupported.go | 3 +
 .../storage/drivers/overlay2/mount.go | 88 +
 .../storage/drivers/overlay2/overlay.go | 514 ++++
 .../drivers/overlay2/overlay_unsupported.go | 3 +
 .../storage/drivers/overlay2/randomid.go | 80 +
 .../containers/storage/drivers/plugin.go | 32 +
 .../storage/drivers/plugin_unsupported.go | 7 +
 .../containers/storage/drivers/proxy.go | 225 ++
 .../storage/drivers/register/register_aufs.go | 8 +
 .../drivers/register/register_btrfs.go | 8 +
 .../drivers/register/register_devicemapper.go | 8 +
 .../drivers/register/register_overlay.go | 9 +
 .../storage/drivers/register/register_vfs.go | 6 +
 .../drivers/register/register_windows.go | 6 +
 .../storage/drivers/register/register_zfs.go | 8 +
 .../containers/storage/drivers/vfs/driver.go | 145 +
 .../storage/drivers/zfs/MAINTAINERS | 2 +
 .../containers/storage/drivers/zfs/zfs.go | 405 +++
 .../storage/drivers/zfs/zfs_freebsd.go | 38 +
 .../storage/drivers/zfs/zfs_linux.go | 27 +
 .../storage/drivers/zfs/zfs_solaris.go | 59 +
 .../storage/drivers/zfs/zfs_unsupported.go | 11 +
 .../containers/storage/pkg/archive/archive.go | 1147 +++++++
 .../storage/pkg/archive/archive_linux.go | 91 +
 .../storage/pkg/archive/archive_other.go | 7 +
 .../storage/pkg/archive/archive_unix.go | 112 +
 .../storage/pkg/archive/archive_windows.go | 70 +
 .../containers/storage/pkg/archive/changes.go | 446 +++
 .../storage/pkg/archive/changes_linux.go | 312 ++
 .../storage/pkg/archive/changes_other.go | 97 +
 .../storage/pkg/archive/changes_unix.go | 36 +
 .../storage/pkg/archive/changes_windows.go | 30 +
 .../containers/storage/pkg/archive/copy.go | 458 +++
 .../storage/pkg/archive/copy_unix.go | 11 +
 .../storage/pkg/archive/copy_windows.go | 9 +
 .../containers/storage/pkg/archive/diff.go | 279 ++
 .../storage/pkg/archive/example_changes.go | 97 +
 .../storage/pkg/archive/time_linux.go | 16 +
 .../storage/pkg/archive/time_unsupported.go | 16 +
 .../storage/pkg/archive/whiteouts.go | 23 +
 .../containers/storage/pkg/archive/wrap.go | 59 +
 .../storage/pkg/chrootarchive/archive.go | 97 +
 .../storage/pkg/chrootarchive/archive_unix.go | 86 +
 .../pkg/chrootarchive/archive_windows.go | 22 +
 .../storage/pkg/chrootarchive/chroot_linux.go | 103 +
 .../storage/pkg/chrootarchive/chroot_unix.go | 12 +
 .../storage/pkg/chrootarchive/diff.go | 19 +
 .../storage/pkg/chrootarchive/diff_unix.go | 120 +
 .../storage/pkg/chrootarchive/diff_windows.go | 44 +
 .../storage/pkg/chrootarchive/init_unix.go | 28 +
 .../storage/pkg/chrootarchive/init_windows.go | 4 +
 .../storage/pkg/devicemapper/devmapper.go | 807 +++++
 .../storage/pkg/devicemapper/devmapper_log.go | 35 +
 .../pkg/devicemapper/devmapper_wrapper.go | 251 ++
 .../devmapper_wrapper_deferred_remove.go | 34 +
 .../devmapper_wrapper_no_deferred_remove.go | 15 +
 .../storage/pkg/devicemapper/ioctl.go | 27 +
 .../storage/pkg/devicemapper/log.go | 11 +
 .../storage/pkg/directory/directory.go | 26 +
 .../storage/pkg/directory/directory_unix.go | 48 +
 .../pkg/directory/directory_windows.go | 37 +
 .../storage/pkg/fileutils/fileutils.go | 283 ++
 .../storage/pkg/fileutils/fileutils_darwin.go | 27 +
 .../pkg/fileutils/fileutils_solaris.go | 7 +
 .../storage/pkg/fileutils/fileutils_unix.go | 22 +
 .../pkg/fileutils/fileutils_windows.go | 7 +
 .../containers/storage/pkg/homedir/homedir.go | 39 +
 .../containers/storage/pkg/idtools/idtools.go | 197 ++
 .../storage/pkg/idtools/idtools_unix.go | 60 +
 .../storage/pkg/idtools/idtools_windows.go | 18 +
 .../storage/pkg/idtools/usergroupadd_linux.go | 188 ++
 .../pkg/idtools/usergroupadd_unsupported.go | 12 +
 .../containers/storage/pkg/ioutils/buffer.go | 51 +
 .../containers/storage/pkg/ioutils/fmt.go | 22 +
 .../storage/pkg/ioutils/fswriters.go | 82 +
 .../storage/pkg/ioutils/multireader.go | 226 ++
 .../containers/storage/pkg/ioutils/readers.go | 154 +
 .../storage/pkg/ioutils/temp_unix.go | 10 +
 .../storage/pkg/ioutils/temp_windows.go | 18 +
 .../storage/pkg/ioutils/writeflusher.go | 92 +
 .../containers/storage/pkg/ioutils/writers.go | 66 +
 .../storage/pkg/loopback/attach_loopback.go | 137 +
 .../containers/storage/pkg/loopback/ioctl.go | 53 +
 .../storage/pkg/loopback/loop_wrapper.go | 52 +
 .../storage/pkg/loopback/loopback.go | 63 +
 .../containers/storage/pkg/mflag/LICENSE | 27 +
 .../containers/storage/pkg/mount/flags.go | 149 +
 .../storage/pkg/mount/flags_freebsd.go | 48 +
 .../storage/pkg/mount/flags_linux.go | 85 +
 .../storage/pkg/mount/flags_unsupported.go | 30 +
 .../containers/storage/pkg/mount/mount.go | 74 +
 .../storage/pkg/mount/mounter_freebsd.go | 59 +
 .../storage/pkg/mount/mounter_linux.go | 21 +
 .../storage/pkg/mount/mounter_solaris.go | 33 +
 .../storage/pkg/mount/mounter_unsupported.go | 11 +
 .../containers/storage/pkg/mount/mountinfo.go | 40 +
 .../storage/pkg/mount/mountinfo_freebsd.go | 41 +
 .../storage/pkg/mount/mountinfo_linux.go | 95 +
 .../storage/pkg/mount/mountinfo_solaris.go | 37 +
 .../pkg/mount/mountinfo_unsupported.go | 12 +
 .../storage/pkg/mount/mountinfo_windows.go | 6 +
 .../storage/pkg/mount/sharedsubtree_linux.go | 69 +
 .../storage/pkg/parsers/kernel/kernel.go | 74 +
 .../pkg/parsers/kernel/kernel_darwin.go | 56 +
 .../storage/pkg/parsers/kernel/kernel_unix.go | 30 +
 .../pkg/parsers/kernel/kernel_windows.go | 69 +
 .../storage/pkg/parsers/kernel/uname_linux.go | 19 +
 .../pkg/parsers/kernel/uname_solaris.go | 14 +
 .../pkg/parsers/kernel/uname_unsupported.go | 18 +
 .../containers/storage/pkg/parsers/parsers.go | 69 +
 .../containers/storage/pkg/pools/pools.go | 119 +
 .../containers/storage/pkg/promise/promise.go | 11 +
 .../containers/storage/pkg/random/random.go | 71 +
 .../storage/pkg/reexec/command_linux.go | 28 +
 .../storage/pkg/reexec/command_unix.go | 23 +
 .../storage/pkg/reexec/command_unsupported.go | 12 +
 .../storage/pkg/reexec/command_windows.go | 23 +
 .../containers/storage/pkg/reexec/reexec.go | 47 +
 .../storage/pkg/stringid/stringid.go | 71 +
 .../containers/storage/pkg/system/chtimes.go | 52 +
 .../storage/pkg/system/chtimes_unix.go | 14 +
 .../storage/pkg/system/chtimes_windows.go | 27 +
 .../containers/storage/pkg/system/errors.go | 10 +
 .../storage/pkg/system/events_windows.go | 83 +
 .../containers/storage/pkg/system/filesys.go | 19 +
 .../storage/pkg/system/filesys_windows.go | 82 +
 .../containers/storage/pkg/system/lstat.go | 19 +
 .../storage/pkg/system/lstat_windows.go | 25 +
 .../containers/storage/pkg/system/meminfo.go | 17 +
 .../storage/pkg/system/meminfo_linux.go | 65 +
 .../storage/pkg/system/meminfo_solaris.go | 128 +
 .../storage/pkg/system/meminfo_unsupported.go | 8 +
 .../storage/pkg/system/meminfo_windows.go | 44 +
 .../containers/storage/pkg/system/mknod.go | 22 +
 .../storage/pkg/system/mknod_windows.go | 13 +
 .../storage/pkg/system/path_unix.go | 14 +
 .../storage/pkg/system/path_windows.go | 37 +
 .../containers/storage/pkg/system/stat.go | 53 +
 .../storage/pkg/system/stat_freebsd.go | 27 +
 .../storage/pkg/system/stat_linux.go | 33 +
 .../storage/pkg/system/stat_openbsd.go | 15 +
 .../storage/pkg/system/stat_solaris.go | 34 +
 .../storage/pkg/system/stat_unsupported.go | 17 +
 .../storage/pkg/system/stat_windows.go | 43 +
 .../storage/pkg/system/syscall_unix.go | 17 +
 .../storage/pkg/system/syscall_windows.go | 103 +
 .../containers/storage/pkg/system/umask.go | 13 +
 .../storage/pkg/system/umask_windows.go | 9 +
 .../storage/pkg/system/utimes_darwin.go | 8 +
 .../storage/pkg/system/utimes_freebsd.go | 22 +
 .../storage/pkg/system/utimes_linux.go | 26 +
 .../storage/pkg/system/utimes_unsupported.go | 10 +
 .../storage/pkg/system/xattrs_linux.go | 63 +
 .../storage/pkg/system/xattrs_unsupported.go | 13 +
 .../containers/storage/storage/containers.go | 478 +++
 .../containers/storage/storage/images.go | 448 +++
 .../containers/storage/storage/layers.go | 881 ++++++
 .../containers/storage/storage/lockfile.go | 138 +
 .../containers/storage/storage/store.go | 2171 ++++++++++++++
 .../storage/storageversion/version_lib.go | 13 +
 .../docker/distribution/CHANGELOG.md | 2 +-
 .../docker/distribution/RELEASE-CHECKLIST.md | 3 +-
 .../registry/api/errcode/errors.go | 267 ++
 .../registry/api/errcode/handler.go | 44 +
 .../registry/api/errcode/register.go | 138 +
 .../registry/api/v2/descriptors.go | 1596 ++++++++++
 .../distribution/registry/api/v2/doc.go | 9 +
 .../distribution/registry/api/v2/errors.go | 136 +
 .../registry/api/v2/headerparser.go | 161 +
 .../distribution/registry/api/v2/routes.go | 49 +
 .../distribution/registry/api/v2/urls.go | 314 ++
 .../registry/client/auth/challenge/addr.go | 27 +
 .../client/auth/challenge/authchallenge.go | 237 ++
 .../registry/client/blob_writer.go | 162 +
 .../distribution/registry/client/errors.go | 139 +
 .../registry/client/repository.go | 853 ++++++
 .../registry/client/transport/http_reader.go | 251 ++
 .../registry/client/transport/transport.go | 147 +
 .../registry/storage/cache/cache.go | 35 +
 .../cache/cachedblobdescriptorstore.go | 101 +
 .../registry/storage/cache/memory/memory.go | 179 ++
 .../src/github.com/docker/engine-api/LICENSE | 191 ++
 .../engine-api/client/checkpoint_create.go | 13 +
 .../engine-api/client/checkpoint_delete.go | 12 +
 .../engine-api/client/checkpoint_list.go | 22 +
 .../docker/engine-api/client/client.go | 153 +
 .../docker/engine-api/client/client_darwin.go | 4 +
 .../docker/engine-api/client/client_unix.go | 6 +
 .../engine-api/client/client_windows.go | 4 +
 .../engine-api/client/container_attach.go | 34 +
 .../engine-api/client/container_commit.go | 53 +
 .../engine-api/client/container_copy.go | 97 +
 .../engine-api/client/container_create.go | 46 +
 .../engine-api/client/container_diff.go | 23 +
 .../engine-api/client/container_exec.go | 49 +
 .../engine-api/client/container_export.go | 20 +
 .../engine-api/client/container_inspect.go | 54 +
 .../engine-api/client/container_kill.go | 17 +
 .../engine-api/client/container_list.go | 56 +
 .../engine-api/client/container_logs.go | 52 +
 .../engine-api/client/container_pause.go | 10 +
 .../engine-api/client/container_remove.go | 27 +
 .../engine-api/client/container_rename.go | 16 +
 .../engine-api/client/container_resize.go | 29 +
 .../engine-api/client/container_restart.go | 22 +
 .../engine-api/client/container_start.go | 21 +
 .../engine-api/client/container_stats.go | 24 +
 .../engine-api/client/container_stop.go | 21 +
 .../docker/engine-api/client/container_top.go | 28 +
 .../engine-api/client/container_unpause.go | 10 +
 .../engine-api/client/container_update.go | 13 +
 .../engine-api/client/container_wait.go | 26 +
 .../docker/engine-api/client/errors.go | 203 ++
 .../docker/engine-api/client/events.go | 48 +
 .../docker/engine-api/client/hijack.go | 174 ++
 .../docker/engine-api/client/image_build.go | 119 +
 .../docker/engine-api/client/image_create.go | 34 +
 .../docker/engine-api/client/image_history.go | 22 +
 .../docker/engine-api/client/image_import.go | 37 +
 .../docker/engine-api/client/image_inspect.go | 38 +
 .../docker/engine-api/client/image_list.go | 40 +
 .../docker/engine-api/client/image_load.go | 30 +
 .../docker/engine-api/client/image_pull.go | 46 +
 .../docker/engine-api/client/image_push.go | 54 +
 .../docker/engine-api/client/image_remove.go | 31 +
 .../docker/engine-api/client/image_save.go | 22 +
 .../docker/engine-api/client/image_search.go | 51 +
 .../docker/engine-api/client/image_tag.go | 34 +
 .../docker/engine-api/client/info.go | 26 +
 .../docker/engine-api/client/interface.go | 135 +
 .../client/interface_experimental.go | 37 +
 .../engine-api/client/interface_stable.go | 11 +
 .../docker/engine-api/client/login.go | 28 +
 .../engine-api/client/network_connect.go | 18 +
 .../engine-api/client/network_create.go | 25 +
 .../engine-api/client/network_disconnect.go | 14 +
 .../engine-api/client/network_inspect.go | 38 +
 .../docker/engine-api/client/network_list.go | 31 +
 .../engine-api/client/network_remove.go | 10 +
 .../docker/engine-api/client/node_inspect.go | 33 +
 .../docker/engine-api/client/node_list.go | 36 +
 .../docker/engine-api/client/node_remove.go | 10 +
 .../docker/engine-api/client/node_update.go | 18 +
 .../engine-api/client/plugin_disable.go | 14 +
 .../docker/engine-api/client/plugin_enable.go | 14 +
 .../engine-api/client/plugin_inspect.go | 22 +
 .../engine-api/client/plugin_install.go | 59 +
 .../docker/engine-api/client/plugin_list.go | 23 +
 .../docker/engine-api/client/plugin_push.go | 15 +
 .../docker/engine-api/client/plugin_remove.go | 14 +
 .../docker/engine-api/client/plugin_set.go | 14 +
 .../docker/engine-api/client/request.go | 207 ++
 .../engine-api/client/service_create.go | 30 +
 .../engine-api/client/service_inspect.go | 33 +
 .../docker/engine-api/client/service_list.go | 35 +
 .../engine-api/client/service_remove.go | 10 +
 .../engine-api/client/service_update.go | 30 +
 .../docker/engine-api/client/swarm_init.go | 21 +
 .../docker/engine-api/client/swarm_inspect.go | 21 +
 .../docker/engine-api/client/swarm_join.go | 13 +
 .../docker/engine-api/client/swarm_leave.go | 18 +
 .../docker/engine-api/client/swarm_update.go | 21 +
 .../docker/engine-api/client/task_inspect.go | 34 +
 .../docker/engine-api/client/task_list.go | 35 +
 .../client/transport/cancellable/LICENSE | 27 +
 .../client/transport/cancellable/canceler.go | 23 +
 .../transport/cancellable/canceler_go14.go | 27 +
 .../transport/cancellable/cancellable.go | 113 +
 .../engine-api/client/transport/client.go | 47 +
 .../engine-api/client/transport/transport.go | 57 +
 .../docker/engine-api/client/version.go | 21 +
 .../docker/engine-api/client/volume_create.go | 20 +
 .../engine-api/client/volume_inspect.go | 38 +
 .../docker/engine-api/client/volume_list.go | 32 +
 .../docker/engine-api/client/volume_remove.go | 10 +
 .../docker/engine-api/types/auth.go | 22 +
 .../docker/engine-api/types/blkiodev/blkio.go | 23 +
 .../docker/engine-api/types/client.go | 286 ++
 .../docker/engine-api/types/configs.go | 53 +
 .../engine-api/types/container/config.go | 62 +
 .../engine-api/types/container/host_config.go | 320 ++
 .../types/container/hostconfig_unix.go | 81 +
 .../types/container/hostconfig_windows.go | 87 +
 .../docker/engine-api/types/errors.go | 6 +
 .../docker/engine-api/types/filters/parse.go | 307 ++
 .../engine-api/types/network/network.go | 53 +
 .../docker/engine-api/types/plugin.go | 169 ++
 .../types/reference/image_reference.go | 34 +
 .../engine-api/types/registry/registry.go | 99 +
 .../docker/engine-api/types/seccomp.go | 73 +
 .../docker/engine-api/types/stats.go | 115 +
 .../engine-api/types/strslice/strslice.go | 30 +
 .../docker/engine-api/types/swarm/common.go | 21 +
 .../engine-api/types/swarm/container.go | 67 +
 .../docker/engine-api/types/swarm/network.go | 99 +
 .../docker/engine-api/types/swarm/node.go | 107 +
 .../docker/engine-api/types/swarm/service.go | 73 +
 .../docker/engine-api/types/swarm/swarm.go | 141 +
 .../docker/engine-api/types/swarm/task.go | 115 +
 .../engine-api/types/time/duration_convert.go | 12 +
 .../docker/engine-api/types/time/timestamp.go | 124 +
 .../docker/engine-api/types/types.go | 515 ++++
 .../engine-api/types/versions/README.md | 14 +
 .../engine-api/types/versions/compare.go | 62 +
 .../engine-api/types/versions/v1p20/types.go | 40 +
 .../docker/go-connections/nat/nat.go | 49 +-
 .../docker/go-connections/nat/parse.go | 1 +
 .../docker/go-connections/sockets/sockets.go | 16 +-
 .../go-connections/sockets/sockets_unix.go | 20 +
 .../go-connections/sockets/sockets_windows.go | 14 +
 .../go-connections/sockets/tcp_socket.go | 2 +-
 .../go-connections/tlsconfig/certpool_go17.go | 21 +
 .../tlsconfig/certpool_other.go | 16 +
 .../docker/go-connections/tlsconfig/config.go | 7 +-
 .../github.com/mattn/go-runewidth/.travis.yml | 8 +
 .../src/github.com/mattn/go-runewidth/LICENSE | 21 +
 .../github.com/mattn/go-runewidth/README.mkd | 27 +
 .../mattn/go-runewidth/runewidth.go | 481 +++
 .../mattn/go-runewidth/runewidth_js.go | 8 +
 .../mattn/go-runewidth/runewidth_posix.go | 77 +
 .../mattn/go-runewidth/runewidth_windows.go | 25 +
 .../github.com/mistifyio/go-zfs/.gitignore | 1 +
 .../github.com/mistifyio/go-zfs/.travis.yml | 41 +
 .../mistifyio/go-zfs/CONTRIBUTING.md | 60 +
 .../src/github.com/mistifyio/go-zfs/LICENSE | 201 ++
 .../src/github.com/mistifyio/go-zfs/README.md | 54 +
 .../github.com/mistifyio/go-zfs/Vagrantfile | 34 +
 .../src/github.com/mistifyio/go-zfs/error.go | 18 +
 .../src/github.com/mistifyio/go-zfs/utils.go | 352 +++
 .../mistifyio/go-zfs/utils_notsolaris.go | 17 +
 .../mistifyio/go-zfs/utils_solaris.go | 17 +
 vendor/src/github.com/mistifyio/go-zfs/zfs.go | 451 +++
 .../src/github.com/mistifyio/go-zfs/zpool.go | 112 +
 vendor/src/github.com/mtrmac/gpgme/.gitignore | 1 +
 vendor/src/github.com/mtrmac/gpgme/LICENSE | 12 +
 vendor/src/github.com/mtrmac/gpgme/README.md | 13 +
 .../src/github.com/mtrmac/gpgme/callbacks.go | 42 +
 vendor/src/github.com/mtrmac/gpgme/data.go | 191 ++
 vendor/src/github.com/mtrmac/gpgme/go_gpgme.c | 89 +
 vendor/src/github.com/mtrmac/gpgme/go_gpgme.h | 37 +
 vendor/src/github.com/mtrmac/gpgme/gpgme.go | 740 +++++
 .../image-spec/specs-go/v1/config.go | 38 +-
 .../specs-go/{ => v1}/descriptor.go | 5 +-
 .../image-spec/specs-go/v1/layout.go | 28 +
 .../image-spec/specs-go/v1/manifest.go | 6 +-
 .../image-spec/specs-go/v1/manifest_list.go | 4 +-
 .../image-spec/specs-go/v1/mediatype.go | 4 +-
 .../image-spec/specs-go/version.go | 6 +-
 .../image-spec/specs-go/versioned.go | 2 +-
 .../src/github.com/pborman/uuid/.travis.yml | 9 +
 .../github.com/pborman/uuid/CONTRIBUTING.md | 10 +
 .../src/github.com/pborman/uuid/CONTRIBUTORS | 1 +
 vendor/src/github.com/pborman/uuid/LICENSE | 27 +
 vendor/src/github.com/pborman/uuid/README.md | 13 +
 vendor/src/github.com/pborman/uuid/dce.go | 84 +
 vendor/src/github.com/pborman/uuid/doc.go | 8 +
 vendor/src/github.com/pborman/uuid/hash.go | 53 +
 vendor/src/github.com/pborman/uuid/json.go | 34 +
 vendor/src/github.com/pborman/uuid/node.go | 117 +
 vendor/src/github.com/pborman/uuid/sql.go | 66 +
 vendor/src/github.com/pborman/uuid/time.go | 132 +
 vendor/src/github.com/pborman/uuid/util.go | 43 +
 vendor/src/github.com/pborman/uuid/uuid.go | 201 ++
 .../src/github.com/pborman/uuid/version1.go | 41 +
 .../src/github.com/pborman/uuid/version4.go | 25 +
 .../src/gopkg.in/cheggaaa/pb.v1/.travis.yml | 7 +
 vendor/src/gopkg.in/cheggaaa/pb.v1/LICENSE | 12 +
 vendor/src/gopkg.in/cheggaaa/pb.v1/README.md | 176 ++
 vendor/src/gopkg.in/cheggaaa/pb.v1/format.go | 87 +
 vendor/src/gopkg.in/cheggaaa/pb.v1/pb.go | 444 +++
 .../gopkg.in/cheggaaa/pb.v1/pb_appengine.go | 11 +
 vendor/src/gopkg.in/cheggaaa/pb.v1/pb_nix.go | 8 +
 .../src/gopkg.in/cheggaaa/pb.v1/pb_solaris.go | 6 +
 vendor/src/gopkg.in/cheggaaa/pb.v1/pb_win.go | 141 +
 vendor/src/gopkg.in/cheggaaa/pb.v1/pb_x.go | 110 +
 vendor/src/gopkg.in/cheggaaa/pb.v1/pool.go | 76 +
 .../src/gopkg.in/cheggaaa/pb.v1/pool_win.go | 35 +
 vendor/src/gopkg.in/cheggaaa/pb.v1/pool_x.go | 22 +
 vendor/src/gopkg.in/cheggaaa/pb.v1/reader.go | 25 +
 .../src/gopkg.in/cheggaaa/pb.v1/runecount.go | 17 +
 .../gopkg.in/cheggaaa/pb.v1/termios_bsd.go | 9 +
 .../gopkg.in/cheggaaa/pb.v1/termios_nix.go | 7 +
 458 files changed, 45347 insertions(+), 726 deletions(-)

 create mode 100644 vendor/src/github.com/containers/image/copy/compression.go
 create mode 100644 vendor/src/github.com/containers/image/copy/copy.go
 create mode 100644 vendor/src/github.com/containers/image/docker/daemon/daemon_dest.go
 create mode 100644 vendor/src/github.com/containers/image/docker/daemon/daemon_src.go
 create mode 100644 vendor/src/github.com/containers/image/docker/daemon/daemon_transport.go
 create mode 100644 vendor/src/github.com/containers/image/docker/daemon/daemon_types.go
 create mode 100644 vendor/src/github.com/containers/image/docker/reference/doc.go
 create mode 100644 vendor/src/github.com/containers/image/docker/reference/reference.go
 create mode 100644 vendor/src/github.com/containers/image/image/docker_list.go
 create mode 100644 vendor/src/github.com/containers/image/image/docker_schema1.go
 create mode 100644 vendor/src/github.com/containers/image/image/docker_schema2.go
 delete mode 100644 vendor/src/github.com/containers/image/image/image.go
 create mode 100644 vendor/src/github.com/containers/image/image/manifest.go
 create mode 100644 vendor/src/github.com/containers/image/image/memory.go
 create mode 100644 vendor/src/github.com/containers/image/image/oci.go
 create mode 100644 vendor/src/github.com/containers/image/image/sourced.go
 create mode 100644 vendor/src/github.com/containers/image/image/unparsed.go
 create mode 100644 vendor/src/github.com/containers/image/oci/layout/oci_src.go
 create mode 100644 vendor/src/github.com/containers/image/signature/docker.go
 create mode 100644 vendor/src/github.com/containers/image/signature/json.go
 create mode 100644 vendor/src/github.com/containers/image/signature/mechanism.go
 create mode 100644 vendor/src/github.com/containers/image/signature/policy_config.go
 create mode 100644 vendor/src/github.com/containers/image/signature/policy_eval.go
 create mode 100644 vendor/src/github.com/containers/image/signature/policy_eval_baselayer.go
 create mode 100644 vendor/src/github.com/containers/image/signature/policy_eval_signedby.go
 create mode 100644 vendor/src/github.com/containers/image/signature/policy_eval_simple.go
 create mode 100644 vendor/src/github.com/containers/image/signature/policy_reference_match.go
 create mode 100644 vendor/src/github.com/containers/image/signature/policy_types.go
 create mode 100644 vendor/src/github.com/containers/image/signature/signature.go
 create mode 100644 vendor/src/github.com/containers/image/storage/storage_image.go
 create mode 100644 vendor/src/github.com/containers/image/storage/storage_reference.go
 create mode 100644 vendor/src/github.com/containers/image/storage/storage_transport.go
 create mode 100644 vendor/src/github.com/containers/storage/LICENSE
 create mode 100644 vendor/src/github.com/containers/storage/drivers/aufs/aufs.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/aufs/dirs.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/aufs/mount.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/aufs/mount_linux.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/aufs/mount_unsupported.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/btrfs/btrfs.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/btrfs/version.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/btrfs/version_none.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/counter.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/devmapper/deviceset.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/devmapper/devmapper_doc.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/devmapper/driver.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/devmapper/mount.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/driver.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/driver_freebsd.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/driver_linux.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/driver_solaris.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/driver_unsupported.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/driver_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/fsdiff.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/overlay/copy.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/overlay/overlay.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/overlay/overlay_unsupported.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/overlay2/mount.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/overlay2/overlay.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/overlay2/overlay_unsupported.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/overlay2/randomid.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/plugin.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/plugin_unsupported.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/proxy.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/register/register_aufs.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/register/register_btrfs.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/register/register_devicemapper.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/register/register_overlay.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/register/register_vfs.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/register/register_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/register/register_zfs.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/vfs/driver.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/zfs/MAINTAINERS
 create mode 100644 vendor/src/github.com/containers/storage/drivers/zfs/zfs.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/zfs/zfs_linux.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/zfs/zfs_solaris.go
 create mode 100644 vendor/src/github.com/containers/storage/drivers/zfs/zfs_unsupported.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/archive/archive.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/archive/archive_linux.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/archive/archive_other.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/archive/archive_unix.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/archive/archive_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/archive/changes.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/archive/changes_linux.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/archive/changes_other.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/archive/changes_unix.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/archive/changes_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/archive/copy.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/archive/copy_unix.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/archive/copy_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/archive/diff.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/archive/example_changes.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/archive/time_linux.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/archive/time_unsupported.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/archive/whiteouts.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/archive/wrap.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/chrootarchive/archive.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/chrootarchive/archive_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/chrootarchive/diff.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/chrootarchive/diff_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/chrootarchive/init_unix.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/chrootarchive/init_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/devicemapper/devmapper.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/devicemapper/devmapper_log.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/devicemapper/ioctl.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/devicemapper/log.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/directory/directory.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/directory/directory_unix.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/directory/directory_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/fileutils/fileutils.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/fileutils/fileutils_unix.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/fileutils/fileutils_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/homedir/homedir.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/idtools/idtools.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/idtools/idtools_unix.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/idtools/idtools_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/ioutils/buffer.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/ioutils/fmt.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/ioutils/fswriters.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/ioutils/multireader.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/ioutils/readers.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/ioutils/temp_unix.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/ioutils/temp_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/ioutils/writeflusher.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/ioutils/writers.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/loopback/attach_loopback.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/loopback/ioctl.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/loopback/loop_wrapper.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/loopback/loopback.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/mflag/LICENSE
 create mode 100644 vendor/src/github.com/containers/storage/pkg/mount/flags.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/mount/flags_freebsd.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/mount/flags_linux.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/mount/flags_unsupported.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/mount/mount.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/mount/mounter_freebsd.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/mount/mounter_linux.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/mount/mounter_solaris.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/mount/mounter_unsupported.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/mount/mountinfo.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/mount/mountinfo_linux.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/mount/mountinfo_solaris.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/mount/mountinfo_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/parsers/kernel/kernel.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/parsers/parsers.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/pools/pools.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/promise/promise.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/random/random.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/reexec/command_linux.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/reexec/command_unix.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/reexec/command_unsupported.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/reexec/command_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/reexec/reexec.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/stringid/stringid.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/chtimes.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/chtimes_unix.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/chtimes_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/errors.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/events_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/filesys.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/filesys_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/lstat.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/lstat_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/meminfo.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/meminfo_linux.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/meminfo_solaris.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/meminfo_unsupported.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/meminfo_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/mknod.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/mknod_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/path_unix.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/path_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/stat.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/stat_freebsd.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/stat_linux.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/stat_openbsd.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/stat_solaris.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/stat_unsupported.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/stat_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/syscall_unix.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/syscall_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/umask.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/umask_windows.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/utimes_darwin.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/utimes_freebsd.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/utimes_linux.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/utimes_unsupported.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/xattrs_linux.go
 create mode 100644 vendor/src/github.com/containers/storage/pkg/system/xattrs_unsupported.go
 create mode 100644 vendor/src/github.com/containers/storage/storage/containers.go
 create mode 100644 vendor/src/github.com/containers/storage/storage/images.go
 create mode 100644 vendor/src/github.com/containers/storage/storage/layers.go
 create mode 100644 vendor/src/github.com/containers/storage/storage/lockfile.go
 create mode 100644 vendor/src/github.com/containers/storage/storage/store.go
 create mode 100644 vendor/src/github.com/containers/storage/storageversion/version_lib.go
 create mode 100644 vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go
 create mode 100644 vendor/src/github.com/docker/distribution/registry/api/errcode/handler.go
 create mode 100644 vendor/src/github.com/docker/distribution/registry/api/errcode/register.go
 create mode 100644 vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go
 create mode 100644 vendor/src/github.com/docker/distribution/registry/api/v2/doc.go
 create mode 100644 vendor/src/github.com/docker/distribution/registry/api/v2/errors.go
 create mode 100644 vendor/src/github.com/docker/distribution/registry/api/v2/headerparser.go
 create mode 100644 vendor/src/github.com/docker/distribution/registry/api/v2/routes.go
 create mode 100644 vendor/src/github.com/docker/distribution/registry/api/v2/urls.go
 create mode 100644 vendor/src/github.com/docker/distribution/registry/client/auth/challenge/addr.go
 create mode 100644 vendor/src/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
 create mode 100644 vendor/src/github.com/docker/distribution/registry/client/blob_writer.go
 create mode 100644 vendor/src/github.com/docker/distribution/registry/client/errors.go
 create mode 100644 vendor/src/github.com/docker/distribution/registry/client/repository.go
 create mode 100644 vendor/src/github.com/docker/distribution/registry/client/transport/http_reader.go
 create mode 100644 vendor/src/github.com/docker/distribution/registry/client/transport/transport.go
 create mode 100644 vendor/src/github.com/docker/distribution/registry/storage/cache/cache.go
 create mode 100644 vendor/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
 create mode 100644 vendor/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go
 create mode 100644 vendor/src/github.com/docker/engine-api/LICENSE
 create mode 100644 vendor/src/github.com/docker/engine-api/client/checkpoint_create.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/checkpoint_delete.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/checkpoint_list.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/client.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/client_darwin.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/client_unix.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/client_windows.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_attach.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_commit.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_copy.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_create.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_diff.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_exec.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_export.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_inspect.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_kill.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_list.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_logs.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_pause.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_remove.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_rename.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_resize.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_restart.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_start.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_stats.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_stop.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_top.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_unpause.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_update.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/container_wait.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/errors.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/events.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/hijack.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/image_build.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/image_create.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/image_history.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/image_import.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/image_inspect.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/image_list.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/image_load.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/image_pull.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/image_push.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/image_remove.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/image_save.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/image_search.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/image_tag.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/info.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/interface.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/interface_experimental.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/interface_stable.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/login.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/network_connect.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/network_create.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/network_disconnect.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/network_inspect.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/network_list.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/network_remove.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/node_inspect.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/node_list.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/node_remove.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/node_update.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/plugin_disable.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/plugin_enable.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/plugin_inspect.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/plugin_install.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/plugin_list.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/plugin_push.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/plugin_remove.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/plugin_set.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/request.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/service_create.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/service_inspect.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/service_list.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/service_remove.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/service_update.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/swarm_init.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/swarm_inspect.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/swarm_join.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/swarm_leave.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/swarm_update.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/task_inspect.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/task_list.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/transport/cancellable/LICENSE
 create mode 100644 vendor/src/github.com/docker/engine-api/client/transport/cancellable/canceler.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/transport/cancellable/cancellable.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/transport/client.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/transport/transport.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/version.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/volume_create.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/volume_inspect.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/volume_list.go
 create mode 100644 vendor/src/github.com/docker/engine-api/client/volume_remove.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/auth.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/blkiodev/blkio.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/client.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/configs.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/container/config.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/container/host_config.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/container/hostconfig_unix.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/container/hostconfig_windows.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/errors.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/filters/parse.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/network/network.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/plugin.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/reference/image_reference.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/registry/registry.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/seccomp.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/stats.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/strslice/strslice.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/swarm/common.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/swarm/container.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/swarm/network.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/swarm/node.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/swarm/service.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/swarm/swarm.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/swarm/task.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/time/duration_convert.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/time/timestamp.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/types.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/versions/README.md
 create mode 100644 vendor/src/github.com/docker/engine-api/types/versions/compare.go
 create mode 100644 vendor/src/github.com/docker/engine-api/types/versions/v1p20/types.go
 create mode 100644 vendor/src/github.com/docker/go-connections/tlsconfig/certpool_go17.go
 create mode 100644 vendor/src/github.com/docker/go-connections/tlsconfig/certpool_other.go
 create mode 100644 vendor/src/github.com/mattn/go-runewidth/.travis.yml
 create mode 100644 vendor/src/github.com/mattn/go-runewidth/LICENSE
 create mode 100644 vendor/src/github.com/mattn/go-runewidth/README.mkd
 create mode 100644 vendor/src/github.com/mattn/go-runewidth/runewidth.go
 create mode 100644 vendor/src/github.com/mattn/go-runewidth/runewidth_js.go
 create mode 100644 vendor/src/github.com/mattn/go-runewidth/runewidth_posix.go
 create mode 100644 vendor/src/github.com/mattn/go-runewidth/runewidth_windows.go
 create mode 100644 vendor/src/github.com/mistifyio/go-zfs/.gitignore
 create mode 100644 vendor/src/github.com/mistifyio/go-zfs/.travis.yml
 create mode 100644 vendor/src/github.com/mistifyio/go-zfs/CONTRIBUTING.md
 create mode 100644 vendor/src/github.com/mistifyio/go-zfs/LICENSE
 create mode 100644 vendor/src/github.com/mistifyio/go-zfs/README.md
 create mode 100644 vendor/src/github.com/mistifyio/go-zfs/Vagrantfile
 create mode 100644 vendor/src/github.com/mistifyio/go-zfs/error.go
 create mode 100644 vendor/src/github.com/mistifyio/go-zfs/utils.go
 create mode 100644 vendor/src/github.com/mistifyio/go-zfs/utils_notsolaris.go
 create mode 100644 vendor/src/github.com/mistifyio/go-zfs/utils_solaris.go
 create mode 100644 vendor/src/github.com/mistifyio/go-zfs/zfs.go
 create mode 100644 vendor/src/github.com/mistifyio/go-zfs/zpool.go
 create mode 100644 vendor/src/github.com/mtrmac/gpgme/.gitignore
 create mode 100644 vendor/src/github.com/mtrmac/gpgme/LICENSE
 create mode 100644 vendor/src/github.com/mtrmac/gpgme/README.md
 create mode 100644 vendor/src/github.com/mtrmac/gpgme/callbacks.go
 create mode 100644 vendor/src/github.com/mtrmac/gpgme/data.go
 create mode 100644 vendor/src/github.com/mtrmac/gpgme/go_gpgme.c
 create mode 100644 vendor/src/github.com/mtrmac/gpgme/go_gpgme.h
 create mode 100644 vendor/src/github.com/mtrmac/gpgme/gpgme.go
 rename vendor/src/github.com/opencontainers/image-spec/specs-go/{ => v1}/descriptor.go (88%)
 create mode 100644 vendor/src/github.com/opencontainers/image-spec/specs-go/v1/layout.go
 create mode 100644 vendor/src/github.com/pborman/uuid/.travis.yml
 create mode 100644 vendor/src/github.com/pborman/uuid/CONTRIBUTING.md
 create mode 100644 vendor/src/github.com/pborman/uuid/CONTRIBUTORS
 create mode 100644 vendor/src/github.com/pborman/uuid/LICENSE
 create mode 100644 vendor/src/github.com/pborman/uuid/README.md
 create mode 100644 vendor/src/github.com/pborman/uuid/dce.go
 create mode 100644 vendor/src/github.com/pborman/uuid/doc.go
 create mode 100644 vendor/src/github.com/pborman/uuid/hash.go
 create mode 100644 vendor/src/github.com/pborman/uuid/json.go
 create mode 100644 vendor/src/github.com/pborman/uuid/node.go
 create mode 100644 vendor/src/github.com/pborman/uuid/sql.go
 create mode 100644 vendor/src/github.com/pborman/uuid/time.go
 create mode 100644 vendor/src/github.com/pborman/uuid/util.go
 create mode 100644 vendor/src/github.com/pborman/uuid/uuid.go
 create mode 100644 vendor/src/github.com/pborman/uuid/version1.go
 create mode 100644 vendor/src/github.com/pborman/uuid/version4.go
 create mode 100644 vendor/src/gopkg.in/cheggaaa/pb.v1/.travis.yml
 create mode 100644 vendor/src/gopkg.in/cheggaaa/pb.v1/LICENSE
 create mode 100644 vendor/src/gopkg.in/cheggaaa/pb.v1/README.md
 create mode 100644 vendor/src/gopkg.in/cheggaaa/pb.v1/format.go
 create mode 100644 vendor/src/gopkg.in/cheggaaa/pb.v1/pb.go
 create mode 100644 vendor/src/gopkg.in/cheggaaa/pb.v1/pb_appengine.go
 create mode 100644 vendor/src/gopkg.in/cheggaaa/pb.v1/pb_nix.go
 create mode 100644 vendor/src/gopkg.in/cheggaaa/pb.v1/pb_solaris.go
 create mode 100644 vendor/src/gopkg.in/cheggaaa/pb.v1/pb_win.go
 create mode 100644 vendor/src/gopkg.in/cheggaaa/pb.v1/pb_x.go
 create mode 100644 vendor/src/gopkg.in/cheggaaa/pb.v1/pool.go
 create mode 100644 vendor/src/gopkg.in/cheggaaa/pb.v1/pool_win.go
 create mode 100644 vendor/src/gopkg.in/cheggaaa/pb.v1/pool_x.go
 create mode 100644 vendor/src/gopkg.in/cheggaaa/pb.v1/reader.go
 create mode 100644 vendor/src/gopkg.in/cheggaaa/pb.v1/runecount.go
 create mode 100644 vendor/src/gopkg.in/cheggaaa/pb.v1/termios_bsd.go
 create mode 100644 vendor/src/gopkg.in/cheggaaa/pb.v1/termios_nix.go

diff --git a/hack/.vendor-helpers.sh b/hack/.vendor-helpers.sh
index 44ca62d2..d5086de9 100644
--- a/hack/.vendor-helpers.sh
+++ b/hack/.vendor-helpers.sh
@@ -60,7 +60,7 @@ clean() {
 	local packages=($(GOPATH=$original_GOPATH go list -e ./... | grep -v "^${PROJECT}/vendor"))
 	local platforms=( linux/amd64 linux/386 )
-	local buildTags=( )
+	local buildTags=( seccomp )
 
 	echo
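The buildTags change above is easy to miss but load-bearing: the cleanup script walks the dependency graph with `go list`, and files guarded by a build constraint are invisible unless the matching tag is passed, so without it the seccomp-only sources would be pruned from vendor/. A minimal sketch of the Go build-constraint pattern involved; the file and function names here are hypothetical, for illustration only:

    // profile_seccomp.go (hypothetical) -- compiled only with `-tags seccomp`
    // +build seccomp

    package sandbox

    // applySeccompProfile stands in for any seccomp-gated functionality; the
    // vendor scripts must run `go list` with the same tag, or the files
    // backing it are treated as unused and deleted.
    func applySeccompProfile(profile string) error {
    	return nil // a real build would install the seccomp filter here
    }

    // profile_unsupported.go (hypothetical) -- the fallback without the tag
    // +build !seccomp

    package sandbox

    func applySeccompProfile(profile string) error {
    	return nil // no-op when seccomp support is compiled out
    }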
| grep -v "^${PROJECT}/vendor")) local platforms=( linux/amd64 linux/386 ) - local buildTags=( ) + local buildTags=( seccomp ) echo diff --git a/hack/vendor.sh b/hack/vendor.sh index 2f39b6b9..37ba3c4a 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -49,7 +49,17 @@ esac clone git github.com/coreos/go-systemd master clone git github.com/BurntSushi/toml v0.2.0 clone git github.com/Sirupsen/logrus v0.10.0 -clone git github.com/containers/image f6f11ab5cf8b1e70ef4aa3f8b6fdb4b671d16abd + +clone git github.com/containers/storage master +clone git github.com/mistifyio/go-zfs master +clone git github.com/pborman/uuid master + +clone git github.com/containers/image master +clone git github.com/mtrmac/gpgme master +clone git gopkg.in/cheggaaa/pb.v1 master +clone git github.com/mattn/go-runewidth master +clone git github.com/docker/engine-api v0.4.0 + clone git github.com/opencontainers/image-spec master clone git golang.org/x/net 991d3e32f76f19ee6d9caadb3a22eae8d23315f7 https://github.com/golang/net.git clone git github.com/docker/docker master @@ -64,7 +74,7 @@ clone git github.com/opencontainers/runtime-spec bb6925ea99f0e366a3f7d1c975f6577 clone git github.com/docker/distribution d22e09a6686c32be8c17b684b639da4b90efe320 clone git github.com/vbatts/tar-split v0.10.1 clone git github.com/docker/go-units f2145db703495b2e525c59662db69a7344b00bb8 -clone git github.com/docker/go-connections 988efe982fdecb46f01d53465878ff1f2ff411ce +clone git github.com/docker/go-connections 4ccf312bf1d35e5dbda654e57a9be4c3f3cd0366 clone git github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a clone git github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee clone git gopkg.in/yaml.v2 d466437aa4adc35830964cffc5b5f262c63ddcb4 diff --git a/server/image_pull.go b/server/image_pull.go index 96ca09b5..3a0aa65c 100644 --- a/server/image_pull.go +++ b/server/image_pull.go @@ -35,11 +35,15 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.P if err != nil { return nil, err } - i := image.FromSource(src) - blobs, err := i.BlobDigests() + i, err := image.FromSource(src) if err != nil { return nil, err } + blobs := i.LayerInfos() + config := i.ConfigInfo() + if config.Digest != "" { + blobs = append(blobs, config) + } if err = os.Mkdir(filepath.Join(s.config.ImageDir, tr.StringWithinTransport()), 0755); err != nil { return nil, err @@ -61,7 +65,7 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.P if err != nil { return nil, err } - if _, _, err = dest.PutBlob(r, b, -1); err != nil { + if _, err = dest.PutBlob(r, b); err != nil { r.Close() return nil, err } diff --git a/vendor/src/github.com/containers/image/copy/compression.go b/vendor/src/github.com/containers/image/copy/compression.go new file mode 100644 index 00000000..820c7e99 --- /dev/null +++ b/vendor/src/github.com/containers/image/copy/compression.go @@ -0,0 +1,61 @@ +package copy + +import ( + "bytes" + "compress/bzip2" + "compress/gzip" + "errors" + "io" + + "github.com/Sirupsen/logrus" +) + +// decompressorFunc, given a compressed stream, returns the decompressed stream. 
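+//
+// An illustrative use, mirroring what copyBlobFromStream in copy.go does with the
+// value returned by detectCompression below (comment only, not additional API):
+//
+//	decompressor, stream, err := detectCompression(input)
+//	if err == nil && decompressor != nil {
+//		uncompressed, err := decompressor(stream) // stream re-includes the sniffed prefix bytes
+//	}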
+type decompressorFunc func(io.Reader) (io.Reader, error) + +func gzipDecompressor(r io.Reader) (io.Reader, error) { + return gzip.NewReader(r) +} +func bzip2Decompressor(r io.Reader) (io.Reader, error) { + return bzip2.NewReader(r), nil +} +func xzDecompressor(r io.Reader) (io.Reader, error) { + return nil, errors.New("Decompressing xz streams is not supported") +} + +// compressionAlgos is an internal implementation detail of detectCompression +var compressionAlgos = map[string]struct { + prefix []byte + decompressor decompressorFunc +}{ + "gzip": {[]byte{0x1F, 0x8B, 0x08}, gzipDecompressor}, // gzip (RFC 1952) + "bzip2": {[]byte{0x42, 0x5A, 0x68}, bzip2Decompressor}, // bzip2 (decompress.c:BZ2_decompress) + "xz": {[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, xzDecompressor}, // xz (/usr/share/doc/xz/xz-file-format.txt) +} + +// detectCompression returns a decompressorFunc if the input is recognized as a compressed format, nil otherwise. +// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning. +func detectCompression(input io.Reader) (decompressorFunc, io.Reader, error) { + buffer := [8]byte{} + + n, err := io.ReadAtLeast(input, buffer[:], len(buffer)) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + // This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again. + // Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later. + return nil, nil, err + } + + var decompressor decompressorFunc + for name, algo := range compressionAlgos { + if bytes.HasPrefix(buffer[:n], algo.prefix) { + logrus.Debugf("Detected compression format %s", name) + decompressor = algo.decompressor + break + } + } + if decompressor == nil { + logrus.Debugf("No compression detected") + } + + return decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil +} diff --git a/vendor/src/github.com/containers/image/copy/copy.go b/vendor/src/github.com/containers/image/copy/copy.go new file mode 100644 index 00000000..555e378d --- /dev/null +++ b/vendor/src/github.com/containers/image/copy/copy.go @@ -0,0 +1,568 @@ +package copy + +import ( + "bytes" + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "reflect" + + pb "gopkg.in/cheggaaa/pb.v1" + + "github.com/Sirupsen/logrus" + "github.com/containers/image/image" + "github.com/containers/image/manifest" + "github.com/containers/image/signature" + "github.com/containers/image/transports" + "github.com/containers/image/types" + "github.com/docker/distribution/digest" +) + +// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert. +// Prefer v2s2 to v2s1 because v2s2 does not need to be changed when uploading to a different location. +// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used. 
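+// (Concretely these are "application/vnd.docker.distribution.manifest.v2+json" and
+// "application/vnd.docker.distribution.manifest.v1+prettyjws", per the constants in
+// github.com/containers/image/manifest.)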
+var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType} + +type digestingReader struct { + source io.Reader + digester digest.Digester + expectedDigest digest.Digest + validationFailed bool +} + +// imageCopier allows us to keep track of diffID values for blobs, and other +// data, that we're copying between images, and cache other information that +// might allow us to take some shortcuts +type imageCopier struct { + copiedBlobs map[digest.Digest]digest.Digest + cachedDiffIDs map[digest.Digest]digest.Digest + manifestUpdates *types.ManifestUpdateOptions + dest types.ImageDestination + src types.Image + rawSource types.ImageSource + diffIDsAreNeeded bool + canModifyManifest bool + reportWriter io.Writer +} + +// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error +// and set validationFailed to true if the source stream does not match expectedDigest. +func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) { + if err := expectedDigest.Validate(); err != nil { + return nil, fmt.Errorf("Invalid digest specification %s", expectedDigest) + } + digestAlgorithm := expectedDigest.Algorithm() + if !digestAlgorithm.Available() { + return nil, fmt.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm) + } + return &digestingReader{ + source: source, + digester: digestAlgorithm.New(), + expectedDigest: expectedDigest, + validationFailed: false, + }, nil +} + +func (d *digestingReader) Read(p []byte) (int, error) { + n, err := d.source.Read(p) + if n > 0 { + if n2, err := d.digester.Hash().Write(p[:n]); n2 != n || err != nil { + // Coverage: This should not happen, the hash.Hash interface requires + // d.digest.Write to never return an error, and the io.Writer interface + // requires n2 == len(input) if no error is returned. + return 0, fmt.Errorf("Error updating digest during verification: %d vs. %d, %v", n2, n, err) + } + } + if err == io.EOF { + actualDigest := d.digester.Digest() + if actualDigest != d.expectedDigest { + d.validationFailed = true + return 0, fmt.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest) + } + } + return n, err +} + +// Options allows supplying non-default configuration modifying the behavior of CopyImage. +type Options struct { + RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature. + SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(), + ReportWriter io.Writer + SourceCtx *types.SystemContext + DestinationCtx *types.SystemContext +} + +// Image copies image from srcRef to destRef, using policyContext to validate source image admissibility. +func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) error { + reportWriter := ioutil.Discard + if options != nil && options.ReportWriter != nil { + reportWriter = options.ReportWriter + } + writeReport := func(f string, a ...interface{}) { + fmt.Fprintf(reportWriter, f, a...) 
+ } + + dest, err := destRef.NewImageDestination(options.DestinationCtx) + if err != nil { + return fmt.Errorf("Error initializing destination %s: %v", transports.ImageName(destRef), err) + } + defer dest.Close() + destSupportedManifestMIMETypes := dest.SupportedManifestMIMETypes() + + rawSource, err := srcRef.NewImageSource(options.SourceCtx, destSupportedManifestMIMETypes) + if err != nil { + return fmt.Errorf("Error initializing source %s: %v", transports.ImageName(srcRef), err) + } + unparsedImage := image.UnparsedFromSource(rawSource) + defer func() { + if unparsedImage != nil { + unparsedImage.Close() + } + }() + + // Please keep this policy check BEFORE reading any other information about the image. + if allowed, err := policyContext.IsRunningImageAllowed(unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. + return fmt.Errorf("Source image rejected: %v", err) + } + src, err := image.FromUnparsedImage(unparsedImage) + if err != nil { + return fmt.Errorf("Error initializing image from source %s: %v", transports.ImageName(srcRef), err) + } + unparsedImage = nil + defer src.Close() + + if src.IsMultiImage() { + return fmt.Errorf("can not copy %s: manifest contains multiple images", transports.ImageName(srcRef)) + } + + var sigs [][]byte + if options != nil && options.RemoveSignatures { + sigs = [][]byte{} + } else { + writeReport("Getting image source signatures\n") + s, err := src.Signatures() + if err != nil { + return fmt.Errorf("Error reading signatures: %v", err) + } + sigs = s + } + if len(sigs) != 0 { + writeReport("Checking if image destination supports signatures\n") + if err := dest.SupportsSignatures(); err != nil { + return fmt.Errorf("Can not copy signatures: %v", err) + } + } + + canModifyManifest := len(sigs) == 0 + manifestUpdates := types.ManifestUpdateOptions{} + + if err := determineManifestConversion(&manifestUpdates, src, destSupportedManifestMIMETypes, canModifyManifest); err != nil { + return err + } + + // If src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates) will be true, it needs to be true by the time we get here. 
+ ic := imageCopier{ + copiedBlobs: make(map[digest.Digest]digest.Digest), + cachedDiffIDs: make(map[digest.Digest]digest.Digest), + manifestUpdates: &manifestUpdates, + dest: dest, + src: src, + rawSource: rawSource, + diffIDsAreNeeded: src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates), + canModifyManifest: canModifyManifest, + reportWriter: reportWriter, + } + + if err := ic.copyLayers(); err != nil { + return err + } + + pendingImage := src + if !reflect.DeepEqual(manifestUpdates, types.ManifestUpdateOptions{InformationOnly: manifestUpdates.InformationOnly}) { + if !canModifyManifest { + return fmt.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden") + } + manifestUpdates.InformationOnly.Destination = dest + pendingImage, err = src.UpdatedImage(manifestUpdates) + if err != nil { + return fmt.Errorf("Error creating an updated image manifest: %v", err) + } + } + manifest, _, err := pendingImage.Manifest() + if err != nil { + return fmt.Errorf("Error reading manifest: %v", err) + } + + if err := ic.copyConfig(pendingImage); err != nil { + return err + } + + if options != nil && options.SignBy != "" { + mech, err := signature.NewGPGSigningMechanism() + if err != nil { + return fmt.Errorf("Error initializing GPG: %v", err) + } + dockerReference := dest.Reference().DockerReference() + if dockerReference == nil { + return fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference())) + } + + writeReport("Signing manifest\n") + newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, options.SignBy) + if err != nil { + return fmt.Errorf("Error creating signature: %v", err) + } + sigs = append(sigs, newSig) + } + + writeReport("Writing manifest to image destination\n") + if err := dest.PutManifest(manifest); err != nil { + return fmt.Errorf("Error writing manifest: %v", err) + } + + writeReport("Storing signatures\n") + if err := dest.PutSignatures(sigs); err != nil { + return fmt.Errorf("Error writing signatures: %v", err) + } + + if err := dest.Commit(); err != nil { + return fmt.Errorf("Error committing the finished image: %v", err) + } + + return nil +} + +// copyLayers copies layers from src/rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. +func (ic *imageCopier) copyLayers() error { + srcInfos := ic.src.LayerInfos() + destInfos := []types.BlobInfo{} + diffIDs := []digest.Digest{} + for _, srcLayer := range srcInfos { + var ( + destInfo types.BlobInfo + diffID digest.Digest + err error + ) + if ic.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { + // DiffIDs are, currently, needed only when converting from schema1. + // In which case src.LayerInfos will not have URLs because schema1 + // does not support them. 
+			if ic.diffIDsAreNeeded {
+				return errors.New("getting DiffID for foreign layers is unimplemented")
+			}
+			destInfo = srcLayer
+			fmt.Fprintf(ic.reportWriter, "Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.dest.Reference().Transport().Name())
+		} else {
+			destInfo, diffID, err = ic.copyLayer(srcLayer)
+			if err != nil {
+				return err
+			}
+		}
+		destInfos = append(destInfos, destInfo)
+		diffIDs = append(diffIDs, diffID)
+	}
+	ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
+	if ic.diffIDsAreNeeded {
+		ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
+	}
+	if layerDigestsDiffer(srcInfos, destInfos) {
+		ic.manifestUpdates.LayerInfos = destInfos
+	}
+	return nil
+}
+
+// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields)
+func layerDigestsDiffer(a, b []types.BlobInfo) bool {
+	if len(a) != len(b) {
+		return true
+	}
+	for i := range a {
+		if a[i].Digest != b[i].Digest {
+			return true
+		}
+	}
+	return false
+}
+
+// copyConfig copies config.json, if any, from src to dest.
+func (ic *imageCopier) copyConfig(src types.Image) error {
+	srcInfo := src.ConfigInfo()
+	if srcInfo.Digest != "" {
+		fmt.Fprintf(ic.reportWriter, "Copying config %s\n", srcInfo.Digest)
+		configBlob, err := src.ConfigBlob()
+		if err != nil {
+			return fmt.Errorf("Error reading config blob %s: %v", srcInfo.Digest, err)
+		}
+		destInfo, err := ic.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false)
+		if err != nil {
+			return err
+		}
+		if destInfo.Digest != srcInfo.Digest {
+			return fmt.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
+		}
+	}
+	return nil
+}
+
+// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine.
+// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation.
+type diffIDResult struct {
+	digest digest.Digest
+	err    error
+}
+
+// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
+// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded.
+func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) {
+	// Check if we already have a blob with this digest
+	haveBlob, extantBlobSize, err := ic.dest.HasBlob(srcInfo)
+	if err != nil && err != types.ErrBlobNotFound {
+		return types.BlobInfo{}, "", fmt.Errorf("Error checking for blob %s at destination: %v", srcInfo.Digest, err)
+	}
+	// If we already have a cached diffID for this blob, we don't need to compute it
+	diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.cachedDiffIDs[srcInfo.Digest] == "")
+	// If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again
+	if haveBlob && !diffIDIsNeeded {
+		// Check the blob sizes match, if we were given a size this time
+		if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize {
+			return types.BlobInfo{}, "", fmt.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size)
+		}
+		srcInfo.Size = extantBlobSize
+		// Tell the image destination that this blob's delta is being applied again. For some image destinations, this can be faster than using GetBlob/PutBlob
+		blobinfo, err := ic.dest.ReapplyBlob(srcInfo)
+		if err != nil {
+			return types.BlobInfo{}, "", fmt.Errorf("Error reapplying blob %s at destination: %v", srcInfo.Digest, err)
+		}
+		fmt.Fprintf(ic.reportWriter, "Skipping fetch of repeat blob %s\n", srcInfo.Digest)
+		return blobinfo, ic.cachedDiffIDs[srcInfo.Digest], err
+	}
+
+	// Fallback: copy the layer, computing the diffID if we need to do so
+	fmt.Fprintf(ic.reportWriter, "Copying blob %s\n", srcInfo.Digest)
+	srcStream, srcBlobSize, err := ic.rawSource.GetBlob(srcInfo)
+	if err != nil {
+		return types.BlobInfo{}, "", fmt.Errorf("Error reading blob %s: %v", srcInfo.Digest, err)
+	}
+	defer srcStream.Close()
+
+	blobInfo, diffIDChan, err := ic.copyLayerFromStream(srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize},
+		diffIDIsNeeded)
+	if err != nil {
+		return types.BlobInfo{}, "", err
+	}
+	var diffIDResult diffIDResult // = {digest:""}
+	if diffIDIsNeeded {
+		diffIDResult = <-diffIDChan
+		if diffIDResult.err != nil {
+			return types.BlobInfo{}, "", fmt.Errorf("Error computing layer DiffID: %v", diffIDResult.err)
+		}
+		logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
+		ic.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest
+	}
+	return blobInfo, diffIDResult.digest, nil
+}
+
+// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
+// It copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest,
+// perhaps compressing the stream if canCompress,
+// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
+func (ic *imageCopier) copyLayerFromStream(srcStream io.Reader, srcInfo types.BlobInfo,
+	diffIDIsNeeded bool) (types.BlobInfo, <-chan diffIDResult, error) {
+	var getDiffIDRecorder func(decompressorFunc) io.Writer // = nil
+	var diffIDChan chan diffIDResult
+
+	err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below
+	if diffIDIsNeeded {
+		diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
+		pipeReader, pipeWriter := io.Pipe()
+		defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily.
+			pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
+		}()
+
+		getDiffIDRecorder = func(decompressor decompressorFunc) io.Writer {
+			// If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
+			// reading from the pipe has failed, we don’t really care.
+			// We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
+			// the return value includes an error indication, which we do check.
+			//
+			// If this is never called, pipeReader will not be used anywhere, but pipeWriter will only be
+			// closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC.
+ go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader + return pipeWriter + } + } + blobInfo, err := ic.copyBlobFromStream(srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest) // Sets err to nil on success + return blobInfo, diffIDChan, err + // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan +} + +// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest. +func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor decompressorFunc) { + result := diffIDResult{ + digest: "", + err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"), + } + defer func() { dest <- result }() + defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead. + + result.digest, result.err = computeDiffID(layerStream, decompressor) +} + +// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest. +func computeDiffID(stream io.Reader, decompressor decompressorFunc) (digest.Digest, error) { + if decompressor != nil { + s, err := decompressor(stream) + if err != nil { + return "", err + } + stream = s + } + + return digest.Canonical.FromReader(stream) +} + +// copyBlobFromStream copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest, +// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, +// perhaps compressing it if canCompress, +// and returns a complete blobInfo of the copied blob. +func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo, + getOriginalLayerCopyWriter func(decompressor decompressorFunc) io.Writer, + canCompress bool) (types.BlobInfo, error) { + // The copying happens through a pipeline of connected io.Readers. + // === Input: srcStream + + // === Process input through digestingReader to validate against the expected digest. + // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader, + // use a separate validation failure indicator. + // Note that we don't use a stronger "validationSucceeded" indicator, because + // dest.PutBlob may detect that the layer already exists, in which case we don't + // read stream to the end, and validation does not happen. + digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest) + if err != nil { + return types.BlobInfo{}, fmt.Errorf("Error preparing to verify blob %s: %v", srcInfo.Digest, err) + } + var destStream io.Reader = digestingReader + + // === Detect compression of the input stream. + // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by detectCompression. + decompressor, destStream, err := detectCompression(destStream) // We could skip this in some cases, but let's keep the code path uniform + if err != nil { + return types.BlobInfo{}, fmt.Errorf("Error reading blob %s: %v", srcInfo.Digest, err) + } + isCompressed := decompressor != nil + + // === Report progress using a pb.Reader. 
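+	// (pb is the newly vendored gopkg.in/cheggaaa/pb.v1; bar.NewProxyReader below wraps
+	// destStream so that each Read() call advances the progress bar.)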
+	bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES)
+	bar.Output = ic.reportWriter
+	bar.SetMaxWidth(80)
+	bar.ShowTimeLeft = false
+	bar.ShowPercent = false
+	bar.Start()
+	destStream = bar.NewProxyReader(destStream)
+	defer fmt.Fprint(ic.reportWriter, "\n")
+
+	// === Send a copy of the original, uncompressed stream to a separate path if necessary.
+	var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
+	if getOriginalLayerCopyWriter != nil {
+		destStream = io.TeeReader(destStream, getOriginalLayerCopyWriter(decompressor))
+		originalLayerReader = destStream
+	}
+
+	// === Compress the layer if it is uncompressed and compression is desired
+	var inputInfo types.BlobInfo
+	if !canCompress || isCompressed || !ic.dest.ShouldCompressLayers() {
+		logrus.Debugf("Using original blob without modification")
+		inputInfo = srcInfo
+	} else {
+		logrus.Debugf("Compressing blob on the fly")
+		pipeReader, pipeWriter := io.Pipe()
+		defer pipeReader.Close()
+
+		// If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
+		// e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
+		// we don’t care.
+		go compressGoroutine(pipeWriter, destStream) // Closes pipeWriter
+		destStream = pipeReader
+		inputInfo.Digest = ""
+		inputInfo.Size = -1
+	}
+
+	// === Finally, send the layer stream to dest.
+	uploadedInfo, err := ic.dest.PutBlob(destStream, inputInfo)
+	if err != nil {
+		return types.BlobInfo{}, fmt.Errorf("Error writing blob: %v", err)
+	}
+
+	// This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
+	// all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
+	// So, read everything from originalLayerReader, which will cause the rest to be
+	// sent there if we are not already at EOF.
+	if getOriginalLayerCopyWriter != nil {
+		logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
+		_, err := io.Copy(ioutil.Discard, originalLayerReader)
+		if err != nil {
+			return types.BlobInfo{}, fmt.Errorf("Error reading input blob %s: %v", srcInfo.Digest, err)
+		}
+	}
+
+	if digestingReader.validationFailed { // Coverage: This should never happen.
+		return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
+	}
+	if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
+		return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
+	}
+	return uploadedInfo, nil
+}
+
+// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
+func compressGoroutine(dest *io.PipeWriter, src io.Reader) {
+	err := errors.New("Internal error: unexpected panic in compressGoroutine")
+	defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
+		dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
+	}()
+
+	zipper := gzip.NewWriter(dest)
+	defer zipper.Close()
+
+	_, err = io.Copy(zipper, src) // Sets err to nil, i.e. causes dest.Close()
+}
+
+// determineManifestConversion updates manifestUpdates to convert manifest to a supported MIME type, if necessary and canModifyManifest.
+// Note that the conversion will only happen later, through src.UpdatedImage
+func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, src types.Image, destSupportedManifestMIMETypes []string, canModifyManifest bool) error {
+	if len(destSupportedManifestMIMETypes) == 0 {
+		return nil // Anything goes
+	}
+	supportedByDest := map[string]struct{}{}
+	for _, t := range destSupportedManifestMIMETypes {
+		supportedByDest[t] = struct{}{}
+	}
+
+	_, srcType, err := src.Manifest()
+	if err != nil { // This should have been cached?!
+		return fmt.Errorf("Error reading manifest: %v", err)
+	}
+	if _, ok := supportedByDest[srcType]; ok {
+		logrus.Debugf("Manifest MIME type %s is declared supported by the destination", srcType)
+		return nil
+	}
+
+	// OK, we should convert the manifest.
+	if !canModifyManifest {
+		logrus.Debugf("Manifest MIME type %s is not supported by the destination, but we can't modify the manifest, hoping for the best...", srcType)
+		return nil // Take our chances - FIXME? Or should we fail without trying?
+	}
+
+	var chosenType = destSupportedManifestMIMETypes[0] // This one is known to be supported.
+	for _, t := range preferredManifestMIMETypes {
+		if _, ok := supportedByDest[t]; ok {
+			chosenType = t
+			break
+		}
+	}
+	logrus.Debugf("Will convert manifest from MIME type %s to %s", srcType, chosenType)
+	manifestUpdates.ManifestMIMEType = chosenType
+	return nil
+}
diff --git a/vendor/src/github.com/containers/image/directory/directory_dest.go b/vendor/src/github.com/containers/image/directory/directory_dest.go
index e5fa3939..2a8d102e 100644
--- a/vendor/src/github.com/containers/image/directory/directory_dest.go
+++ b/vendor/src/github.com/containers/image/directory/directory_dest.go
@@ -1,14 +1,13 @@
 package directory
 
 import (
-	"crypto/sha256"
-	"encoding/hex"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
 
 	"github.com/containers/image/types"
+	"github.com/docker/distribution/digest"
 )
 
 type dirImageDestination struct {
@@ -40,16 +39,27 @@ func (d *dirImageDestination) SupportsSignatures() error {
 	return nil
 }
 
-// PutBlob writes contents of stream and returns its computed digest and size.
-// A digest can be optionally provided if known, the specific image destination can decide to play with it or not.
-// The length of stream is expected to be expectedSize; if expectedSize == -1, it is not known.
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+func (d *dirImageDestination) ShouldCompressLayers() bool {
+	return false
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *dirImageDestination) AcceptsForeignLayerURLs() bool {
+	return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
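+//
+// A minimal sketch of the resulting calling convention (hypothetical caller, not part
+// of this change):
+//
+//	info, err := dest.PutBlob(stream, types.BlobInfo{Digest: dgst, Size: -1})
+//	// On success, info.Digest and info.Size describe what was actually stored;
+//	// Size: -1 declares the input length unknown, so it is not checked.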
-func (d *dirImageDestination) PutBlob(stream io.Reader, digest string, expectedSize int64) (string, int64, error) {
+func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
 	blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
 	if err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	succeeded := false
 	defer func() {
@@ -59,29 +69,48 @@ func (d *dirImageDestination) PutBlob(stream io.Reader, digest string, expectedS
 		}
 	}()
 
-	h := sha256.New()
-	tee := io.TeeReader(stream, h)
+	digester := digest.Canonical.New()
+	tee := io.TeeReader(stream, digester.Hash())
 
 	size, err := io.Copy(blobFile, tee)
 	if err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
-	computedDigest := hex.EncodeToString(h.Sum(nil))
-	if expectedSize != -1 && size != expectedSize {
-		return "", -1, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, expectedSize, size)
+	computedDigest := digester.Digest()
+	if inputInfo.Size != -1 && size != inputInfo.Size {
+		return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
 	}
 	if err := blobFile.Sync(); err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	if err := blobFile.Chmod(0644); err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	blobPath := d.ref.layerPath(computedDigest)
 	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	succeeded = true
-	return "sha256:" + computedDigest, size, nil
+	return types.BlobInfo{Digest: computedDigest, Size: size}, nil
+}
+
+func (d *dirImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+	if info.Digest == "" {
+		return false, -1, fmt.Errorf(`Can not check for a blob with unknown digest`)
+	}
+	blobPath := d.ref.layerPath(info.Digest)
+	finfo, err := os.Stat(blobPath)
+	if err != nil && os.IsNotExist(err) {
+		return false, -1, types.ErrBlobNotFound
+	}
+	if err != nil {
+		return false, -1, err
+	}
+	return true, finfo.Size(), nil
+}
+
+func (d *dirImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+	return info, nil
 }
 
 func (d *dirImageDestination) PutManifest(manifest []byte) error {
diff --git a/vendor/src/github.com/containers/image/directory/directory_src.go b/vendor/src/github.com/containers/image/directory/directory_src.go
index c87b0a3b..8500128d 100644
--- a/vendor/src/github.com/containers/image/directory/directory_src.go
+++ b/vendor/src/github.com/containers/image/directory/directory_src.go
@@ -1,11 +1,14 @@
 package directory
 
 import (
+	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
 
+	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
+	"github.com/docker/distribution/digest"
 )
 
 type dirImageSource struct {
@@ -28,18 +31,23 @@ func (s *dirImageSource) Reference() types.ImageReference {
 func (s *dirImageSource) Close() {
 }
 
-// it's up to the caller to determine the MIME type of the returned manifest's bytes
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
 func (s *dirImageSource) GetManifest() ([]byte, string, error) {
 	m, err := ioutil.ReadFile(s.ref.manifestPath())
 	if err != nil {
 		return nil, "", err
 	}
-	return m, "", err
+	return m, manifest.GuessMIMEType(m), err
+}
+
+func (s *dirImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
+	return nil, "", fmt.Errorf(`Getting target manifest not supported by "dir:"`)
 }
 
 // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *dirImageSource) GetBlob(digest string) (io.ReadCloser, int64, error) {
-	r, err := os.Open(s.ref.layerPath(digest))
+func (s *dirImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
+	r, err := os.Open(s.ref.layerPath(info.Digest))
 	if err != nil {
-		return nil, 0, nil
+		return nil, 0, err
 	}
diff --git a/vendor/src/github.com/containers/image/directory/directory_transport.go b/vendor/src/github.com/containers/image/directory/directory_transport.go
index 4979572d..7a728e09 100644
--- a/vendor/src/github.com/containers/image/directory/directory_transport.go
+++ b/vendor/src/github.com/containers/image/directory/directory_transport.go
@@ -7,9 +7,10 @@ import (
 	"strings"
 
 	"github.com/containers/image/directory/explicitfilepath"
+	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/image"
 	"github.com/containers/image/types"
-	"github.com/docker/docker/reference"
+	"github.com/docker/distribution/digest"
 )
 
 // Transport is an ImageTransport for directory paths.
@@ -127,11 +128,13 @@ func (ref dirReference) PolicyConfigurationNamespaces() []string {
 	return res
 }
 
-// NewImage returns a types.Image for this reference.
+// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
 // The caller must call .Close() on the returned Image.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
 func (ref dirReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
 	src := newImageSource(ref)
-	return image.FromSource(src), nil
+	return image.FromSource(src)
 }
 
 // NewImageSource returns a types.ImageSource for this reference,
@@ -159,9 +162,9 @@ func (ref dirReference) manifestPath() string {
 }
 
 // layerPath returns a path for a layer tarball within a directory using our conventions.
-func (ref dirReference) layerPath(digest string) string {
+func (ref dirReference) layerPath(digest digest.Digest) string {
 	// FIXME: Should we keep the digest identification?
-	return filepath.Join(ref.path, strings.TrimPrefix(digest, "sha256:")+".tar")
+	return filepath.Join(ref.path, digest.Hex()+".tar")
 }
 
 // signaturePath returns a path for a signature within a directory using our conventions.
diff --git a/vendor/src/github.com/containers/image/docker/daemon/daemon_dest.go b/vendor/src/github.com/containers/image/docker/daemon/daemon_dest.go new file mode 100644 index 00000000..7c4576dd --- /dev/null +++ b/vendor/src/github.com/containers/image/docker/daemon/daemon_dest.go @@ -0,0 +1,312 @@ +package daemon + +import ( + "archive/tar" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "time" + + "github.com/Sirupsen/logrus" + "github.com/containers/image/docker/reference" + "github.com/containers/image/manifest" + "github.com/containers/image/types" + "github.com/docker/distribution/digest" + "github.com/docker/engine-api/client" + "golang.org/x/net/context" +) + +type daemonImageDestination struct { + ref daemonReference + namedTaggedRef reference.NamedTagged // Strictly speaking redundant with ref above; having the field makes it structurally impossible for later users to fail. + // For talking to imageLoadGoroutine + goroutineCancel context.CancelFunc + statusChannel <-chan error + writer *io.PipeWriter + tar *tar.Writer + // Other state + committed bool // writer has been closed + blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs +} + +// newImageDestination returns a types.ImageDestination for the specified image reference. +func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (types.ImageDestination, error) { + if ref.ref == nil { + return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) + } + namedTaggedRef, ok := ref.ref.(reference.NamedTagged) + if !ok { + return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) + } + + c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host + if err != nil { + return nil, fmt.Errorf("Error initializing docker engine client: %v", err) + } + + reader, writer := io.Pipe() + // Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it. + statusChannel := make(chan error, 1) + + ctx, goroutineCancel := context.WithCancel(context.Background()) + go imageLoadGoroutine(ctx, c, reader, statusChannel) + + return &daemonImageDestination{ + ref: ref, + namedTaggedRef: namedTaggedRef, + goroutineCancel: goroutineCancel, + statusChannel: statusChannel, + writer: writer, + tar: tar.NewWriter(writer), + committed: false, + blobs: make(map[digest.Digest]types.BlobInfo), + }, nil +} + +// imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel +func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) { + err := errors.New("Internal error: unexpected panic in imageLoadGoroutine") + defer func() { + logrus.Debugf("docker-daemon: sending done, status %v", err) + statusChannel <- err + }() + defer func() { + if err == nil { + reader.Close() + } else { + reader.CloseWithError(err) + } + }() + + resp, err := c.ImageLoad(ctx, reader, true) + if err != nil { + err = fmt.Errorf("Error saving image to docker engine: %v", err) + return + } + defer resp.Body.Close() +} + +// Close removes resources associated with an initialized ImageDestination, if any. 
+func (d *daemonImageDestination) Close() {
+	if !d.committed {
+		logrus.Debugf("docker-daemon: Closing tar stream to abort loading")
+		// In principle, goroutineCancel() should abort the HTTP request and stop the process from continuing.
+		// In practice, though, https://github.com/docker/engine-api/blob/master/client/transport/cancellable/cancellable.go
+		// currently just runs the HTTP request to completion in a goroutine, and returns early if the context is canceled
+		// without terminating the HTTP request at all. So we need this CloseWithError to terminate sending the HTTP request Body
+		// immediately, and hopefully, through terminating the sending which uses "Transfer-Encoding: chunked" without sending
+		// the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all.
+		// Whether that works or not, closing the PipeWriter seems desirable in any case.
+		d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()"))
+	}
+	d.goroutineCancel()
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *daemonImageDestination) Reference() types.ImageReference {
+	return d.ref
+}
+
+// SupportedManifestMIMETypes tells which manifest mime types the destination supports.
+// If an empty slice or nil is returned, then any mime type can be tried to upload.
+func (d *daemonImageDestination) SupportedManifestMIMETypes() []string {
+	return []string{
+		manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
+	}
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *daemonImageDestination) SupportsSignatures() error {
+	return fmt.Errorf("Storing signatures for docker-daemon: destinations is not supported")
+}
+
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+func (d *daemonImageDestination) ShouldCompressLayers() bool {
+	return false
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *daemonImageDestination) AcceptsForeignLayerURLs() bool {
+	return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *daemonImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
+	if ok, size, err := d.HasBlob(inputInfo); err == nil && ok {
+		return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
+	}
+
+	if inputInfo.Size == -1 { // Ouch, we need to stream the blob into a temporary file just to determine the size.
+		logrus.Debugf("docker-daemon: input with unknown size, streaming to disk first…")
+		streamCopy, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-daemon-blob")
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		defer os.Remove(streamCopy.Name())
+		defer streamCopy.Close()
+
+		size, err := io.Copy(streamCopy, stream)
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		_, err = streamCopy.Seek(0, os.SEEK_SET)
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
+		stream = streamCopy
+		logrus.Debugf("… streaming done")
+	}
+
+	digester := digest.Canonical.New()
+	tee := io.TeeReader(stream, digester.Hash())
+	if err := d.sendFile(inputInfo.Digest.String(), inputInfo.Size, tee); err != nil {
+		return types.BlobInfo{}, err
+	}
+	d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}
+	return types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}, nil
+}
+
+func (d *daemonImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+	if info.Digest == "" {
+		return false, -1, fmt.Errorf(`Can not check for a blob with unknown digest`)
+	}
+	if blob, ok := d.blobs[info.Digest]; ok {
+		return true, blob.Size, nil
+	}
+	return false, -1, types.ErrBlobNotFound
+}
+
+func (d *daemonImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+	return info, nil
+}
+
+func (d *daemonImageDestination) PutManifest(m []byte) error {
+	var man schema2Manifest
+	if err := json.Unmarshal(m, &man); err != nil {
+		return fmt.Errorf("Error parsing manifest: %v", err)
+	}
+	if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
+		return fmt.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
+	}
+
+	layerPaths := []string{}
+	for _, l := range man.Layers {
+		layerPaths = append(layerPaths, l.Digest.String())
+	}
+
+	// For github.com/docker/docker consumers, this works just as well as
+	//   refString := d.namedTaggedRef.String() [i.e. d.ref.ref.String()]
+	// because when reading the RepoTags strings, github.com/docker/docker/reference
+	// normalizes both of them to the same value.
+	//
+	// Doing it this way to include the normalized-out `docker.io[/library]` does make
+	// a difference for github.com/projectatomic/docker consumers, with the
+	// “Add --add-registry and --block-registry options to docker daemon” patch.
+	// These consumers treat reference strings which include a hostname and reference
+	// strings without a hostname differently.
+	//
+	// Using the host name here is more explicit about the intent, and it has the same
+	// effect as (docker pull) in projectatomic/docker, which tags the result using
+	// a hostname-qualified reference.
+	// See https://github.com/containers/image/issues/72 for a more detailed
+	// analysis and explanation.
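+	// For example (illustrative values only), a destination of docker-daemon:busybox:latest
+	// yields refString == "docker.io/library/busybox:latest".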
+	refString := fmt.Sprintf("%s:%s", d.namedTaggedRef.FullName(), d.namedTaggedRef.Tag())
+
+	items := []manifestItem{{
+		Config:       man.Config.Digest.String(),
+		RepoTags:     []string{refString},
+		Layers:       layerPaths,
+		Parent:       "",
+		LayerSources: nil,
+	}}
+	itemsBytes, err := json.Marshal(&items)
+	if err != nil {
+		return err
+	}
+
+	// FIXME? Do we also need to support the legacy format?
+	return d.sendFile(manifestFileName, int64(len(itemsBytes)), bytes.NewReader(itemsBytes))
+}
+
+type tarFI struct {
+	path string
+	size int64
+}
+
+func (t *tarFI) Name() string {
+	return t.path
+}
+func (t *tarFI) Size() int64 {
+	return t.size
+}
+func (t *tarFI) Mode() os.FileMode {
+	return 0444
+}
+func (t *tarFI) ModTime() time.Time {
+	return time.Unix(0, 0)
+}
+func (t *tarFI) IsDir() bool {
+	return false
+}
+func (t *tarFI) Sys() interface{} {
+	return nil
+}
+
+// sendFile sends a file into the tar stream.
+func (d *daemonImageDestination) sendFile(path string, expectedSize int64, stream io.Reader) error {
+	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("Sending as tar file %s", path)
+	if err := d.tar.WriteHeader(hdr); err != nil {
+		return err
+	}
+	size, err := io.Copy(d.tar, stream)
+	if err != nil {
+		return err
+	}
+	if size != expectedSize {
+		return fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
+	}
+	return nil
+}
+
+func (d *daemonImageDestination) PutSignatures(signatures [][]byte) error {
+	if len(signatures) != 0 {
+		return fmt.Errorf("Storing signatures for docker-daemon: destinations is not supported")
+	}
+	return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *daemonImageDestination) Commit() error {
+	logrus.Debugf("docker-daemon: Closing tar stream")
+	if err := d.tar.Close(); err != nil {
+		return err
+	}
+	if err := d.writer.Close(); err != nil {
+		return err
+	}
+	d.committed = true // We may still fail, but we are done sending to imageLoadGoroutine.
+
+	logrus.Debugf("docker-daemon: Waiting for status")
+	err := <-d.statusChannel
+	return err
+}
diff --git a/vendor/src/github.com/containers/image/docker/daemon/daemon_src.go b/vendor/src/github.com/containers/image/docker/daemon/daemon_src.go
new file mode 100644
index 00000000..2f487751
--- /dev/null
+++ b/vendor/src/github.com/containers/image/docker/daemon/daemon_src.go
@@ -0,0 +1,361 @@
+package daemon
+
+import (
+	"archive/tar"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+
+	"github.com/containers/image/manifest"
+	"github.com/containers/image/types"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/engine-api/client"
+	"golang.org/x/net/context"
+)
+
+const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
+
+type daemonImageSource struct {
+	ref         daemonReference
+	tarCopyPath string
+	// The following data is only available after ensureCachedDataIsPresent() succeeds
+	tarManifest *manifestItem // nil if not available yet.
+	configBytes       []byte
+	configDigest      digest.Digest
+	orderedDiffIDList []diffID
+	knownLayers       map[diffID]*layerInfo
+	// Other state
+	generatedManifest []byte // Private cache for GetManifest(), nil if not set yet.
+}
+
+type layerInfo struct {
+	path string
+	size int64
+}
+
+// newImageSource returns a types.ImageSource for the specified image reference.
+// The caller must call .Close() on the returned ImageSource.
+//
+// It would be great if we were able to stream the input tar as it is being
+// sent; but Docker sends the top-level manifest, which determines which paths
+// to look for, at the end, so we will need to seek back and re-read, several times.
+// (We could, perhaps, expect an exact sequence, assume that the first plaintext file
+// is the config, and that the following len(RootFS) files are the layers, but that feels
+// way too brittle.)
+func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageSource, error) {
+	c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host
+	if err != nil {
+		return nil, fmt.Errorf("Error initializing docker engine client: %v", err)
+	}
+	// Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
+	// Either way ImageSave should create a tarball with exactly one image.
+	inputStream, err := c.ImageSave(context.TODO(), []string{ref.StringWithinTransport()})
+	if err != nil {
+		return nil, fmt.Errorf("Error loading image from docker engine: %v", err)
+	}
+	defer inputStream.Close()
+
+	// FIXME: use SystemContext here.
+	tarCopyFile, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-daemon-tar")
+	if err != nil {
+		return nil, err
+	}
+	defer tarCopyFile.Close()
+
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			os.Remove(tarCopyFile.Name())
+		}
+	}()
+
+	if _, err := io.Copy(tarCopyFile, inputStream); err != nil {
+		return nil, err
+	}
+
+	succeeded = true
+	return &daemonImageSource{
+		ref:         ref,
+		tarCopyPath: tarCopyFile.Name(),
+	}, nil
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *daemonImageSource) Reference() types.ImageReference {
+	return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *daemonImageSource) Close() {
+	_ = os.Remove(s.tarCopyPath)
+}
+
+// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
+type tarReadCloser struct {
+	*tar.Reader
+	backingFile *os.File
+}
+
+func (t *tarReadCloser) Close() error {
+	return t.backingFile.Close()
+}
+
+// openTarComponent returns a ReadCloser for the specific file within the archive.
+// This is a linear scan; we assume that the tar file will have a fairly small number of files (~layers),
+// and that filesystem caching will make the repeated seeking over the (uncompressed) tarCopyPath cheap enough.
+// The caller should call .Close() on the returned stream.
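+//
+// An illustrative call (the component path is hypothetical):
+//
+//	rc, err := s.openTarComponent("0123abcd/layer.tar")
+//	if err != nil {
+//		return nil, err
+//	}
+//	defer rc.Close()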
+func (s *daemonImageSource) openTarComponent(componentPath string) (io.ReadCloser, error) {
+	f, err := os.Open(s.tarCopyPath)
+	if err != nil {
+		return nil, err
+	}
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			f.Close()
+		}
+	}()
+
+	tarReader, header, err := findTarComponent(f, componentPath)
+	if err != nil {
+		return nil, err
+	}
+	if header == nil {
+		return nil, os.ErrNotExist
+	}
+	if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
+		// We follow only one symlink; so no loops are possible.
+		if _, err := f.Seek(0, os.SEEK_SET); err != nil {
+			return nil, err
+		}
+		// The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
+		// so we don't care.
+		tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
+		if err != nil {
+			return nil, err
+		}
+		if header == nil {
+			return nil, os.ErrNotExist
+		}
+	}
+
+	if !header.FileInfo().Mode().IsRegular() {
+		return nil, fmt.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
+	}
+	succeeded = true
+	return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
+}
+
+// findTarComponent returns a header and a reader matching path within inputFile,
+// or (nil, nil, nil) if not found.
+func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) {
+	t := tar.NewReader(inputFile)
+	for {
+		h, err := t.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return nil, nil, err
+		}
+		if h.Name == path {
+			return t, h, nil
+		}
+	}
+	return nil, nil, nil
+}
+
+// readTarComponent returns full contents of componentPath.
+func (s *daemonImageSource) readTarComponent(path string) ([]byte, error) {
+	file, err := s.openTarComponent(path)
+	if err != nil {
+		return nil, fmt.Errorf("Error loading tar component %s: %v", path, err)
+	}
+	defer file.Close()
+	bytes, err := ioutil.ReadAll(file)
+	if err != nil {
+		return nil, err
+	}
+	return bytes, nil
+}
+
+// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
+func (s *daemonImageSource) ensureCachedDataIsPresent() error {
+	if s.tarManifest != nil {
+		return nil
+	}
+
+	// Read and parse manifest.json
+	tarManifest, err := s.loadTarManifest()
+	if err != nil {
+		return err
+	}
+
+	// Read and parse config.
+	configBytes, err := s.readTarComponent(tarManifest.Config)
+	if err != nil {
+		return err
+	}
+	var parsedConfig dockerImage // Most fields omitted, we only care about layer DiffIDs.
+	if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
+		return fmt.Errorf("Error decoding tar config %s: %v", tarManifest.Config, err)
+	}
+
+	knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
+	if err != nil {
+		return err
+	}
+
+	// Success; commit.
+	s.tarManifest = tarManifest
+	s.configBytes = configBytes
+	s.configDigest = digest.FromBytes(configBytes)
+	s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
+	s.knownLayers = knownLayers
+	return nil
+}
+
+// loadTarManifest loads and decodes the manifest.json.
+func (s *daemonImageSource) loadTarManifest() (*manifestItem, error) {
+	// FIXME? Do we need to deal with the legacy format?
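+	// (The “legacy format” refers to the older "docker save" layout, with a top-level
+	// "repositories" file and per-layer directories rather than a manifest.json;
+	// only the manifest.json form is handled here.)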
+	bytes, err := s.readTarComponent(manifestFileName)
+	if err != nil {
+		return nil, err
+	}
+	var items []manifestItem
+	if err := json.Unmarshal(bytes, &items); err != nil {
+		return nil, fmt.Errorf("Error decoding tar manifest.json: %v", err)
+	}
+	if len(items) != 1 {
+		return nil, fmt.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(items))
+	}
+	return &items[0], nil
+}
+
+func (s *daemonImageSource) prepareLayerData(tarManifest *manifestItem, parsedConfig *dockerImage) (map[diffID]*layerInfo, error) {
+	// Collect layer data available in manifest and config.
+	if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
+		return nil, fmt.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
+	}
+	knownLayers := map[diffID]*layerInfo{}
+	unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
+	for i, diffID := range parsedConfig.RootFS.DiffIDs {
+		if _, ok := knownLayers[diffID]; ok {
+			// Apparently it really can happen that a single image contains the same layer diff more than once.
+			// In that case, the diffID validation ensures that both layers truly are the same, and it should not matter
+			// which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original.
+			continue
+		}
+		layerPath := tarManifest.Layers[i]
+		if _, ok := unknownLayerSizes[layerPath]; ok {
+			return nil, fmt.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
+		}
+		li := &layerInfo{ // A new element in each iteration
+			path: layerPath,
+			size: -1,
+		}
+		knownLayers[diffID] = li
+		unknownLayerSizes[layerPath] = li
+	}
+
+	// Scan the tar file to collect layer sizes.
+	file, err := os.Open(s.tarCopyPath)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	t := tar.NewReader(file)
+	for {
+		h, err := t.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+		if li, ok := unknownLayerSizes[h.Name]; ok {
+			li.size = h.Size
+			delete(unknownLayerSizes, h.Name)
+		}
+	}
+	if len(unknownLayerSizes) != 0 {
+		return nil, fmt.Errorf("Some layer tarfiles are missing from the tarball") // This could do with better error reporting, if this ever happens in practice.
+	}
+
+	return knownLayers, nil
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+func (s *daemonImageSource) GetManifest() ([]byte, string, error) { + if s.generatedManifest == nil { + if err := s.ensureCachedDataIsPresent(); err != nil { + return nil, "", err + } + m := schema2Manifest{ + SchemaVersion: 2, + MediaType: manifest.DockerV2Schema2MediaType, + Config: distributionDescriptor{ + MediaType: manifest.DockerV2Schema2ConfigMediaType, + Size: int64(len(s.configBytes)), + Digest: s.configDigest, + }, + Layers: []distributionDescriptor{}, + } + for _, diffID := range s.orderedDiffIDList { + li, ok := s.knownLayers[diffID] + if !ok { + return nil, "", fmt.Errorf("Internal inconsistency: Information about layer %s missing", diffID) + } + m.Layers = append(m.Layers, distributionDescriptor{ + Digest: digest.Digest(diffID), // diffID is a digest of the uncompressed tarball + MediaType: manifest.DockerV2Schema2LayerMediaType, + Size: li.size, + }) + } + manifestBytes, err := json.Marshal(&m) + if err != nil { + return nil, "", err + } + s.generatedManifest = manifestBytes + } + return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil +} + +// GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest +// out of a manifest list. +func (s *daemonImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { + // How did we even get here? GetManifest() above has returned a manifest.DockerV2Schema2MediaType. + return nil, "", fmt.Errorf(`Manifest lists are not supported by "docker-daemon:"`) +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). +func (s *daemonImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { + if err := s.ensureCachedDataIsPresent(); err != nil { + return nil, 0, err + } + + if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256. + return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil + } + + if li, ok := s.knownLayers[diffID(info.Digest)]; ok { // diffID is a digest of the uncompressed tarball, + stream, err := s.openTarComponent(li.path) + if err != nil { + return nil, 0, err + } + return stream, li.size, nil + } + + return nil, 0, fmt.Errorf("Unknown blob %s", info.Digest) +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +func (s *daemonImageSource) GetSignatures() ([][]byte, error) { + return [][]byte{}, nil +} diff --git a/vendor/src/github.com/containers/image/docker/daemon/daemon_transport.go b/vendor/src/github.com/containers/image/docker/daemon/daemon_transport.go new file mode 100644 index 00000000..1492e2fe --- /dev/null +++ b/vendor/src/github.com/containers/image/docker/daemon/daemon_transport.go @@ -0,0 +1,179 @@ +package daemon + +import ( + "errors" + "fmt" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/image" + "github.com/containers/image/types" + "github.com/docker/distribution/digest" +) + +// Transport is an ImageTransport for images managed by a local Docker daemon. +var Transport = daemonTransport{} + +type daemonTransport struct{} + +// Name returns the name of the transport, which must be unique among other transports. +func (t daemonTransport) Name() string { + return "docker-daemon" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. 
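+// For example, with the "docker-daemon:" prefix already stripped, "busybox:latest" and
+// "sha256:<64 hex digits>" are accepted here, while a bare 64-hex-digit image ID is rejected;
+// see the package-level ParseReference below for the exact rules.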
+func (t daemonTransport) ParseReference(reference string) (types.ImageReference, error) { + return ParseReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error { + // See the explanation in daemonReference.PolicyConfigurationIdentity. + return errors.New(`docker-daemon: does not support any scopes except the default "" one`) +} + +// daemonReference is an ImageReference for images managed by a local Docker daemon +// Exactly one of id and ref can be set. +// For daemonImageSource, both id and ref are acceptable, ref must not be a NameOnly (interpreted as all tags in that repository by the daemon) +// For daemonImageDestination, it must be a ref, which is NamedTagged. +// (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest. +// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.) +type daemonReference struct { + id digest.Digest + ref reference.Named // !reference.IsNameOnly +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func ParseReference(refString string) (types.ImageReference, error) { + // This is intended to be compatible with reference.ParseIDOrReference, but more strict about refusing some of the ambiguous cases. + // In particular, this rejects unprefixed digest values (64 hex chars), and sha256 digest prefixes (sha256:fewer-than-64-hex-chars). + + // digest:hexstring is structurally the same as a reponame:tag (meaning docker.io/library/reponame:tag). + // reference.ParseIDOrReference interprets such strings as digests. + if dgst, err := digest.ParseDigest(refString); err == nil { + // The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name. + // Other digest references are ambiguous, so refuse them. 
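+	// (For example, "sha256:<64 hex digits>" parses as a digest and is accepted as an image ID below,
+	// while a string like "busybox:latest" fails digest parsing and is handled by reference.ParseNamed further down.)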
+ if dgst.Algorithm() != digest.Canonical { + return nil, fmt.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical) + } + return NewReference(dgst, nil) + } + + ref, err := reference.ParseNamed(refString) // This also rejects unprefixed digest values + if err != nil { + return nil, err + } + if ref.Name() == digest.Canonical.String() { + return nil, fmt.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical) + } + return NewReference("", ref) +} + +// NewReference returns a docker-daemon reference for either the supplied image ID (config digest) or the supplied reference (which must satisfy !reference.IsNameOnly) +func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, error) { + if id != "" && ref != nil { + return nil, errors.New("docker-daemon: reference must not have an image ID and a reference string specified at the same time") + } + if ref != nil { + if reference.IsNameOnly(ref) { + return nil, fmt.Errorf("docker-daemon: reference %s has neither a tag nor a digest", ref.String()) + } + // A github.com/distribution/reference value can have a tag and a digest at the same time! + // docker/reference does not handle that, so fail. + _, isTagged := ref.(reference.NamedTagged) + _, isDigested := ref.(reference.Canonical) + if isTagged && isDigested { + return nil, fmt.Errorf("docker-daemon: references with both a tag and digest are currently not supported") + } + } + return daemonReference{ + id: id, + ref: ref, + }, nil +} + +func (ref daemonReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; +// instead, see transports.ImageName(). +func (ref daemonReference) StringWithinTransport() string { + switch { + case ref.id != "": + return ref.id.String() + case ref.ref != nil: + return ref.ref.String() + default: // Coverage: Should never happen, NewReference above should refuse such values. + panic("Internal inconsistency: daemonReference has empty id and nil ref") + } +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref daemonReference) DockerReference() reference.Named { + return ref.ref // May be nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. 
various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref daemonReference) PolicyConfigurationIdentity() string { + // We must allow referring to images in the daemon by image ID, otherwise untagged images would not be accessible. + // But the existence of image IDs means that we can’t truly well namespace the input; the untagged images would have to fall into the default policy, + // which can be unexpected. So, punt. + return "" // This still allows using the default "" scope to define a policy for this transport. +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref daemonReference) PolicyConfigurationNamespaces() []string { + // See the explanation in daemonReference.PolicyConfigurationIdentity. + return []string{} +} + +// NewImage returns a types.Image for this reference. +// The caller must call .Close() on the returned Image. +func (ref daemonReference) NewImage(ctx *types.SystemContext) (types.Image, error) { + src, err := newImageSource(ctx, ref) + if err != nil { + return nil, err + } + return image.FromSource(src) +} + +// NewImageSource returns a types.ImageSource for this reference, +// asking the backend to use a manifest from requestedManifestMIMETypes if possible. +// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. +// The caller must call .Close() on the returned ImageSource. +func (ref daemonReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { + return newImageSource(ctx, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref daemonReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(ctx, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref daemonReference) DeleteImage(ctx *types.SystemContext) error { + // Should this just untag the image? Should this stop running containers? + // The semantics is not quite as clear as for remote repositories. + // The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant. + return fmt.Errorf("Deleting images not implemented for docker-daemon: images") +} diff --git a/vendor/src/github.com/containers/image/docker/daemon/daemon_types.go b/vendor/src/github.com/containers/image/docker/daemon/daemon_types.go new file mode 100644 index 00000000..c1bb9caa --- /dev/null +++ b/vendor/src/github.com/containers/image/docker/daemon/daemon_types.go @@ -0,0 +1,53 @@ +package daemon + +import "github.com/docker/distribution/digest" + +// Various data structures. 
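A note on the diffID type defined below: a diffID is a digest over the uncompressed layer tarball, whereas registries address compressed blobs. A hedged, standalone sketch of the distinction, using only the standard library (the sample bytes are placeholders):

    package main

    import (
    	"bytes"
    	"compress/gzip"
    	"crypto/sha256"
    	"fmt"
    	"log"
    )

    func main() {
    	layer := []byte("stand-in for an uncompressed layer tarball")

    	// diffID: digest over the uncompressed bytes; this is what the daemon transport serves.
    	fmt.Printf("diffID:      sha256:%x\n", sha256.Sum256(layer))

    	// Registry-style blob digest: digest over the compressed stream.
    	var buf bytes.Buffer
    	zw := gzip.NewWriter(&buf)
    	if _, err := zw.Write(layer); err != nil {
    		log.Fatal(err)
    	}
    	if err := zw.Close(); err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("blob digest: sha256:%x\n", sha256.Sum256(buf.Bytes()))
    }

Because (docker save) stores layers uncompressed, the schema2 manifest generated in daemon_src.go above can reuse diffIDs directly as layer digests.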
+ +// Based on github.com/docker/docker/image/tarexport/tarexport.go +const ( + manifestFileName = "manifest.json" + // legacyLayerFileName = "layer.tar" + // legacyConfigFileName = "json" + // legacyVersionFileName = "VERSION" + // legacyRepositoriesFileName = "repositories" +) + +type manifestItem struct { + Config string + RepoTags []string + Layers []string + Parent imageID `json:",omitempty"` + LayerSources map[diffID]distributionDescriptor `json:",omitempty"` +} + +type imageID string +type diffID digest.Digest + +// Based on github.com/docker/distribution/blobs.go +type distributionDescriptor struct { + MediaType string `json:"mediaType,omitempty"` + Size int64 `json:"size,omitempty"` + Digest digest.Digest `json:"digest,omitempty"` + URLs []string `json:"urls,omitempty"` +} + +// Based on github.com/docker/distribution/manifest/schema2/manifest.go +// FIXME: We are repeating this all over the place; make a public copy? +type schema2Manifest struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType,omitempty"` + Config distributionDescriptor `json:"config"` + Layers []distributionDescriptor `json:"layers"` +} + +// Based on github.com/docker/docker/image/image.go +// MOST CONTENT OMITTED AS UNNECESSARY +type dockerImage struct { + RootFS *rootFS `json:"rootfs,omitempty"` +} + +type rootFS struct { + Type string `json:"type"` + DiffIDs []diffID `json:"diff_ids,omitempty"` +} diff --git a/vendor/src/github.com/containers/image/docker/docker_client.go b/vendor/src/github.com/containers/image/docker/docker_client.go index 900a4bf8..35ecb96e 100644 --- a/vendor/src/github.com/containers/image/docker/docker_client.go +++ b/vendor/src/github.com/containers/image/docker/docker_client.go @@ -4,9 +4,11 @@ import ( "crypto/tls" "encoding/base64" "encoding/json" + "errors" "fmt" "io" "io/ioutil" + "net" "net/http" "os" "path/filepath" @@ -15,7 +17,9 @@ import ( "github.com/Sirupsen/logrus" "github.com/containers/image/types" - "github.com/docker/docker/pkg/homedir" + "github.com/containers/storage/pkg/homedir" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" ) const ( @@ -28,12 +32,17 @@ const ( dockerCfgObsolete = ".dockercfg" baseURL = "%s://%s/v2/" + baseURLV1 = "%s://%s/v1/_ping" tagsURL = "%s/tags/list" manifestURL = "%s/manifests/%s" blobsURL = "%s/blobs/%s" blobUploadURL = "%s/blobs/uploads/" ) +// ErrV1NotSupported is returned when we're trying to talk to a +// docker V1 registry. +var ErrV1NotSupported = errors.New("can't talk to a V1 docker registry") + // dockerClient is configuration for dealing with a single Docker registry. type dockerClient struct { ctx *types.SystemContext @@ -46,6 +55,96 @@ type dockerClient struct { signatureBase signatureStorageBase } +// this is cloned from docker/go-connections because upstream docker has changed +// it and make deps here fails otherwise. +// We'll drop this once we upgrade to docker 1.13.x deps. 
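The cloned helper below exists only to pin TLS settings; a minimal self-contained illustration of the same idea, assuming nothing beyond crypto/tls and net/http (the registry URL is an example):

    package main

    import (
    	"crypto/tls"
    	"fmt"
    	"net/http"
    )

    func main() {
    	// Refuse SSL3 and below, mirroring the MinVersion pin in serverDefault below.
    	tr := &http.Transport{
    		TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS10},
    	}
    	client := &http.Client{Transport: tr}
    	resp, err := client.Get("https://registry-1.docker.io/v2/")
    	if err != nil {
    		fmt.Println("request failed:", err)
    		return
    	}
    	defer resp.Body.Close()
    	fmt.Println("status:", resp.Status)
    }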
+func serverDefault() *tls.Config { + return &tls.Config{ + // Avoid fallback to SSL protocols < TLS1.0 + MinVersion: tls.VersionTLS10, + PreferServerCipherSuites: true, + CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, + } +} + +func newTransport() *http.Transport { + direct := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + } + tr := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: direct.Dial, + TLSHandshakeTimeout: 10 * time.Second, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + proxyDialer, err := sockets.DialerFromEnvironment(direct) + if err == nil { + tr.Dial = proxyDialer.Dial + } + return tr +} + +func setupCertificates(dir string, tlsc *tls.Config) error { + if dir == "" { + return nil + } + fs, err := ioutil.ReadDir(dir) + if err != nil && !os.IsNotExist(err) { + return err + } + + for _, f := range fs { + fullPath := filepath.Join(dir, f.Name()) + if strings.HasSuffix(f.Name(), ".crt") { + systemPool, err := tlsconfig.SystemCertPool() + if err != nil { + return fmt.Errorf("unable to get system cert pool: %v", err) + } + tlsc.RootCAs = systemPool + logrus.Debugf("crt: %s", fullPath) + data, err := ioutil.ReadFile(fullPath) + if err != nil { + return err + } + tlsc.RootCAs.AppendCertsFromPEM(data) + } + if strings.HasSuffix(f.Name(), ".cert") { + certName := f.Name() + keyName := certName[:len(certName)-5] + ".key" + logrus.Debugf("cert: %s", fullPath) + if !hasFile(fs, keyName) { + return fmt.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName) + } + cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName)) + if err != nil { + return err + } + tlsc.Certificates = append(tlsc.Certificates, cert) + } + if strings.HasSuffix(f.Name(), ".key") { + keyName := f.Name() + certName := keyName[:len(keyName)-4] + ".cert" + logrus.Debugf("key: %s", fullPath) + if !hasFile(fs, certName) { + return fmt.Errorf("missing client certificate %s for key %s", certName, keyName) + } + } + } + return nil +} + +func hasFile(files []os.FileInfo, name string) bool { + for _, f := range files { + if f.Name() == name { + return true + } + } + return false +} + // newDockerClient returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry) // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection) func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool) (*dockerClient, error) { @@ -53,32 +152,25 @@ func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool) if registry == dockerHostname { registry = dockerRegistry } - username, password, err := getAuth(ref.ref.Hostname()) + username, password, err := getAuth(ctx, ref.ref.Hostname()) if err != nil { return nil, err } - var tr *http.Transport + tr := newTransport() if ctx != nil && (ctx.DockerCertPath != "" || ctx.DockerInsecureSkipTLSVerify) { tlsc := &tls.Config{} - if ctx.DockerCertPath != "" { - cert, err := tls.LoadX509KeyPair(filepath.Join(ctx.DockerCertPath, "cert.pem"), filepath.Join(ctx.DockerCertPath, "key.pem")) - if err != nil { - return nil, fmt.Errorf("Error loading x509 key pair: %s", err) - } - tlsc.Certificates = append(tlsc.Certificates, cert) + if err := setupCertificates(ctx.DockerCertPath, tlsc); err != nil { + 
return nil, err } + tlsc.InsecureSkipVerify = ctx.DockerInsecureSkipTLSVerify - tr = &http.Transport{ - TLSClientConfig: tlsc, - } + tr.TLSClientConfig = tlsc } - client := &http.Client{ - Timeout: 1 * time.Minute, - } - if tr != nil { - client.Transport = tr + if tr.TLSClientConfig == nil { + tr.TLSClientConfig = serverDefault() } + client := &http.Client{Transport: tr} sigBase, err := configuredSignatureStorageBase(ctx, ref, write) if err != nil { @@ -108,13 +200,14 @@ func (c *dockerClient) makeRequest(method, url string, headers map[string][]stri } url = fmt.Sprintf(baseURL, c.scheme, c.registry) + url - return c.makeRequestToResolvedURL(method, url, headers, stream, -1) + return c.makeRequestToResolvedURL(method, url, headers, stream, -1, true) } // makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. // streamLen, if not -1, specifies the length of the data expected on stream. // makeRequest should generally be preferred. -func (c *dockerClient) makeRequestToResolvedURL(method, url string, headers map[string][]string, stream io.Reader, streamLen int64) (*http.Response, error) { +// TODO(runcom): too many arguments here, use a struct +func (c *dockerClient) makeRequestToResolvedURL(method, url string, headers map[string][]string, stream io.Reader, streamLen int64, sendAuth bool) (*http.Response, error) { req, err := http.NewRequest(method, url, stream) if err != nil { return nil, err @@ -128,7 +221,10 @@ func (c *dockerClient) makeRequestToResolvedURL(method, url string, headers map[ req.Header.Add(n, hh) } } - if c.wwwAuthenticate != "" { + if c.ctx != nil && c.ctx.DockerRegistryUserAgent != "" { + req.Header.Add("User-Agent", c.ctx.DockerRegistryUserAgent) + } + if c.wwwAuthenticate != "" && sendAuth { if err := c.setupRequestAuth(req); err != nil { return nil, err } @@ -210,8 +306,9 @@ func (c *dockerClient) getBearerToken(realm, service, scope string) (string, err if c.username != "" && c.password != "" { authReq.SetBasicAuth(c.username, c.password) } - // insecure for now to contact the external token service - tr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}} + tr := newTransport() + // TODO(runcom): insecure for now to contact the external token service + tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} client := &http.Client{Transport: tr} res, err := client.Do(authReq) if err != nil { @@ -246,69 +343,70 @@ func (c *dockerClient) getBearerToken(realm, service, scope string) (string, err return tokenStruct.Token, nil } -func getAuth(hostname string) (string, string, error) { - // TODO(runcom): get this from *cli.Context somehow - //if username != "" && password != "" { - //return username, password, nil - //} - if hostname == dockerHostname { - hostname = dockerAuthRegistry +func getAuth(ctx *types.SystemContext, registry string) (string, string, error) { + if ctx != nil && ctx.DockerAuthConfig != nil { + return ctx.DockerAuthConfig.Username, ctx.DockerAuthConfig.Password, nil } + var dockerAuth dockerConfigFile dockerCfgPath := filepath.Join(getDefaultConfigDir(".docker"), dockerCfgFileName) if _, err := os.Stat(dockerCfgPath); err == nil { j, err := ioutil.ReadFile(dockerCfgPath) if err != nil { return "", "", err } - var dockerAuth dockerConfigFile if err := json.Unmarshal(j, &dockerAuth); err != nil { return "", "", err } - // try the normal case - if c, ok := dockerAuth.AuthConfigs[hostname]; ok { - return decodeDockerAuth(c.Auth) - } + 
} else if os.IsNotExist(err) { + // try old config path oldDockerCfgPath := filepath.Join(getDefaultConfigDir(dockerCfgObsolete)) if _, err := os.Stat(oldDockerCfgPath); err != nil { - return "", "", nil //missing file is not an error + if os.IsNotExist(err) { + return "", "", nil + } + return "", "", fmt.Errorf("%s - %v", oldDockerCfgPath, err) } + j, err := ioutil.ReadFile(oldDockerCfgPath) if err != nil { return "", "", err } - var dockerAuthOld map[string]dockerAuthConfigObsolete - if err := json.Unmarshal(j, &dockerAuthOld); err != nil { + if err := json.Unmarshal(j, &dockerAuth.AuthConfigs); err != nil { return "", "", err } - if c, ok := dockerAuthOld[hostname]; ok { - return decodeDockerAuth(c.Auth) - } - } else { - // if file is there but we can't stat it for any reason other - // than it doesn't exist then stop + + } else if err != nil { return "", "", fmt.Errorf("%s - %v", dockerCfgPath, err) } - return "", "", nil -} -type apiErr struct { - Code string - Message string - Detail interface{} + // I'm feeling lucky + if c, exists := dockerAuth.AuthConfigs[registry]; exists { + return decodeDockerAuth(c.Auth) + } + + // bad luck; let's normalize the entries first + registry = normalizeRegistry(registry) + normalizedAuths := map[string]dockerAuthConfig{} + for k, v := range dockerAuth.AuthConfigs { + normalizedAuths[normalizeRegistry(k)] = v + } + if c, exists := normalizedAuths[registry]; exists { + return decodeDockerAuth(c.Auth) + } + return "", "", nil } type pingResponse struct { WWWAuthenticate string APIVersion string scheme string - errors []apiErr } func (c *dockerClient) ping() (*pingResponse, error) { ping := func(scheme string) (*pingResponse, error) { url := fmt.Sprintf(baseURL, scheme, c.registry) - resp, err := c.client.Get(url) + resp, err := c.makeRequestToResolvedURL("GET", url, nil, nil, -1, true) logrus.Debugf("Ping %s err %#v", url, err) if err != nil { return nil, err @@ -322,22 +420,40 @@ func (c *dockerClient) ping() (*pingResponse, error) { pr.WWWAuthenticate = resp.Header.Get("WWW-Authenticate") pr.APIVersion = resp.Header.Get("Docker-Distribution-Api-Version") pr.scheme = scheme - if resp.StatusCode == http.StatusUnauthorized { - type APIErrors struct { - Errors []apiErr - } - errs := &APIErrors{} - if err := json.NewDecoder(resp.Body).Decode(errs); err != nil { - return nil, err - } - pr.errors = errs.Errors - } return pr, nil } pr, err := ping("https") - if err != nil && c.ctx.DockerInsecureSkipTLSVerify { + if err != nil && c.ctx != nil && c.ctx.DockerInsecureSkipTLSVerify { pr, err = ping("http") } + if err != nil { + err = fmt.Errorf("pinging docker registry returned %+v", err) + if c.ctx != nil && c.ctx.DockerDisableV1Ping { + return nil, err + } + // best effort to understand if we're talking to a V1 registry + pingV1 := func(scheme string) bool { + url := fmt.Sprintf(baseURLV1, scheme, c.registry) + resp, err := c.makeRequestToResolvedURL("GET", url, nil, nil, -1, true) + logrus.Debugf("Ping %s err %#v", url, err) + if err != nil { + return false + } + defer resp.Body.Close() + logrus.Debugf("Ping %s status %d", scheme+"://"+c.registry+"/v1/_ping", resp.StatusCode) + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { + return false + } + return true + } + isV1 := pingV1("https") + if !isV1 && c.ctx != nil && c.ctx.DockerInsecureSkipTLSVerify { + isV1 = pingV1("http") + } + if isV1 { + err = ErrV1NotSupported + } + } return pr, err } @@ -345,10 +461,6 @@ func getDefaultConfigDir(confPath string) string { return 
filepath.Join(homedir.Get(), confPath) } -type dockerAuthConfigObsolete struct { - Auth string `json:"auth"` -} - type dockerAuthConfig struct { Auth string `json:"auth,omitempty"` } @@ -371,3 +483,28 @@ func decodeDockerAuth(s string) (string, string, error) { password := strings.Trim(parts[1], "\x00") return user, password, nil } + +// convertToHostname converts a registry url which has http|https prepended +// to just an hostname. +// Copied from github.com/docker/docker/registry/auth.go +func convertToHostname(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.TrimPrefix(url, "http://") + } else if strings.HasPrefix(url, "https://") { + stripped = strings.TrimPrefix(url, "https://") + } + + nameParts := strings.SplitN(stripped, "/", 2) + + return nameParts[0] +} + +func normalizeRegistry(registry string) string { + normalized := convertToHostname(registry) + switch normalized { + case "registry-1.docker.io", "docker.io": + return "index.docker.io" + } + return normalized +} diff --git a/vendor/src/github.com/containers/image/docker/docker_image.go b/vendor/src/github.com/containers/image/docker/docker_image.go index b02a8cd9..59468755 100644 --- a/vendor/src/github.com/containers/image/docker/docker_image.go +++ b/vendor/src/github.com/containers/image/docker/docker_image.go @@ -24,7 +24,11 @@ func newImage(ctx *types.SystemContext, ref dockerReference) (types.Image, error if err != nil { return nil, err } - return &Image{Image: image.FromSource(s), src: s}, nil + img, err := image.FromSource(s) + if err != nil { + return nil, err + } + return &Image{Image: img, src: s}, nil } // SourceRefFullName returns a fully expanded name for the repository this image is in. diff --git a/vendor/src/github.com/containers/image/docker/docker_image_dest.go b/vendor/src/github.com/containers/image/docker/docker_image_dest.go index a16cf3fb..54df01d7 100644 --- a/vendor/src/github.com/containers/image/docker/docker_image_dest.go +++ b/vendor/src/github.com/containers/image/docker/docker_image_dest.go @@ -2,8 +2,6 @@ package docker import ( "bytes" - "crypto/sha256" - "encoding/hex" "fmt" "io" "io/ioutil" @@ -11,18 +9,18 @@ import ( "net/url" "os" "path/filepath" - "strconv" "github.com/Sirupsen/logrus" "github.com/containers/image/manifest" "github.com/containers/image/types" + "github.com/docker/distribution/digest" ) type dockerImageDestination struct { ref dockerReference c *dockerClient // State - manifestDigest string // or "" if not yet known. + manifestDigest digest.Digest // or "" if not yet known. } // newImageDestination creates a new ImageDestination for the specified image reference. @@ -62,29 +60,52 @@ func (d *dockerImageDestination) SupportsSignatures() error { return fmt.Errorf("Pushing signatures to a Docker Registry is not supported") } -// PutBlob writes contents of stream and returns its computed digest and size. -// A digest can be optionally provided if known, the specific image destination can decide to play with it or not. -// The length of stream is expected to be expectedSize; if expectedSize == -1, it is not known. +// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. +func (d *dockerImageDestination) ShouldCompressLayers() bool { + return true +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. 
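+// (Foreign layers, e.g. Windows base layers, are distributed via the URLs embedded in the manifest
+// rather than pushed as blobs; registries will typically reject their content, so passing the URLs
+// through unchanged is the right choice for a registry destination.)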
+func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool { + return true +} + +// sizeCounter is an io.Writer which only counts the total size of its input. +type sizeCounter struct{ size int64 } + +func (c *sizeCounter) Write(p []byte) (n int, err error) { + c.size += int64(len(p)) + return len(p), nil +} + +// PutBlob writes contents of stream and returns data representing the result (with all data filled in). +// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Size is the expected length of stream, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *dockerImageDestination) PutBlob(stream io.Reader, digest string, expectedSize int64) (string, int64, error) { - if digest != "" { - checkURL := fmt.Sprintf(blobsURL, d.ref.ref.RemoteName(), digest) +func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) { + if inputInfo.Digest.String() != "" { + checkURL := fmt.Sprintf(blobsURL, d.ref.ref.RemoteName(), inputInfo.Digest.String()) logrus.Debugf("Checking %s", checkURL) res, err := d.c.makeRequest("HEAD", checkURL, nil, nil) if err != nil { - return "", -1, err + return types.BlobInfo{}, err } defer res.Body.Close() - if res.StatusCode == http.StatusOK { + switch res.StatusCode { + case http.StatusOK: logrus.Debugf("... already exists, not uploading") - blobLength, err := strconv.ParseInt(res.Header.Get("Content-Length"), 10, 64) - if err != nil { - return "", -1, err - } - return digest, blobLength, nil + return types.BlobInfo{Digest: inputInfo.Digest, Size: getBlobSize(res)}, nil + case http.StatusUnauthorized: + logrus.Debugf("... not authorized") + return types.BlobInfo{}, fmt.Errorf("not authorized to read from destination repository %s", d.ref.ref.RemoteName()) + case http.StatusNotFound: + // noop + default: + return types.BlobInfo{}, fmt.Errorf("failed to read from destination repository %s: %v", d.ref.ref.RemoteName(), http.StatusText(res.StatusCode)) } logrus.Debugf("... 
failed, status %d", res.StatusCode)
 	}
 
@@ -94,63 +115,99 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, digest string, expect
 	logrus.Debugf("Uploading %s", uploadURL)
 	res, err := d.c.makeRequest("POST", uploadURL, nil, nil)
 	if err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	defer res.Body.Close()
 	if res.StatusCode != http.StatusAccepted {
 		logrus.Debugf("Error initiating layer upload, response %#v", *res)
-		return "", -1, fmt.Errorf("Error initiating layer upload to %s, status %d", uploadURL, res.StatusCode)
+		return types.BlobInfo{}, fmt.Errorf("Error initiating layer upload to %s, status %d", uploadURL, res.StatusCode)
 	}
 	uploadLocation, err := res.Location()
 	if err != nil {
-		return "", -1, fmt.Errorf("Error determining upload URL: %s", err.Error())
+		return types.BlobInfo{}, fmt.Errorf("Error determining upload URL: %s", err.Error())
 	}
 
-	h := sha256.New()
-	tee := io.TeeReader(stream, h)
-	res, err = d.c.makeRequestToResolvedURL("PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, expectedSize)
+	digester := digest.Canonical.New()
+	sizeCounter := &sizeCounter{}
+	tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
+	res, err = d.c.makeRequestToResolvedURL("PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, true)
 	if err != nil {
 		logrus.Debugf("Error uploading layer chunked, response %#v", *res)
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	defer res.Body.Close()
-	hash := h.Sum(nil)
-	computedDigest := "sha256:" + hex.EncodeToString(hash[:])
+	computedDigest := digester.Digest()
 
 	uploadLocation, err = res.Location()
 	if err != nil {
-		return "", -1, fmt.Errorf("Error determining upload URL: %s", err.Error())
+		return types.BlobInfo{}, fmt.Errorf("Error determining upload URL: %s", err.Error())
 	}
 
 	// FIXME: DELETE uploadLocation on failure
 
 	locationQuery := uploadLocation.Query()
-	// TODO: check digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
-	locationQuery.Set("digest", computedDigest)
+	// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
+	locationQuery.Set("digest", computedDigest.String())
 	uploadLocation.RawQuery = locationQuery.Encode()
-	res, err = d.c.makeRequestToResolvedURL("PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1)
+	res, err = d.c.makeRequestToResolvedURL("PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, true)
 	if err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	defer res.Body.Close()
 	if res.StatusCode != http.StatusCreated {
 		logrus.Debugf("Error uploading layer, response %#v", *res)
-		return "", -1, fmt.Errorf("Error uploading layer to %s, status %d", uploadLocation, res.StatusCode)
+		return types.BlobInfo{}, fmt.Errorf("Error uploading layer to %s, status %d", uploadLocation, res.StatusCode)
 	}
 
-	logrus.Debugf("Upload of layer %s complete", digest)
-	return computedDigest, res.Request.ContentLength, nil
+	logrus.Debugf("Upload of layer %s complete", computedDigest)
+	return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
+}
+
+func (d *dockerImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+	if info.Digest == "" {
+		return false, -1, fmt.Errorf(`Can not check for a blob with unknown digest`)
+	}
+	checkURL := 
fmt.Sprintf(blobsURL, d.ref.ref.RemoteName(), info.Digest.String()) + + logrus.Debugf("Checking %s", checkURL) + res, err := d.c.makeRequest("HEAD", checkURL, nil, nil) + if err != nil { + return false, -1, err + } + defer res.Body.Close() + switch res.StatusCode { + case http.StatusOK: + logrus.Debugf("... already exists") + return true, getBlobSize(res), nil + case http.StatusUnauthorized: + logrus.Debugf("... not authorized") + return false, -1, fmt.Errorf("not authorized to read from destination repository %s", d.ref.ref.RemoteName()) + case http.StatusNotFound: + logrus.Debugf("... not present") + return false, -1, types.ErrBlobNotFound + default: + logrus.Errorf("failed to read from destination repository %s: %v", d.ref.ref.RemoteName(), http.StatusText(res.StatusCode)) + } + logrus.Debugf("... failed, status %d, ignoring", res.StatusCode) + return false, -1, types.ErrBlobNotFound +} + +func (d *dockerImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) { + return info, nil } func (d *dockerImageDestination) PutManifest(m []byte) error { - // FIXME: This only allows upload by digest, not creating a tag. See the - // corresponding comment in openshift.NewImageDestination. digest, err := manifest.Digest(m) if err != nil { return err } d.manifestDigest = digest - url := fmt.Sprintf(manifestURL, d.ref.ref.RemoteName(), digest) + + reference, err := d.ref.tagOrDigest() + if err != nil { + return err + } + url := fmt.Sprintf(manifestURL, d.ref.ref.RemoteName(), reference) headers := map[string][]string{} mimeType := manifest.GuessMIMEType(m) @@ -185,8 +242,8 @@ func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error { return fmt.Errorf("Pushing signatures to a Docker Registry is not supported, and there is no applicable signature storage configured") } - // FIXME: This assumption that signatures are stored after the manifest rather breaks the model. - if d.manifestDigest == "" { + if d.manifestDigest.String() == "" { + // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures return fmt.Errorf("Unknown manifest digest, can't add signatures") } @@ -237,6 +294,8 @@ func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) } return nil + case "http", "https": + return fmt.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) default: return fmt.Errorf("Unsupported scheme when writing signature to %s", url.String()) } @@ -254,6 +313,8 @@ func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error } return false, err + case "http", "https": + return false, fmt.Errorf("Writing directly to a %s sigstore %s is not supported. 
Configure a sigstore-staging: location", url.Scheme, url.String()) default: return false, fmt.Errorf("Unsupported scheme when deleting signature from %s", url.String()) } diff --git a/vendor/src/github.com/containers/image/docker/docker_image_src.go b/vendor/src/github.com/containers/image/docker/docker_image_src.go index 47d47f72..8a85891f 100644 --- a/vendor/src/github.com/containers/image/docker/docker_image_src.go +++ b/vendor/src/github.com/containers/image/docker/docker_image_src.go @@ -13,17 +13,10 @@ import ( "github.com/Sirupsen/logrus" "github.com/containers/image/manifest" "github.com/containers/image/types" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/client" ) -type errFetchManifest struct { - statusCode int - body []byte -} - -func (e errFetchManifest) Error() string { - return fmt.Sprintf("error fetching manifest: status code: %d, body: %s", e.statusCode, string(e.body)) -} - type dockerImageSource struct { ref dockerReference requestedManifestMIMETypes []string @@ -75,6 +68,8 @@ func simplifyContentType(contentType string) string { return mimeType } +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. func (s *dockerImageSource) GetManifest() ([]byte, string, error) { err := s.ensureManifestIsLoaded() if err != nil { @@ -83,6 +78,31 @@ func (s *dockerImageSource) GetManifest() ([]byte, string, error) { return s.cachedManifest, s.cachedManifestMIMEType, nil } +func (s *dockerImageSource) fetchManifest(tagOrDigest string) ([]byte, string, error) { + url := fmt.Sprintf(manifestURL, s.ref.ref.RemoteName(), tagOrDigest) + headers := make(map[string][]string) + headers["Accept"] = s.requestedManifestMIMETypes + res, err := s.c.makeRequest("GET", url, headers, nil) + if err != nil { + return nil, "", err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return nil, "", client.HandleErrorResponse(res) + } + manblob, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, "", err + } + return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil +} + +// GetTargetManifest returns an image's manifest given a digest. +// This is mainly used to retrieve a single image's manifest out of a manifest list. +func (s *dockerImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { + return s.fetchManifest(digest.String()) +} + // ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType // // ImageSource implementations are not required or expected to do any caching, @@ -99,32 +119,53 @@ func (s *dockerImageSource) ensureManifestIsLoaded() error { if err != nil { return err } - url := fmt.Sprintf(manifestURL, s.ref.ref.RemoteName(), reference) - // TODO(runcom) set manifest version header! schema1 for now - then schema2 etc etc and v1 - // TODO(runcom) NO, switch on the resulter manifest like Docker is doing - headers := make(map[string][]string) - headers["Accept"] = s.requestedManifestMIMETypes - res, err := s.c.makeRequest("GET", url, headers, nil) + + manblob, mt, err := s.fetchManifest(reference) if err != nil { return err } - defer res.Body.Close() - manblob, err := ioutil.ReadAll(res.Body) - if err != nil { - return err - } - if res.StatusCode != http.StatusOK { - return errFetchManifest{res.StatusCode, manblob} - } // We might validate manblob against the Docker-Content-Digest header here to protect against transport errors. 
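 	// A hedged sketch of such a check (not wired up here; fetchManifest would need to expose the
 	// response headers):
 	//	if dcd := res.Header.Get("Docker-Content-Digest"); dcd != "" {
 	//		if d, err := digest.ParseDigest(dcd); err == nil && digest.FromBytes(manblob) != d {
 	//			return fmt.Errorf("manifest does not match Docker-Content-Digest %s", d)
 	//		}
 	//	}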
s.cachedManifest = manblob - s.cachedManifestMIMEType = simplifyContentType(res.Header.Get("Content-Type")) + s.cachedManifestMIMEType = mt return nil } +func (s *dockerImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64, error) { + var ( + resp *http.Response + err error + ) + for _, url := range urls { + resp, err = s.c.makeRequestToResolvedURL("GET", url, nil, nil, -1, false) + if err == nil { + if resp.StatusCode != http.StatusOK { + err = fmt.Errorf("error fetching external blob from %q: %d", url, resp.StatusCode) + logrus.Debug(err) + continue + } + } + } + if resp.Body != nil && err == nil { + return resp.Body, getBlobSize(resp), nil + } + return nil, 0, err +} + +func getBlobSize(resp *http.Response) int64 { + size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + size = -1 + } + return size +} + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -func (s *dockerImageSource) GetBlob(digest string) (io.ReadCloser, int64, error) { - url := fmt.Sprintf(blobsURL, s.ref.ref.RemoteName(), digest) +func (s *dockerImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { + if len(info.URLs) != 0 { + return s.getExternalBlob(info.URLs) + } + + url := fmt.Sprintf(blobsURL, s.ref.ref.RemoteName(), info.Digest.String()) logrus.Debugf("Downloading %s", url) res, err := s.c.makeRequest("GET", url, nil, nil) if err != nil { @@ -134,11 +175,7 @@ func (s *dockerImageSource) GetBlob(digest string) (io.ReadCloser, int64, error) // print url also return nil, 0, fmt.Errorf("Invalid status code returned when fetching blob %d", res.StatusCode) } - size, err := strconv.ParseInt(res.Header.Get("Content-Length"), 10, 64) - if err != nil { - size = -1 - } - return res.Body, size, nil + return res.Body, getBlobSize(res), nil } func (s *dockerImageSource) GetSignatures() ([][]byte, error) { @@ -239,7 +276,7 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error { switch get.StatusCode { case http.StatusOK: case http.StatusNotFound: - return fmt.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry.", ref.ref) + return fmt.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref) default: return fmt.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status) } diff --git a/vendor/src/github.com/containers/image/docker/docker_transport.go b/vendor/src/github.com/containers/image/docker/docker_transport.go index 5ab5efe3..1284dc4b 100644 --- a/vendor/src/github.com/containers/image/docker/docker_transport.go +++ b/vendor/src/github.com/containers/image/docker/docker_transport.go @@ -5,8 +5,8 @@ import ( "strings" "github.com/containers/image/docker/policyconfiguration" + "github.com/containers/image/docker/reference" "github.com/containers/image/types" - "github.com/docker/docker/reference" ) // Transport is an ImageTransport for Docker registry-hosted images. @@ -115,8 +115,10 @@ func (ref dockerReference) PolicyConfigurationNamespaces() []string { return policyconfiguration.DockerReferenceNamespaces(ref.ref) } -// NewImage returns a types.Image for this reference. +// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. // The caller must call .Close() on the returned Image. 
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
 func (ref dockerReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
 	return newImage(ctx, ref)
 }
diff --git a/vendor/src/github.com/containers/image/docker/lookaside.go b/vendor/src/github.com/containers/image/docker/lookaside.go
index 2cc0a9f4..743e172c 100644
--- a/vendor/src/github.com/containers/image/docker/lookaside.go
+++ b/vendor/src/github.com/containers/image/docker/lookaside.go
@@ -9,6 +9,7 @@ import (
 	"path/filepath"
 	"strings"
 
+	"github.com/docker/distribution/digest"
 	"github.com/ghodss/yaml"
 
 	"github.com/Sirupsen/logrus"
 
@@ -188,11 +189,11 @@ func (ns registryNamespace) signatureTopLevel(write bool) string {
 
 // signatureStorageURL returns a URL usable for accessing a signature index in base with a known manifestDigest, or nil if not applicable.
 // Returns nil iff base == nil.
-func signatureStorageURL(base signatureStorageBase, manifestDigest string, index int) *url.URL {
+func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL {
 	if base == nil {
 		return nil
 	}
 	url := *base
-	url.Path = fmt.Sprintf("%s@%s/signature-%d", url.Path, manifestDigest, index+1)
+	url.Path = fmt.Sprintf("%s@%s/signature-%d", url.Path, manifestDigest.String(), index+1)
 	return &url
 }
diff --git a/vendor/src/github.com/containers/image/docker/policyconfiguration/naming.go b/vendor/src/github.com/containers/image/docker/policyconfiguration/naming.go
index ace300e4..08892cb1 100644
--- a/vendor/src/github.com/containers/image/docker/policyconfiguration/naming.go
+++ b/vendor/src/github.com/containers/image/docker/policyconfiguration/naming.go
@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/docker/docker/reference"
+	"github.com/containers/image/docker/reference"
 )
 
 // DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup,
diff --git a/vendor/src/github.com/containers/image/docker/reference/doc.go b/vendor/src/github.com/containers/image/docker/reference/doc.go
new file mode 100644
index 00000000..a75ea749
--- /dev/null
+++ b/vendor/src/github.com/containers/image/docker/reference/doc.go
@@ -0,0 +1,6 @@
+// Package reference is a fork of the upstream docker/docker/reference package.
+// The package is forked because we need consistency especially when storing and
+// checking signatures (RH patches break this consistency because they modify
+// docker/docker/reference as part of a patch carried in projectatomic/docker).
+// The version of this package is v1.12.1 from upstream, update as necessary.
+package reference diff --git a/vendor/src/github.com/containers/image/docker/reference/reference.go b/vendor/src/github.com/containers/image/docker/reference/reference.go new file mode 100644 index 00000000..de26e44c --- /dev/null +++ b/vendor/src/github.com/containers/image/docker/reference/reference.go @@ -0,0 +1,220 @@ +package reference + +import ( + "errors" + "fmt" + "regexp" + "strings" + + "github.com/docker/distribution/digest" + distreference "github.com/docker/distribution/reference" +) + +const ( + // DefaultTag defines the default tag used when performing images related actions and no tag or digest is specified + DefaultTag = "latest" + // DefaultHostname is the default built-in hostname + DefaultHostname = "docker.io" + // LegacyDefaultHostname is automatically converted to DefaultHostname + LegacyDefaultHostname = "index.docker.io" + // DefaultRepoPrefix is the prefix used for default repositories in default host + DefaultRepoPrefix = "library/" +) + +// Named is an object with a full name +type Named interface { + // Name returns normalized repository name, like "ubuntu". + Name() string + // String returns full reference, like "ubuntu@sha256:abcdef..." + String() string + // FullName returns full repository name with hostname, like "docker.io/library/ubuntu" + FullName() string + // Hostname returns hostname for the reference, like "docker.io" + Hostname() string + // RemoteName returns the repository component of the full name, like "library/ubuntu" + RemoteName() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Canonical reference is an object with a fully unique +// name including a name with hostname and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name, otherwise an error is +// returned. +// If an error was encountered it is returned, along with a nil Reference. +func ParseNamed(s string) (Named, error) { + named, err := distreference.ParseNamed(s) + if err != nil { + return nil, fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", s) + } + r, err := WithName(named.Name()) + if err != nil { + return nil, err + } + if canonical, isCanonical := named.(distreference.Canonical); isCanonical { + return WithDigest(r, canonical.Digest()) + } + if tagged, isTagged := named.(distreference.NamedTagged); isTagged { + return WithTag(r, tagged.Tag()) + } + return r, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + name, err := normalize(name) + if err != nil { + return nil, err + } + if err := validateName(name); err != nil { + return nil, err + } + r, err := distreference.WithName(name) + if err != nil { + return nil, err + } + return &namedRef{r}, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + r, err := distreference.WithTag(name, tag) + if err != nil { + return nil, err + } + return &taggedRef{namedRef{r}}, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. 
+func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + r, err := distreference.WithDigest(name, digest) + if err != nil { + return nil, err + } + return &canonicalRef{namedRef{r}}, nil +} + +type namedRef struct { + distreference.Named +} +type taggedRef struct { + namedRef +} +type canonicalRef struct { + namedRef +} + +func (r *namedRef) FullName() string { + hostname, remoteName := splitHostname(r.Name()) + return hostname + "/" + remoteName +} +func (r *namedRef) Hostname() string { + hostname, _ := splitHostname(r.Name()) + return hostname +} +func (r *namedRef) RemoteName() string { + _, remoteName := splitHostname(r.Name()) + return remoteName +} +func (r *taggedRef) Tag() string { + return r.namedRef.Named.(distreference.NamedTagged).Tag() +} +func (r *canonicalRef) Digest() digest.Digest { + return r.namedRef.Named.(distreference.Canonical).Digest() +} + +// WithDefaultTag adds a default tag to a reference if it only has a repo name. +func WithDefaultTag(ref Named) Named { + if IsNameOnly(ref) { + ref, _ = WithTag(ref, DefaultTag) + } + return ref +} + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// ParseIDOrReference parses string for an image ID or a reference. ID can be +// without a default prefix. +func ParseIDOrReference(idOrRef string) (digest.Digest, Named, error) { + if err := validateID(idOrRef); err == nil { + idOrRef = "sha256:" + idOrRef + } + if dgst, err := digest.ParseDigest(idOrRef); err == nil { + return dgst, nil, nil + } + ref, err := ParseNamed(idOrRef) + return "", ref, err +} + +// splitHostname splits a repository name to hostname and remotename string. +// If no valid hostname is found, the default hostname is used. Repository name +// needs to be already validated before. +func splitHostname(name string) (hostname, remoteName string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { + hostname, remoteName = DefaultHostname, name + } else { + hostname, remoteName = name[:i], name[i+1:] + } + if hostname == LegacyDefaultHostname { + hostname = DefaultHostname + } + if hostname == DefaultHostname && !strings.ContainsRune(remoteName, '/') { + remoteName = DefaultRepoPrefix + remoteName + } + return +} + +// normalize returns a repository name in its normalized form, meaning it +// will not contain default hostname nor library/ prefix for official images. 
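+// For example, per splitHostname above:
+//	"ubuntu"                         -> "ubuntu"
+//	"docker.io/library/ubuntu"       -> "ubuntu"
+//	"index.docker.io/library/ubuntu" -> "ubuntu" (legacy hostname folded into docker.io)
+//	"quay.io/foo/bar"                -> "quay.io/foo/bar"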
+func normalize(name string) (string, error) { + host, remoteName := splitHostname(name) + if strings.ToLower(remoteName) != remoteName { + return "", errors.New("invalid reference format: repository name must be lowercase") + } + if host == DefaultHostname { + if strings.HasPrefix(remoteName, DefaultRepoPrefix) { + return strings.TrimPrefix(remoteName, DefaultRepoPrefix), nil + } + return remoteName, nil + } + return name, nil +} + +var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) + +func validateID(id string) error { + if ok := validHex.MatchString(id); !ok { + return fmt.Errorf("image ID %q is invalid", id) + } + return nil +} + +func validateName(name string) error { + if err := validateID(name); err == nil { + return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name) + } + return nil +} diff --git a/vendor/src/github.com/containers/image/image/docker_list.go b/vendor/src/github.com/containers/image/image/docker_list.go new file mode 100644 index 00000000..bfb3b570 --- /dev/null +++ b/vendor/src/github.com/containers/image/image/docker_list.go @@ -0,0 +1,64 @@ +package image + +import ( + "encoding/json" + "errors" + "fmt" + "runtime" + + "github.com/containers/image/manifest" + "github.com/containers/image/types" + "github.com/docker/distribution/digest" +) + +type platformSpec struct { + Architecture string `json:"architecture"` + OS string `json:"os"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + Variant string `json:"variant,omitempty"` + Features []string `json:"features,omitempty"` +} + +// A manifestDescriptor references a platform-specific manifest. +type manifestDescriptor struct { + descriptor + Platform platformSpec `json:"platform"` +} + +type manifestList struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Manifests []manifestDescriptor `json:"manifests"` +} + +func manifestSchema2FromManifestList(src types.ImageSource, manblob []byte) (genericManifest, error) { + list := manifestList{} + if err := json.Unmarshal(manblob, &list); err != nil { + return nil, err + } + var targetManifestDigest digest.Digest + for _, d := range list.Manifests { + if d.Platform.Architecture == runtime.GOARCH && d.Platform.OS == runtime.GOOS { + targetManifestDigest = d.Digest + break + } + } + if targetManifestDigest == "" { + return nil, errors.New("no supported platform found in manifest list") + } + manblob, mt, err := src.GetTargetManifest(targetManifestDigest) + if err != nil { + return nil, err + } + + matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) + if err != nil { + return nil, fmt.Errorf("Error computing manifest digest: %v", err) + } + if !matches { + return nil, fmt.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest) + } + + return manifestInstanceFromBlob(src, manblob, mt) +} diff --git a/vendor/src/github.com/containers/image/image/docker_schema1.go b/vendor/src/github.com/containers/image/image/docker_schema1.go new file mode 100644 index 00000000..c2ad9a7d --- /dev/null +++ b/vendor/src/github.com/containers/image/image/docker_schema1.go @@ -0,0 +1,327 @@ +package image + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "strings" + "time" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/manifest" + "github.com/containers/image/types" + "github.com/docker/distribution/digest" +) + +var ( + validHex = 
regexp.MustCompile(`^([a-f0-9]{64})$`) +) + +type fsLayersSchema1 struct { + BlobSum digest.Digest `json:"blobSum"` +} + +type historySchema1 struct { + V1Compatibility string `json:"v1Compatibility"` +} + +// historySchema1.V1Compatibility is a string containing this structure. It is similar to v1Image but not the same, in particular note the ThrowAway field. +type v1Compatibility struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig struct { + Cmd []string + } `json:"container_config,omitempty"` + Author string `json:"author,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` +} + +type manifestSchema1 struct { + Name string `json:"name"` + Tag string `json:"tag"` + Architecture string `json:"architecture"` + FSLayers []fsLayersSchema1 `json:"fsLayers"` + History []historySchema1 `json:"history"` + SchemaVersion int `json:"schemaVersion"` +} + +func manifestSchema1FromManifest(manifest []byte) (genericManifest, error) { + mschema1 := &manifestSchema1{} + if err := json.Unmarshal(manifest, mschema1); err != nil { + return nil, err + } + if mschema1.SchemaVersion != 1 { + return nil, fmt.Errorf("unsupported schema version %d", mschema1.SchemaVersion) + } + if len(mschema1.FSLayers) != len(mschema1.History) { + return nil, errors.New("length of history not equal to number of layers") + } + if len(mschema1.FSLayers) == 0 { + return nil, errors.New("no FSLayers in manifest") + } + + if err := fixManifestLayers(mschema1); err != nil { + return nil, err + } + return mschema1, nil +} + +// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. +func manifestSchema1FromComponents(ref reference.Named, fsLayers []fsLayersSchema1, history []historySchema1, architecture string) genericManifest { + var name, tag string + if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them. + name = ref.RemoteName() + if tagged, ok := ref.(reference.NamedTagged); ok { + tag = tagged.Tag() + } + } + return &manifestSchema1{ + Name: name, + Tag: tag, + Architecture: architecture, + FSLayers: fsLayers, + History: history, + SchemaVersion: 1, + } +} + +func (m *manifestSchema1) serialize() ([]byte, error) { + // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. + unsigned, err := json.Marshal(*m) + if err != nil { + return nil, err + } + return manifest.AddDummyV2S1Signature(unsigned) +} + +func (m *manifestSchema1) manifestMIMEType() string { + return manifest.DockerV2Schema1SignedMediaType +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. +func (m *manifestSchema1) ConfigInfo() types.BlobInfo { + return types.BlobInfo{} +} + +// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. +// The result is cached; it is OK to call this however often you need. +func (m *manifestSchema1) ConfigBlob() ([]byte, error) { + return nil, nil +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *manifestSchema1) LayerInfos() []types.BlobInfo { + layers := make([]types.BlobInfo, len(m.FSLayers)) + for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) + layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1} + } + return layers +} + +func (m *manifestSchema1) imageInspectInfo() (*types.ImageInspectInfo, error) { + v1 := &v1Image{} + if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), v1); err != nil { + return nil, err + } + return &types.ImageInspectInfo{ + Tag: m.Tag, + DockerVersion: v1.DockerVersion, + Created: v1.Created, + Labels: v1.Config.Labels, + Architecture: v1.Architecture, + Os: v1.OS, + }, nil +} + +// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. +// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute +// (most importantly it forces us to download the full layers even if they are already present at the destination). +func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { + return options.ManifestMIMEType == manifest.DockerV2Schema2MediaType +} + +// UpdatedImage returns a types.Image modified according to options. +// This does not change the state of the original Image object. +func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { + copy := *m + if options.LayerInfos != nil { + // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well. + if len(copy.FSLayers) != len(options.LayerInfos) { + return nil, fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.FSLayers), len(options.LayerInfos)) + } + for i, info := range options.LayerInfos { + // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest, + // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. + // So, we don't bother recomputing the IDs in m.History.V1Compatibility. + copy.FSLayers[(len(options.LayerInfos)-1)-i].BlobSum = info.Digest + } + } + + switch options.ManifestMIMEType { + case "": // No conversion, OK + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: + // We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature); so, + // handle conversions between them by doing nothing. + case manifest.DockerV2Schema2MediaType: + return copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) + default: + return nil, fmt.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType) + } + + return memoryImageFromManifest(&copy), nil +} + +// fixManifestLayers, after validating the supplied manifest +// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in manifest.History), +// modifies manifest to only have one entry for each layer ID in manifest.History (deleting the older duplicates, +// both from manifest.History and manifest.FSLayers).
+// Note that even after this succeeds, manifest.FSLayers may contain duplicate entries +// (for Dockerfile operations which change the configuration but not the filesystem). +func fixManifestLayers(manifest *manifestSchema1) error { + type imageV1 struct { + ID string + Parent string + } + // Per the specification, we can assume that len(manifest.FSLayers) == len(manifest.History) + imgs := make([]*imageV1, len(manifest.FSLayers)) + for i := range manifest.FSLayers { + img := &imageV1{} + + if err := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), img); err != nil { + return err + } + + imgs[i] = img + if err := validateV1ID(img.ID); err != nil { + return err + } + } + if imgs[len(imgs)-1].Parent != "" { + return errors.New("Invalid parent ID in the base layer of the image") + } + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + var lastID string + for _, img := range imgs { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + // backwards loop so that we keep the remaining indexes after removing items + for i := len(imgs) - 2; i >= 0; i-- { + if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue + manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...) + manifest.History = append(manifest.History[:i], manifest.History[i+1:]...) + } else if imgs[i].Parent != imgs[i+1].ID { + return fmt.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) + } + } + return nil +} + +func validateV1ID(id string) error { + if ok := validHex.MatchString(id); !ok { + return fmt.Errorf("image ID %q is invalid", id) + } + return nil +} + +// Based on github.com/docker/docker/distribution/pull_v2.go +func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (types.Image, error) { + if len(m.History) == 0 { + // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. 
+ return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) + } + if len(m.History) != len(m.FSLayers) { + return nil, fmt.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.History), len(m.FSLayers)) + } + if len(uploadedLayerInfos) != len(m.FSLayers) { + return nil, fmt.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.FSLayers)) + } + if len(layerDiffIDs) != len(m.FSLayers) { + return nil, fmt.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.FSLayers)) + } + + rootFS := rootFS{ + Type: "layers", + DiffIDs: []digest.Digest{}, + BaseLayer: "", + } + var layers []descriptor + history := make([]imageHistory, len(m.History)) + for v1Index := len(m.History) - 1; v1Index >= 0; v1Index-- { + v2Index := (len(m.History) - 1) - v1Index + + var v1compat v1Compatibility + if err := json.Unmarshal([]byte(m.History[v1Index].V1Compatibility), &v1compat); err != nil { + return nil, fmt.Errorf("Error decoding history entry %d: %v", v1Index, err) + } + history[v2Index] = imageHistory{ + Created: v1compat.Created, + Author: v1compat.Author, + CreatedBy: strings.Join(v1compat.ContainerConfig.Cmd, " "), + Comment: v1compat.Comment, + EmptyLayer: v1compat.ThrowAway, + } + + if !v1compat.ThrowAway { + layers = append(layers, descriptor{ + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + Size: uploadedLayerInfos[v2Index].Size, + Digest: m.FSLayers[v1Index].BlobSum, + }) + rootFS.DiffIDs = append(rootFS.DiffIDs, layerDiffIDs[v2Index]) + } + } + configJSON, err := configJSONFromV1Config([]byte(m.History[0].V1Compatibility), rootFS, history) + if err != nil { + return nil, err + } + configDescriptor := descriptor{ + MediaType: "application/vnd.docker.container.image.v1+json", + Size: int64(len(configJSON)), + Digest: digest.FromBytes(configJSON), + } + + m2 := manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers) + return memoryImageFromManifest(m2), nil +} + +func configJSONFromV1Config(v1ConfigJSON []byte, rootFS rootFS, history []imageHistory) ([]byte, error) { + // github.com/docker/docker/image/v1/imagev1.go:MakeConfigFromV1Config unmarshals and re-marshals the input if docker_version is < 1.8.3 to remove blank fields; + // we don't do that here. FIXME? Should we? AFAICT it would only affect the digest value of the schema2 manifest, and we don't particularly need that to be + // a consistently reproducible value. + + // Preserve everything we don't specifically know about. + // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) + rawContents := map[string]*json.RawMessage{} + if err := json.Unmarshal(v1ConfigJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! 
+ return nil, err + } + + delete(rawContents, "id") + delete(rawContents, "parent") + delete(rawContents, "Size") + delete(rawContents, "parent_id") + delete(rawContents, "layer_id") + delete(rawContents, "throwaway") + + updates := map[string]interface{}{"rootfs": rootFS, "history": history} + for field, value := range updates { + encoded, err := json.Marshal(value) + if err != nil { + return nil, err + } + rawContents[field] = (*json.RawMessage)(&encoded) + } + return json.Marshal(rawContents) +} diff --git a/vendor/src/github.com/containers/image/image/docker_schema2.go b/vendor/src/github.com/containers/image/image/docker_schema2.go new file mode 100644 index 00000000..812d3369 --- /dev/null +++ b/vendor/src/github.com/containers/image/image/docker_schema2.go @@ -0,0 +1,299 @@ +package image + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/containers/image/manifest" + "github.com/containers/image/types" + "github.com/docker/distribution/digest" +) + +// gzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) +// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is +// a non-zero embedded timestamp; we could zero that, but that would just waste storage space +// in registries, so let’s use the same values. +var gzippedEmptyLayer = []byte{ + 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, + 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, +} + +// gzippedEmptyLayerDigest is a digest of gzippedEmptyLayer +const gzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + +type descriptor struct { + MediaType string `json:"mediaType"` + Size int64 `json:"size"` + Digest digest.Digest `json:"digest"` + URLs []string `json:"urls,omitempty"` +} + +type manifestSchema2 struct { + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of ConfigDescriptor. + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + ConfigDescriptor descriptor `json:"config"` + LayersDescriptors []descriptor `json:"layers"` +} + +func manifestSchema2FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) { + v2s2 := manifestSchema2{src: src} + if err := json.Unmarshal(manifest, &v2s2); err != nil { + return nil, err + } + return &v2s2, nil +} + +// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data: +func manifestSchema2FromComponents(config descriptor, src types.ImageSource, configBlob []byte, layers []descriptor) genericManifest { + return &manifestSchema2{ + src: src, + configBlob: configBlob, + SchemaVersion: 2, + MediaType: manifest.DockerV2Schema2MediaType, + ConfigDescriptor: config, + LayersDescriptors: layers, + } +} + +func (m *manifestSchema2) serialize() ([]byte, error) { + return json.Marshal(*m) +} + +func (m *manifestSchema2) manifestMIMEType() string { + return m.MediaType +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. 
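Aside: the gzippedEmptyLayer bytes above are the standard 32-byte gzip of an empty tar, so the digest constant can be re-derived with nothing but the standard library. A minimal check (the printed value should match gzippedEmptyLayerDigest):

package main

import (
	"crypto/sha256"
	"fmt"
)

// The same 32 bytes as the gzippedEmptyLayer variable above.
var gzippedEmptyLayer = []byte{
	31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
	0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
}

func main() {
	// Expected: sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4
	fmt.Printf("sha256:%x\n", sha256.Sum256(gzippedEmptyLayer))
}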
+func (m *manifestSchema2) ConfigInfo() types.BlobInfo { + return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size} +} + +// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. +// The result is cached; it is OK to call this however often you need. +func (m *manifestSchema2) ConfigBlob() ([]byte, error) { + if m.configBlob == nil { + if m.src == nil { + return nil, fmt.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") + } + stream, _, err := m.src.GetBlob(types.BlobInfo{ + Digest: m.ConfigDescriptor.Digest, + Size: m.ConfigDescriptor.Size, + URLs: m.ConfigDescriptor.URLs, + }) + if err != nil { + return nil, err + } + defer stream.Close() + blob, err := ioutil.ReadAll(stream) + if err != nil { + return nil, err + } + computedDigest := digest.FromBytes(blob) + if computedDigest != m.ConfigDescriptor.Digest { + return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest) + } + m.configBlob = blob + } + return m.configBlob, nil +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *manifestSchema2) LayerInfos() []types.BlobInfo { + blobs := []types.BlobInfo{} + for _, layer := range m.LayersDescriptors { + blobs = append(blobs, types.BlobInfo{ + Digest: layer.Digest, + Size: layer.Size, + URLs: layer.URLs, + }) + } + return blobs +} + +func (m *manifestSchema2) imageInspectInfo() (*types.ImageInspectInfo, error) { + config, err := m.ConfigBlob() + if err != nil { + return nil, err + } + v1 := &v1Image{} + if err := json.Unmarshal(config, v1); err != nil { + return nil, err + } + return &types.ImageInspectInfo{ + DockerVersion: v1.DockerVersion, + Created: v1.Created, + Labels: v1.Config.Labels, + Architecture: v1.Architecture, + Os: v1.OS, + }, nil +} + +// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. +// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute +// (most importantly it forces us to download the full layers even if they are already present at the destination). +func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { + return false +} + +// UpdatedImage returns a types.Image modified according to options. +// This does not change the state of the original Image object. +func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { + copy := *m // NOTE: This is not a deep copy, it still shares slices etc. 
+ if options.LayerInfos != nil { + if len(copy.LayersDescriptors) != len(options.LayerInfos) { + return nil, fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos)) + } + copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos)) + for i, info := range options.LayerInfos { + copy.LayersDescriptors[i].Digest = info.Digest + copy.LayersDescriptors[i].Size = info.Size + copy.LayersDescriptors[i].URLs = info.URLs + } + } + + switch options.ManifestMIMEType { + case "": // No conversion, OK + case manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType: + return copy.convertToManifestSchema1(options.InformationOnly.Destination) + default: + return nil, fmt.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema2MediaType, options.ManifestMIMEType) + } + + return memoryImageFromManifest(&copy), nil +} + +// Based on docker/distribution/manifest/schema1/config_builder.go +func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) (types.Image, error) { + configBytes, err := m.ConfigBlob() + if err != nil { + return nil, err + } + imageConfig := &image{} + if err := json.Unmarshal(configBytes, imageConfig); err != nil { + return nil, err + } + + // Build fsLayers and History, discarding all configs. We will patch the top-level config in later. + fsLayers := make([]fsLayersSchema1, len(imageConfig.History)) + history := make([]historySchema1, len(imageConfig.History)) + nonemptyLayerIndex := 0 + var parentV1ID string // Set in the loop + v1ID := "" + haveGzippedEmptyLayer := false + if len(imageConfig.History) == 0 { + // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. + return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType) + } + for v2Index, historyEntry := range imageConfig.History { + parentV1ID = v1ID + v1Index := len(imageConfig.History) - 1 - v2Index + + var blobDigest digest.Digest + if historyEntry.EmptyLayer { + if !haveGzippedEmptyLayer { + logrus.Debugf("Uploading empty layer during conversion to schema 1") + info, err := dest.PutBlob(bytes.NewReader(gzippedEmptyLayer), types.BlobInfo{Digest: gzippedEmptyLayerDigest, Size: int64(len(gzippedEmptyLayer))}) + if err != nil { + return nil, fmt.Errorf("Error uploading empty layer: %v", err) + } + if info.Digest != gzippedEmptyLayerDigest { + return nil, fmt.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, gzippedEmptyLayerDigest) + } + haveGzippedEmptyLayer = true + } + blobDigest = gzippedEmptyLayerDigest + } else { + if nonemptyLayerIndex >= len(m.LayersDescriptors) { + return nil, fmt.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.LayersDescriptors)) + } + blobDigest = m.LayersDescriptors[nonemptyLayerIndex].Digest + nonemptyLayerIndex++ + } + + // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency.
+ v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID) + if err != nil { + return nil, err + } + v1ID = v + + fakeImage := v1Compatibility{ + ID: v1ID, + Parent: parentV1ID, + Comment: historyEntry.Comment, + Created: historyEntry.Created, + Author: historyEntry.Author, + ThrowAway: historyEntry.EmptyLayer, + } + fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy} + v1CompatibilityBytes, err := json.Marshal(&fakeImage) + if err != nil { + return nil, fmt.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) + } + + fsLayers[v1Index] = fsLayersSchema1{BlobSum: blobDigest} + history[v1Index] = historySchema1{V1Compatibility: string(v1CompatibilityBytes)} + // Note that parentV1ID of the top layer is preserved when exiting this loop + } + + // Now patch in real configuration for the top layer (v1Index == 0) + v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency. + if err != nil { + return nil, err + } + v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer) + if err != nil { + return nil, err + } + history[0].V1Compatibility = string(v1Config) + + m1 := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) + return memoryImageFromManifest(m1), nil +} + +func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) { + if err := blobDigest.Validate(); err != nil { + return "", err + } + parts := append([]string{blobDigest.Hex()}, others...) + v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " "))) + return hex.EncodeToString(v1IDHash[:]), nil +} + +func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Preserve everything we don't specifically know about. + // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) + rawContents := map[string]*json.RawMessage{} + if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! + return nil, err + } + delete(rawContents, "rootfs") + delete(rawContents, "history") + + updates := map[string]interface{}{"id": v1ID} + if parentV1ID != "" { + updates["parent"] = parentV1ID + } + if throwaway { + updates["throwaway"] = throwaway + } + for field, value := range updates { + encoded, err := json.Marshal(value) + if err != nil { + return nil, err + } + rawContents[field] = (*json.RawMessage)(&encoded) + } + return json.Marshal(rawContents) +} diff --git a/vendor/src/github.com/containers/image/image/image.go b/vendor/src/github.com/containers/image/image/image.go deleted file mode 100644 index ad6fbf32..00000000 --- a/vendor/src/github.com/containers/image/image/image.go +++ /dev/null @@ -1,365 +0,0 @@ -// Package image consolidates knowledge about various container image formats -// (as opposed to image storage mechanisms, which are handled by types.ImageSource) -// and exposes all of them using an unified interface. 
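For reference, the synthetic v1 IDs produced by v1IDFromBlobDigestAndComponents above are plain SHA-256 over the space-joined components. A standalone sketch of the same rule (example inputs made up, digest validation elided):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

// v1ID reproduces the hashing rule: sha256("<blobDigestHex> <other1> <other2> ...").
func v1ID(blobDigestHex string, others ...string) string {
	parts := append([]string{blobDigestHex}, others...)
	sum := sha256.Sum256([]byte(strings.Join(parts, " ")))
	return hex.EncodeToString(sum[:])
}

func main() {
	fmt.Println(v1ID("0123abcd", "parent-v1-id"))
}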
-package image - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "regexp" - "time" - - "github.com/containers/image/manifest" - "github.com/containers/image/types" -) - -var ( - validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) -) - -// genericImage is a general set of utilities for working with container images, -// whatever is their underlying location (i.e. dockerImageSource-independent). -// Note the existence of skopeo/docker.Image: some instances of a `types.Image` -// may not be a `genericImage` directly. However, most users of `types.Image` -// do not care, and those who care about `skopeo/docker.Image` know they do. -type genericImage struct { - src types.ImageSource - // private cache for Manifest(); nil if not yet known. - cachedManifest []byte - // private cache for the manifest media type w/o having to guess it - // this may be the empty string in case the MIME Type wasn't guessed correctly - // this field is valid only if cachedManifest is not nil - cachedManifestMIMEType string - // private cache for Signatures(); nil if not yet known. - cachedSignatures [][]byte -} - -// FromSource returns a types.Image implementation for source. -// The caller must call .Close() on the returned Image. -// -// FromSource “takes ownership” of the input ImageSource and will call src.Close() -// when the image is closed. (This does not prevent callers from using both the -// Image and ImageSource objects simultaneously, but it means that they only need to -// the Image.) -func FromSource(src types.ImageSource) types.Image { - return &genericImage{src: src} -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (i *genericImage) Reference() types.ImageReference { - return i.src.Reference() -} - -// Close removes resources associated with an initialized Image, if any. -func (i *genericImage) Close() { - i.src.Close() -} - -// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. -// NOTE: It is essential for signature verification that Manifest returns the manifest from which BlobDigests is computed. -func (i *genericImage) Manifest() ([]byte, string, error) { - if i.cachedManifest == nil { - m, mt, err := i.src.GetManifest() - if err != nil { - return nil, "", err - } - i.cachedManifest = m - if mt == "" || mt == "text/plain" { - // Crane registries can return "text/plain". - // This makes no real sense, but it happens - // because requests for manifests are - // redirected to a content distribution - // network which is configured that way. - mt = manifest.GuessMIMEType(i.cachedManifest) - } - i.cachedManifestMIMEType = mt - } - return i.cachedManifest, i.cachedManifestMIMEType, nil -} - -// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. 
-func (i *genericImage) Signatures() ([][]byte, error) { - if i.cachedSignatures == nil { - sigs, err := i.src.GetSignatures() - if err != nil { - return nil, err - } - i.cachedSignatures = sigs - } - return i.cachedSignatures, nil -} - -func (i *genericImage) Inspect() (*types.ImageInspectInfo, error) { - // TODO(runcom): unused version param for now, default to docker v2-1 - m, err := i.getParsedManifest() - if err != nil { - return nil, err - } - return m.ImageInspectInfo() -} - -type config struct { - Labels map[string]string -} - -type v1Image struct { - // Config is the configuration of the container received from the client - Config *config `json:"config,omitempty"` - // DockerVersion specifies version on which image is built - DockerVersion string `json:"docker_version,omitempty"` - // Created timestamp when image was created - Created time.Time `json:"created"` - // Architecture is the hardware that the image is build and runs on - Architecture string `json:"architecture,omitempty"` - // OS is the operating system used to build and run the image - OS string `json:"os,omitempty"` -} - -// will support v1 one day... -type genericManifest interface { - Config() ([]byte, error) - LayerDigests() []string - BlobDigests() []string - ImageInspectInfo() (*types.ImageInspectInfo, error) -} - -type fsLayersSchema1 struct { - BlobSum string `json:"blobSum"` -} - -// compile-time check that manifestSchema1 implements genericManifest -var _ genericManifest = (*manifestSchema1)(nil) - -type manifestSchema1 struct { - Name string - Tag string - FSLayers []fsLayersSchema1 `json:"fsLayers"` - History []struct { - V1Compatibility string `json:"v1Compatibility"` - } `json:"history"` - // TODO(runcom) verify the downloaded manifest - //Signature []byte `json:"signature"` -} - -func (m *manifestSchema1) LayerDigests() []string { - layers := make([]string, len(m.FSLayers)) - for i, layer := range m.FSLayers { - layers[i] = layer.BlobSum - } - return layers -} - -func (m *manifestSchema1) BlobDigests() []string { - return m.LayerDigests() -} - -func (m *manifestSchema1) Config() ([]byte, error) { - return []byte(m.History[0].V1Compatibility), nil -} - -func (m *manifestSchema1) ImageInspectInfo() (*types.ImageInspectInfo, error) { - v1 := &v1Image{} - config, err := m.Config() - if err != nil { - return nil, err - } - if err := json.Unmarshal(config, v1); err != nil { - return nil, err - } - return &types.ImageInspectInfo{ - Tag: m.Tag, - DockerVersion: v1.DockerVersion, - Created: v1.Created, - Labels: v1.Config.Labels, - Architecture: v1.Architecture, - Os: v1.OS, - Layers: m.LayerDigests(), - }, nil -} - -// compile-time check that manifestSchema2 implements genericManifest -var _ genericManifest = (*manifestSchema2)(nil) - -type manifestSchema2 struct { - src types.ImageSource - ConfigDescriptor descriptor `json:"config"` - LayersDescriptors []descriptor `json:"layers"` -} - -type descriptor struct { - MediaType string `json:"mediaType"` - Size int64 `json:"size"` - Digest string `json:"digest"` -} - -func (m *manifestSchema2) LayerDigests() []string { - blobs := []string{} - for _, layer := range m.LayersDescriptors { - blobs = append(blobs, layer.Digest) - } - return blobs -} - -func (m *manifestSchema2) BlobDigests() []string { - blobs := m.LayerDigests() - blobs = append(blobs, m.ConfigDescriptor.Digest) - return blobs -} - -func (m *manifestSchema2) Config() ([]byte, error) { - rawConfig, _, err := m.src.GetBlob(m.ConfigDescriptor.Digest) - if err != nil { - return nil, err - } - config, err := 
ioutil.ReadAll(rawConfig) - rawConfig.Close() - return config, err -} - -func (m *manifestSchema2) ImageInspectInfo() (*types.ImageInspectInfo, error) { - config, err := m.Config() - if err != nil { - return nil, err - } - v1 := &v1Image{} - if err := json.Unmarshal(config, v1); err != nil { - return nil, err - } - return &types.ImageInspectInfo{ - DockerVersion: v1.DockerVersion, - Created: v1.Created, - Labels: v1.Config.Labels, - Architecture: v1.Architecture, - Os: v1.OS, - Layers: m.LayerDigests(), - }, nil -} - -// getParsedManifest parses the manifest into a data structure, cleans it up, and returns it. -// NOTE: The manifest may have been modified in the process; DO NOT reserialize and store the return value -// if you want to preserve the original manifest; use the blob returned by Manifest() directly. -// NOTE: It is essential for signature verification that the object is computed from the same manifest which is returned by Manifest(). -func (i *genericImage) getParsedManifest() (genericManifest, error) { - manblob, mt, err := i.Manifest() - if err != nil { - return nil, err - } - switch mt { - // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . - // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might - // need to happen within the ImageSource. - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, "application/json": - mschema1 := &manifestSchema1{} - if err := json.Unmarshal(manblob, mschema1); err != nil { - return nil, err - } - if err := fixManifestLayers(mschema1); err != nil { - return nil, err - } - // TODO(runcom): verify manifest schema 1, 2 etc - //if len(m.FSLayers) != len(m.History) { - //return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String()) - //} - //if len(m.FSLayers) == 0 { - //return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String()) - //} - return mschema1, nil - case manifest.DockerV2Schema2MediaType: - v2s2 := manifestSchema2{src: i.src} - if err := json.Unmarshal(manblob, &v2s2); err != nil { - return nil, err - } - return &v2s2, nil - case "": - return nil, errors.New("could not guess manifest media type") - default: - return nil, fmt.Errorf("unsupported manifest media type %s", mt) - } -} - -// uniqueBlobDigests returns a list of blob digests referenced from a manifest. -// The list will not contain duplicates; it is not intended to correspond to the "history" or "parent chain" of a Docker image. -func uniqueBlobDigests(m genericManifest) []string { - var res []string - seen := make(map[string]struct{}) - for _, digest := range m.BlobDigests() { - if _, ok := seen[digest]; ok { - continue - } - seen[digest] = struct{}{} - res = append(res, digest) - } - return res -} - -// BlobDigests returns a list of blob digests referenced by this image. -// The list will not contain duplicates; it is not intended to correspond to the "history" or "parent chain" of a Docker image. -// NOTE: It is essential for signature verification that BlobDigests is computed from the same manifest which is returned by Manifest(). 
-func (i *genericImage) BlobDigests() ([]string, error) { - m, err := i.getParsedManifest() - if err != nil { - return nil, err - } - return uniqueBlobDigests(m), nil -} - -// fixManifestLayers, after validating the supplied manifest -// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in manifest.History), -// modifies manifest to only have one entry for each layer ID in manifest.History (deleting the older duplicates, -// both from manifest.History and manifest.FSLayers). -// Note that even after this succeeds, manifest.FSLayers may contain duplicate entries -// (for Dockerfile operations which change the configuration but not the filesystem). -func fixManifestLayers(manifest *manifestSchema1) error { - type imageV1 struct { - ID string - Parent string - } - // Per the specification, we can assume that len(manifest.FSLayers) == len(manifest.History) - imgs := make([]*imageV1, len(manifest.FSLayers)) - for i := range manifest.FSLayers { - img := &imageV1{} - - if err := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), img); err != nil { - return err - } - - imgs[i] = img - if err := validateV1ID(img.ID); err != nil { - return err - } - } - if imgs[len(imgs)-1].Parent != "" { - return errors.New("Invalid parent ID in the base layer of the image.") - } - // check general duplicates to error instead of a deadlock - idmap := make(map[string]struct{}) - var lastID string - for _, img := range imgs { - // skip IDs that appear after each other, we handle those later - if _, exists := idmap[img.ID]; img.ID != lastID && exists { - return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) - } - lastID = img.ID - idmap[lastID] = struct{}{} - } - // backwards loop so that we keep the remaining indexes after removing items - for i := len(imgs) - 2; i >= 0; i-- { - if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue - manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...) - manifest.History = append(manifest.History[:i], manifest.History[i+1:]...) - } else if imgs[i].Parent != imgs[i+1].ID { - return fmt.Errorf("Invalid parent ID. 
Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent) - } - } - return nil -} - -func validateV1ID(id string) error { - if ok := validHex.MatchString(id); !ok { - return fmt.Errorf("image ID %q is invalid", id) - } - return nil -} diff --git a/vendor/src/github.com/containers/image/image/manifest.go b/vendor/src/github.com/containers/image/image/manifest.go new file mode 100644 index 00000000..df3e23f4 --- /dev/null +++ b/vendor/src/github.com/containers/image/image/manifest.go @@ -0,0 +1,121 @@ +package image + +import ( + "time" + + "github.com/docker/distribution/digest" + "github.com/docker/engine-api/types/strslice" + + "github.com/containers/image/manifest" + "github.com/containers/image/types" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +type config struct { + Cmd strslice.StrSlice + Labels map[string]string +} + +type v1Image struct { + ID string `json:"id,omitempty"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig *config `json:"container_config,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + // Config is the configuration of the container received from the client + Config *config `json:"config,omitempty"` + // Architecture is the hardware that the image is build and runs on + Architecture string `json:"architecture,omitempty"` + // OS is the operating system used to build and run the image + OS string `json:"os,omitempty"` +} + +type image struct { + v1Image + History []imageHistory `json:"history,omitempty"` + RootFS *rootFS `json:"rootfs,omitempty"` +} + +type imageHistory struct { + Created time.Time `json:"created"` + Author string `json:"author,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Comment string `json:"comment,omitempty"` + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +type rootFS struct { + Type string `json:"type"` + DiffIDs []digest.Digest `json:"diff_ids,omitempty"` + BaseLayer string `json:"base_layer,omitempty"` +} + +// genericManifest is an interface for parsing, modifying image manifests and related data. +// Note that the public methods are intended to be a subset of types.Image +// so that embedding a genericManifest into structs works. +// will support v1 one day... +type genericManifest interface { + serialize() ([]byte, error) + manifestMIMEType() string + // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. + // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. + ConfigInfo() types.BlobInfo + // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. + // The result is cached; it is OK to call this however often you need. + ConfigBlob() ([]byte, error) + // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfos() []types.BlobInfo + imageInspectInfo() (*types.ImageInspectInfo, error) // To be called by inspectManifest + // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. 
+ // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute + // (most importantly it forces us to download the full layers even if they are already present at the destination). + UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool + // UpdatedImage returns a types.Image modified according to options. + // This does not change the state of the original Image object. + UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) +} + +func manifestInstanceFromBlob(src types.ImageSource, manblob []byte, mt string) (genericManifest, error) { + switch mt { + // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . + // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might + // need to happen within the ImageSource. + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, "application/json": + return manifestSchema1FromManifest(manblob) + case imgspecv1.MediaTypeImageManifest: + return manifestOCI1FromManifest(src, manblob) + case manifest.DockerV2Schema2MediaType: + return manifestSchema2FromManifest(src, manblob) + case manifest.DockerV2ListMediaType: + return manifestSchema2FromManifestList(src, manblob) + default: + // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time + // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 + // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 + // + // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. + // This makes no real sense, but it happens + // because requests for manifests are + // redirected to a content distribution + // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 + return manifestSchema1FromManifest(manblob) + } +} + +// inspectManifest is an implementation of types.Image.Inspect +func inspectManifest(m genericManifest) (*types.ImageInspectInfo, error) { + info, err := m.imageInspectInfo() + if err != nil { + return nil, err + } + layers := m.LayerInfos() + info.Layers = make([]string, len(layers)) + for i, layer := range layers { + info.Layers[i] = layer.Digest.String() + } + return info, nil +} diff --git a/vendor/src/github.com/containers/image/image/memory.go b/vendor/src/github.com/containers/image/image/memory.go new file mode 100644 index 00000000..9404db82 --- /dev/null +++ b/vendor/src/github.com/containers/image/image/memory.go @@ -0,0 +1,74 @@ +package image + +import ( + "errors" + + "github.com/containers/image/types" +) + +// memoryImage is a mostly-implementation of types.Image assembled from data +// created in memory, used primarily as a return value of types.Image.UpdatedImage +// as a way to carry various structured information in a type-safe and easy-to-use way. +// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone +// collection of all related information, e.g. there is no way to get layer blobs +// from a memoryImage. 
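manifestInstanceFromBlob above dispatches purely on the reported MIME type, falling back to schema 1 for anything unrecognized. The manifest package in this library also exports GuessMIMEType for recovering a type from the blob itself; a minimal sketch (the example blob is made up, and the printed value is the expected classification, not a guarantee):

package main

import (
	"fmt"

	"github.com/containers/image/manifest"
)

func main() {
	blob := []byte(`{"schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json"}`)
	// Expected to report manifest.DockerV2Schema2MediaType for this blob.
	fmt.Println(manifest.GuessMIMEType(blob))
}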
+type memoryImage struct { + genericManifest + serializedManifest []byte // A private cache for Manifest() +} + +func memoryImageFromManifest(m genericManifest) types.Image { + return &memoryImage{ + genericManifest: m, + serializedManifest: nil, + } +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (i *memoryImage) Reference() types.ImageReference { + // It would really be inappropriate to return the ImageReference of the image this was based on. + return nil +} + +// Close removes resources associated with an initialized UnparsedImage, if any. +func (i *memoryImage) Close() { +} + +// Size returns the size of the image as stored, if known, or -1 if not. +func (i *memoryImage) Size() (int64, error) { + s, err := i.serialize() + if err != nil { + return -1, err + } + return int64(len(s)), nil +} + +// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. +func (i *memoryImage) Manifest() ([]byte, string, error) { + if i.serializedManifest == nil { + m, err := i.genericManifest.serialize() + if err != nil { + return nil, "", err + } + i.serializedManifest = m + } + return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil +} + +// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. +func (i *memoryImage) Signatures() ([][]byte, error) { + // Modifying an image invalidates signatures; a caller asking the updated image for signatures + // is probably confused. + return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory") +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (i *memoryImage) Inspect() (*types.ImageInspectInfo, error) { + return inspectManifest(i.genericManifest) +} + +// IsMultiImage returns true if the image's manifest is a list of images, false otherwise. +func (i *memoryImage) IsMultiImage() bool { + return false +} diff --git a/vendor/src/github.com/containers/image/image/oci.go b/vendor/src/github.com/containers/image/image/oci.go new file mode 100644 index 00000000..6ca627a9 --- /dev/null +++ b/vendor/src/github.com/containers/image/image/oci.go @@ -0,0 +1,167 @@ +package image + +import ( + "encoding/json" + "fmt" + "io/ioutil" + + "github.com/containers/image/manifest" + "github.com/containers/image/types" + "github.com/docker/distribution/digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +type manifestOCI1 struct { + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of ConfigDescriptor. 
+ SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + ConfigDescriptor descriptor `json:"config"` + LayersDescriptors []descriptor `json:"layers"` +} + +func manifestOCI1FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) { + oci := manifestOCI1{src: src} + if err := json.Unmarshal(manifest, &oci); err != nil { + return nil, err + } + return &oci, nil +} + +// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data: +func manifestOCI1FromComponents(config descriptor, configBlob []byte, layers []descriptor) genericManifest { + return &manifestOCI1{ + src: nil, + configBlob: configBlob, + SchemaVersion: 2, + MediaType: imgspecv1.MediaTypeImageManifest, + ConfigDescriptor: config, + LayersDescriptors: layers, + } +} + +func (m *manifestOCI1) serialize() ([]byte, error) { + return json.Marshal(*m) +} + +func (m *manifestOCI1) manifestMIMEType() string { + return m.MediaType +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. +func (m *manifestOCI1) ConfigInfo() types.BlobInfo { + return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size} +} + +// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. +// The result is cached; it is OK to call this however often you need. +func (m *manifestOCI1) ConfigBlob() ([]byte, error) { + if m.configBlob == nil { + if m.src == nil { + return nil, fmt.Errorf("Internal error: neither src nor configBlob set in manifestOCI1") + } + stream, _, err := m.src.GetBlob(types.BlobInfo{ + Digest: m.ConfigDescriptor.Digest, + Size: m.ConfigDescriptor.Size, + URLs: m.ConfigDescriptor.URLs, + }) + if err != nil { + return nil, err + } + defer stream.Close() + blob, err := ioutil.ReadAll(stream) + if err != nil { + return nil, err + } + computedDigest := digest.FromBytes(blob) + if computedDigest != m.ConfigDescriptor.Digest { + return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest) + } + m.configBlob = blob + } + return m.configBlob, nil +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *manifestOCI1) LayerInfos() []types.BlobInfo { + blobs := []types.BlobInfo{} + for _, layer := range m.LayersDescriptors { + blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size}) + } + return blobs +} + +func (m *manifestOCI1) imageInspectInfo() (*types.ImageInspectInfo, error) { + config, err := m.ConfigBlob() + if err != nil { + return nil, err + } + v1 := &v1Image{} + if err := json.Unmarshal(config, v1); err != nil { + return nil, err + } + return &types.ImageInspectInfo{ + DockerVersion: v1.DockerVersion, + Created: v1.Created, + Labels: v1.Config.Labels, + Architecture: v1.Architecture, + Os: v1.OS, + }, nil +} + +// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. 
+ // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute +// (most importantly it forces us to download the full layers even if they are already present at the destination). +func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { + return false +} + +// UpdatedImage returns a types.Image modified according to options. +// This does not change the state of the original Image object. +func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { + copy := *m // NOTE: This is not a deep copy, it still shares slices etc. + if options.LayerInfos != nil { + if len(copy.LayersDescriptors) != len(options.LayerInfos) { + return nil, fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos)) + } + copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos)) + for i, info := range options.LayerInfos { + copy.LayersDescriptors[i].Digest = info.Digest + copy.LayersDescriptors[i].Size = info.Size + } + } + + switch options.ManifestMIMEType { + case "": // No conversion, OK + case manifest.DockerV2Schema2MediaType: + return copy.convertToManifestSchema2() + default: + return nil, fmt.Errorf("Conversion of image manifest from %s to %s is not implemented", imgspecv1.MediaTypeImageManifest, options.ManifestMIMEType) + } + + return memoryImageFromManifest(&copy), nil +} + +func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) { + // Create a copy of the descriptor. + config := m.ConfigDescriptor + + // The only difference between OCI and DockerSchema2 is the mediatypes. The + // media type of the manifest is handled by manifestSchema2FromComponents. + config.MediaType = manifest.DockerV2Schema2ConfigMediaType + + layers := make([]descriptor, len(m.LayersDescriptors)) + for idx := range layers { + layers[idx] = m.LayersDescriptors[idx] + layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType + } + + // Rather than copying the ConfigBlob now, we just pass m.src to the + // translated manifest, since the only difference is the mediatype of + // the descriptors, there is no change to any blob stored in m.src. + m1 := manifestSchema2FromComponents(config, m.src, nil, layers) + return memoryImageFromManifest(m1), nil +} diff --git a/vendor/src/github.com/containers/image/image/sourced.go b/vendor/src/github.com/containers/image/image/sourced.go new file mode 100644 index 00000000..ef35b3c3 --- /dev/null +++ b/vendor/src/github.com/containers/image/image/sourced.go @@ -0,0 +1,90 @@ +// Package image consolidates knowledge about various container image formats +// (as opposed to image storage mechanisms, which are handled by types.ImageSource) +// and exposes all of them using a unified interface. +package image + +import ( + "github.com/containers/image/manifest" + "github.com/containers/image/types" +) + +// FromSource returns a types.Image implementation for source. +// The caller must call .Close() on the returned Image. +// +// FromSource “takes ownership” of the input ImageSource and will call src.Close() +// when the image is closed. (This does not prevent callers from using both the +// Image and ImageSource objects simultaneously, but it means that they only need to +// keep a reference to the Image.)
+// +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. +func FromSource(src types.ImageSource) (types.Image, error) { + return FromUnparsedImage(UnparsedFromSource(src)) +} + +// sourcedImage is a general set of utilities for working with container images, +// whatever is their underlying location (i.e. dockerImageSource-independent). +// Note the existence of skopeo/docker.Image: some instances of a `types.Image` +// may not be a `sourcedImage` directly. However, most users of `types.Image` +// do not care, and those who care about `skopeo/docker.Image` know they do. +type sourcedImage struct { + *UnparsedImage + manifestBlob []byte + manifestMIMEType string + // genericManifest contains data corresponding to manifestBlob. + // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest + // if you want to preserve the original manifest; use manifestBlob directly. + genericManifest +} + +// FromUnparsedImage returns a types.Image implementation for unparsed. +// The caller must call .Close() on the returned Image. +// +// FromUnparsedImage “takes ownership” of the input UnparsedImage and will call unparsed.Close() +// when the image is closed. (This does not prevent callers from using both the +// UnparsedImage and ImageSource objects simultaneously, but it means that they only need to +// keep a reference to the Image.) +func FromUnparsedImage(unparsed *UnparsedImage) (types.Image, error) { + // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: + // we want to be able to use unparsed.src. We could make that an explicit interface, but, well, + // this is the only UnparsedImage implementation around, anyway. + + // Also, we do not explicitly implement types.Image.Close; we let the implementation fall through to + // unparsed.Close. + + // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). + manifestBlob, manifestMIMEType, err := unparsed.Manifest() + if err != nil { + return nil, err + } + + parsedManifest, err := manifestInstanceFromBlob(unparsed.src, manifestBlob, manifestMIMEType) + if err != nil { + return nil, err + } + + return &sourcedImage{ + UnparsedImage: unparsed, + manifestBlob: manifestBlob, + manifestMIMEType: manifestMIMEType, + genericManifest: parsedManifest, + }, nil +} + +// Size returns the size of the image as stored, if it's known, or -1 if it isn't. +func (i *sourcedImage) Size() (int64, error) { + return -1, nil +} + +// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched.
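Tying the NOTE above together: a caller that wants verification builds the UnparsedImage explicitly instead of using FromSource. verifiedImage below is a hypothetical helper sketching that order of operations, with the actual policy checks elided:

package verify

import (
	"github.com/containers/image/image"
	"github.com/containers/image/types"
)

// verifiedImage wraps src, leaves room for digest/signature checks against the
// UnparsedImage, and only then parses the manifest into a full types.Image.
func verifiedImage(src types.ImageSource) (types.Image, error) {
	unparsed := image.UnparsedFromSource(src) // takes ownership of src
	// ... verify unparsed.Manifest() and unparsed.Signatures() here ...
	return image.FromUnparsedImage(unparsed)
}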
+func (i *sourcedImage) Manifest() ([]byte, string, error) { + return i.manifestBlob, i.manifestMIMEType, nil +} + +func (i *sourcedImage) Inspect() (*types.ImageInspectInfo, error) { + return inspectManifest(i.genericManifest) +} + +func (i *sourcedImage) IsMultiImage() bool { + return i.manifestMIMEType == manifest.DockerV2ListMediaType +} diff --git a/vendor/src/github.com/containers/image/image/unparsed.go b/vendor/src/github.com/containers/image/image/unparsed.go new file mode 100644 index 00000000..6cedc08e --- /dev/null +++ b/vendor/src/github.com/containers/image/image/unparsed.go @@ -0,0 +1,83 @@ +package image + +import ( + "fmt" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/manifest" + "github.com/containers/image/types" +) + +// UnparsedImage implements types.UnparsedImage . +type UnparsedImage struct { + src types.ImageSource + cachedManifest []byte // A private cache for Manifest(); nil if not yet known. + // A private cache for Manifest(), may be the empty string if guessing failed. + // Valid iff cachedManifest is not nil. + cachedManifestMIMEType string + cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known. +} + +// UnparsedFromSource returns a types.UnparsedImage implementation for source. +// The caller must call .Close() on the returned UnparsedImage. +// +// UnparsedFromSource “takes ownership” of the input ImageSource and will call src.Close() +// when the image is closed. (This does not prevent callers from using both the +// UnparsedImage and ImageSource objects simultaneously, but it means that they only need to +// keep a reference to the UnparsedImage.) +func UnparsedFromSource(src types.ImageSource) *UnparsedImage { + return &UnparsedImage{src: src} +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (i *UnparsedImage) Reference() types.ImageReference { + return i.src.Reference() +} + +// Close removes resources associated with an initialized UnparsedImage, if any. +func (i *UnparsedImage) Close() { + i.src.Close() +} + +// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. +func (i *UnparsedImage) Manifest() ([]byte, string, error) { + if i.cachedManifest == nil { + m, mt, err := i.src.GetManifest() + if err != nil { + return nil, "", err + } + + // ImageSource.GetManifest does not do digest verification, but we do; + // this immediately protects also any user of types.Image. + ref := i.Reference().DockerReference() + if ref != nil { + if canonical, ok := ref.(reference.Canonical); ok { + digest := canonical.Digest() + matches, err := manifest.MatchesDigest(m, digest) + if err != nil { + return nil, "", fmt.Errorf("Error computing manifest digest: %v", err) + } + if !matches { + return nil, "", fmt.Errorf("Manifest does not match provided manifest digest %s", digest) + } + } + } + + i.cachedManifest = m + i.cachedManifestMIMEType = mt + } + return i.cachedManifest, i.cachedManifestMIMEType, nil +} + +// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. 
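The digest check inside UnparsedImage.Manifest above is just manifest.MatchesDigest against the digest carried by a canonical reference. The round trip can be exercised directly, assuming the updated manifest package API from the diff further down (Digest returning a digest.Digest); the example blob is made up:

package main

import (
	"fmt"

	"github.com/containers/image/manifest"
)

func main() {
	blob := []byte(`{"schemaVersion": 2}`) // any manifest bytes
	dgst, err := manifest.Digest(blob)     // sha256 over the raw bytes for unsigned manifests
	if err != nil {
		panic(err)
	}
	ok, _ := manifest.MatchesDigest(blob, dgst)
	fmt.Println(dgst, ok) // expected: the computed digest and true
}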
+func (i *UnparsedImage) Signatures() ([][]byte, error) { + if i.cachedSignatures == nil { + sigs, err := i.src.GetSignatures() + if err != nil { + return nil, err + } + i.cachedSignatures = sigs + } + return i.cachedSignatures, nil +} diff --git a/vendor/src/github.com/containers/image/manifest/manifest.go b/vendor/src/github.com/containers/image/manifest/manifest.go index 6344c964..0702caa9 100644 --- a/vendor/src/github.com/containers/image/manifest/manifest.go +++ b/vendor/src/github.com/containers/image/manifest/manifest.go @@ -1,10 +1,9 @@ package manifest import ( - "crypto/sha256" - "encoding/hex" "encoding/json" + "github.com/docker/distribution/digest" "github.com/docker/libtrust" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -19,8 +18,14 @@ const ( DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws" // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2 DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json" + // DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs. + DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json" + // DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers. + DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip" // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json" + // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers. + DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" ) // DefaultRequestedManifestMIMETypes is a list of MIME types a types.ImageSource @@ -30,6 +35,7 @@ var DefaultRequestedManifestMIMETypes = []string{ DockerV2Schema2MediaType, DockerV2Schema1SignedMediaType, DockerV2Schema1MediaType, + DockerV2ListMediaType, } // GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized. @@ -65,7 +71,7 @@ func GuessMIMEType(manifest []byte) string { } // Digest returns the a digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures. -func Digest(manifest []byte) (string, error) { +func Digest(manifest []byte) (digest.Digest, error) { if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType { sig, err := libtrust.ParsePrettySignature(manifest, "signatures") if err != nil { @@ -79,15 +85,14 @@ func Digest(manifest []byte) (string, error) { } } - hash := sha256.Sum256(manifest) - return "sha256:" + hex.EncodeToString(hash[:]), nil + return digest.FromBytes(manifest), nil } // MatchesDigest returns true iff the manifest matches expectedDigest. // Error may be set if this returns false. // Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified, // or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob. -func MatchesDigest(manifest []byte, expectedDigest string) (bool, error) { +func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) { // This should eventually support various digest types. 
 	actualDigest, err := Digest(manifest)
 	if err != nil {
@@ -95,3 +100,21 @@ func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error)
 	}
 	return expectedDigest == actualDigest, nil
 }
+
+// AddDummyV2S1Signature adds a JWS signature with a temporary key (i.e. useless) to a v2s1 manifest.
+// This is useful to make the manifest acceptable to a Docker Registry (even though nothing needs or wants the JWS signature).
+func AddDummyV2S1Signature(manifest []byte) ([]byte, error) {
+    key, err := libtrust.GenerateECP256PrivateKey()
+    if err != nil {
+        return nil, err // Coverage: This can fail only if rand.Reader fails.
+    }
+
+    js, err := libtrust.NewJSONSignature(manifest)
+    if err != nil {
+        return nil, err
+    }
+    if err := js.Sign(key); err != nil { // Coverage: This can fail basically only if rand.Reader fails.
+        return nil, err
+    }
+    return js.PrettySignature("signatures")
+}
diff --git a/vendor/src/github.com/containers/image/oci/layout/oci_dest.go b/vendor/src/github.com/containers/image/oci/layout/oci_dest.go
index 5ea52fd3..1c849e0d 100644
--- a/vendor/src/github.com/containers/image/oci/layout/oci_dest.go
+++ b/vendor/src/github.com/containers/image/oci/layout/oci_dest.go
@@ -1,8 +1,6 @@
 package layout
 
 import (
-	"crypto/sha256"
-	"encoding/hex"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -13,7 +11,7 @@ import (
 
 	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
-	imgspec "github.com/opencontainers/image-spec/specs-go"
+	"github.com/docker/distribution/digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
@@ -49,19 +47,30 @@ func (d *ociImageDestination) SupportsSignatures() error {
 	return fmt.Errorf("Pushing signatures for OCI images is not supported")
 }
 
-// PutBlob writes contents of stream and returns its computed digest and size.
-// A digest can be optionally provided if known, the specific image destination can decide to play with it or not.
-// The length of stream is expected to be expectedSize; if expectedSize == -1, it is not known.
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+func (d *ociImageDestination) ShouldCompressLayers() bool {
+    return true
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *ociImageDestination) AcceptsForeignLayerURLs() bool {
+    return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ociImageDestination) PutBlob(stream io.Reader, _ string, expectedSize int64) (string, int64, error) {
+func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
 	if err := ensureDirectoryExists(d.ref.dir); err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
 	if err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	succeeded := false
 	defer func() {
@@ -71,36 +80,58 @@ func (d *ociImageDestination) PutBlob(stream io.Reader, _ string, expectedSize i
 		}
 	}()
 
-	h := sha256.New()
-	tee := io.TeeReader(stream, h)
+	digester := digest.Canonical.New()
+	tee := io.TeeReader(stream, digester.Hash())
 
 	size, err := io.Copy(blobFile, tee)
 	if err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
-	computedDigest := "sha256:" + hex.EncodeToString(h.Sum(nil))
-	if expectedSize != -1 && size != expectedSize {
-		return "", -1, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, expectedSize, size)
+	computedDigest := digester.Digest()
+	if inputInfo.Size != -1 && size != inputInfo.Size {
+		return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
 	}
 	if err := blobFile.Sync(); err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	if err := blobFile.Chmod(0644); err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 
 	blobPath, err := d.ref.blobPath(computedDigest)
 	if err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	if err := ensureParentDirectoryExists(blobPath); err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	succeeded = true
-	return computedDigest, size, nil
+	return types.BlobInfo{Digest: computedDigest, Size: size}, nil
+}
+
+func (d *ociImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+    if info.Digest == "" {
+        return false, -1, fmt.Errorf("Can not check for a blob with unknown digest")
+    }
+    blobPath, err := d.ref.blobPath(info.Digest)
+    if err != nil {
+        return false, -1, err
+    }
+    finfo, err := os.Stat(blobPath)
+    if err != nil && os.IsNotExist(err) {
+        return false, -1, types.ErrBlobNotFound
+    }
+    if err != nil {
+        return false, -1, err
+    }
+    return true, finfo.Size(), nil
+}
+
+func (d *ociImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+    return info, nil
 }
 
 func createManifest(m []byte) ([]byte, string, error) {
@@ -118,8 +149,12 @@ func createManifest(m []byte) ([]byte, string, error) {
 		return nil, "", err
 	}
 	om.MediaType = imgspecv1.MediaTypeImageManifest
-	for i := range om.Layers {
-		om.Layers[i].MediaType = imgspecv1.MediaTypeImageLayer
+	for i, l := range om.Layers {
+		if l.MediaType == manifest.DockerV2Schema2ForeignLayerMediaType {
+			om.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
+		} else {
+			om.Layers[i].MediaType = imgspecv1.MediaTypeImageLayer
+		}
 	}
 	om.Config.MediaType = imgspecv1.MediaTypeImageConfig
 	b, err := json.Marshal(om)
@@ -148,8 +183,8 @@ func (d *ociImageDestination) PutManifest(m []byte) error {
 	if err != nil {
 		return err
 	}
-	desc := imgspec.Descriptor{}
-	desc.Digest = digest
+	desc := imgspecv1.Descriptor{}
+	desc.Digest = digest.String()
 	// TODO(runcom): beaware and add support for OCI manifest list
 	desc.MediaType = mt
 	desc.Size = int64(len(ociMan))
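The PutBlob rewrite above computes the blob digest while the data is being copied, instead of buffering the whole blob. As a side note for reviewers, a self-contained sketch of the same technique using only the standard library (names here are illustrative, not part of the patch):

    package main

    import (
        "crypto/sha256"
        "encoding/hex"
        "fmt"
        "io"
        "io/ioutil"
        "os"
        "strings"
    )

    // copyWithDigest streams src into dst and returns the "sha256:..." digest
    // and size of everything copied, without holding the blob in memory.
    func copyWithDigest(dst io.Writer, src io.Reader) (string, int64, error) {
        h := sha256.New()
        // Every byte read through tee is also fed into the hash.
        tee := io.TeeReader(src, h)
        size, err := io.Copy(dst, tee)
        if err != nil {
            return "", -1, err
        }
        return "sha256:" + hex.EncodeToString(h.Sum(nil)), size, nil
    }

    func main() {
        digest, size, err := copyWithDigest(ioutil.Discard, strings.NewReader("hello\n"))
        if err != nil {
            panic(err)
        }
        fmt.Fprintf(os.Stderr, "%s (%d bytes)\n", digest, size)
    }

diff --git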
a/vendor/src/github.com/containers/image/oci/layout/oci_src.go b/vendor/src/github.com/containers/image/oci/layout/oci_src.go new file mode 100644 index 00000000..e1501b94 --- /dev/null +++ b/vendor/src/github.com/containers/image/oci/layout/oci_src.go @@ -0,0 +1,94 @@ +package layout + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + + "github.com/containers/image/manifest" + "github.com/containers/image/types" + "github.com/docker/distribution/digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +type ociImageSource struct { + ref ociReference +} + +// newImageSource returns an ImageSource for reading from an existing directory. +func newImageSource(ref ociReference) types.ImageSource { + return &ociImageSource{ref: ref} +} + +// Reference returns the reference used to set up this source. +func (s *ociImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. +func (s *ociImageSource) Close() { +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +func (s *ociImageSource) GetManifest() ([]byte, string, error) { + descriptorPath := s.ref.descriptorPath(s.ref.tag) + data, err := ioutil.ReadFile(descriptorPath) + if err != nil { + return nil, "", err + } + + desc := imgspecv1.Descriptor{} + err = json.Unmarshal(data, &desc) + if err != nil { + return nil, "", err + } + + manifestPath, err := s.ref.blobPath(digest.Digest(desc.Digest)) + if err != nil { + return nil, "", err + } + m, err := ioutil.ReadFile(manifestPath) + if err != nil { + return nil, "", err + } + + return m, manifest.GuessMIMEType(m), nil +} + +func (s *ociImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { + manifestPath, err := s.ref.blobPath(digest) + if err != nil { + return nil, "", err + } + + m, err := ioutil.ReadFile(manifestPath) + if err != nil { + return nil, "", err + } + + return m, manifest.GuessMIMEType(m), nil +} + +// GetBlob returns a stream for the specified blob, and the blob's size. +func (s *ociImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { + path, err := s.ref.blobPath(info.Digest) + if err != nil { + return nil, 0, err + } + + r, err := os.Open(path) + if err != nil { + return nil, 0, err + } + fi, err := r.Stat() + if err != nil { + return nil, 0, err + } + return r, fi.Size(), nil +} + +func (s *ociImageSource) GetSignatures() ([][]byte, error) { + return [][]byte{}, nil +} diff --git a/vendor/src/github.com/containers/image/oci/layout/oci_transport.go b/vendor/src/github.com/containers/image/oci/layout/oci_transport.go index eee79957..7faddf97 100644 --- a/vendor/src/github.com/containers/image/oci/layout/oci_transport.go +++ b/vendor/src/github.com/containers/image/oci/layout/oci_transport.go @@ -8,8 +8,10 @@ import ( "strings" "github.com/containers/image/directory/explicitfilepath" + "github.com/containers/image/docker/reference" + "github.com/containers/image/image" "github.com/containers/image/types" - "github.com/docker/docker/reference" + "github.com/docker/distribution/digest" ) // Transport is an ImageTransport for OCI directories. @@ -164,10 +166,13 @@ func (ref ociReference) PolicyConfigurationNamespaces() []string { return res } -// NewImage returns a types.Image for this reference. 
+// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. // The caller must call .Close() on the returned Image. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. func (ref ociReference) NewImage(ctx *types.SystemContext) (types.Image, error) { - return nil, errors.New("Full Image support not implemented for oci: image names") + src := newImageSource(ref) + return image.FromSource(src) } // NewImageSource returns a types.ImageSource for this reference, @@ -175,7 +180,7 @@ func (ref ociReference) NewImage(ctx *types.SystemContext) (types.Image, error) // nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. // The caller must call .Close() on the returned ImageSource. func (ref ociReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { - return nil, errors.New("Reading images not implemented for oci: image names") + return newImageSource(ref), nil } // NewImageDestination returns a types.ImageDestination for this reference. @@ -195,12 +200,11 @@ func (ref ociReference) ociLayoutPath() string { } // blobPath returns a path for a blob within a directory using OCI image-layout conventions. -func (ref ociReference) blobPath(digest string) (string, error) { - pts := strings.SplitN(digest, ":", 2) - if len(pts) != 2 { - return "", fmt.Errorf("unexpected digest reference %s", digest) +func (ref ociReference) blobPath(digest digest.Digest) (string, error) { + if err := digest.Validate(); err != nil { + return "", fmt.Errorf("unexpected digest reference %s: %v", digest, err) } - return filepath.Join(ref.dir, "blobs", pts[0], pts[1]), nil + return filepath.Join(ref.dir, "blobs", digest.Algorithm().String(), digest.Hex()), nil } // descriptorPath returns a path for the manifest within a directory using OCI conventions. diff --git a/vendor/src/github.com/containers/image/openshift/openshift.go b/vendor/src/github.com/containers/image/openshift/openshift.go index ecd41306..a8f66681 100644 --- a/vendor/src/github.com/containers/image/openshift/openshift.go +++ b/vendor/src/github.com/containers/image/openshift/openshift.go @@ -11,13 +11,13 @@ import ( "net/http" "net/url" "strings" - "time" "github.com/Sirupsen/logrus" "github.com/containers/image/docker" "github.com/containers/image/manifest" "github.com/containers/image/types" "github.com/containers/image/version" + "github.com/docker/distribution/digest" ) // openshiftClient is configuration for dealing with a single image stream, for reading or writing. @@ -57,7 +57,6 @@ func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) { if httpClient == nil { httpClient = http.DefaultClient } - httpClient.Timeout = 1 * time.Minute return &openshiftClient{ ref: ref, @@ -198,6 +197,15 @@ func (s *openshiftImageSource) Close() { } } +func (s *openshiftImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { + if err := s.ensureImageIsResolved(); err != nil { + return nil, "", err + } + return s.docker.GetTargetManifest(digest) +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. 
func (s *openshiftImageSource) GetManifest() ([]byte, string, error) { if err := s.ensureImageIsResolved(); err != nil { return nil, "", err @@ -206,11 +214,11 @@ func (s *openshiftImageSource) GetManifest() ([]byte, string, error) { } // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -func (s *openshiftImageSource) GetBlob(digest string) (io.ReadCloser, int64, error) { +func (s *openshiftImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { if err := s.ensureImageIsResolved(); err != nil { return nil, 0, err } - return s.docker.GetBlob(digest) + return s.docker.GetBlob(info) } func (s *openshiftImageSource) GetSignatures() ([][]byte, error) { @@ -337,60 +345,43 @@ func (d *openshiftImageDestination) SupportsSignatures() error { return nil } -// PutBlob writes contents of stream and returns its computed digest and size. -// A digest can be optionally provided if known, the specific image destination can decide to play with it or not. -// The length of stream is expected to be expectedSize; if expectedSize == -1, it is not known. +// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. +func (d *openshiftImageDestination) ShouldCompressLayers() bool { + return true +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool { + return true +} + +// PutBlob writes contents of stream and returns data representing the result (with all data filled in). +// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. +// inputInfo.Size is the expected length of stream, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *openshiftImageDestination) PutBlob(stream io.Reader, digest string, expectedSize int64) (string, int64, error) { - return d.docker.PutBlob(stream, digest, expectedSize) +func (d *openshiftImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) { + return d.docker.PutBlob(stream, inputInfo) +} + +func (d *openshiftImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) { + return d.docker.HasBlob(info) +} + +func (d *openshiftImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) { + return d.docker.ReapplyBlob(info) } func (d *openshiftImageDestination) PutManifest(m []byte) error { - // FIXME? Can this eventually just call d.docker.PutManifest()? - // Right now we need this as a skeleton to attach signatures to, and - // to workaround our inability to change tags when uploading v2s1 manifests. - - // Note: This does absolutely no kind/version checking or conversions. manifestDigest, err := manifest.Digest(m) if err != nil { return err } - d.imageStreamImageName = manifestDigest - // FIXME: We can't do what respositorymiddleware.go does because we don't know the internal address. Does any of this matter? 
- dockerImageReference := fmt.Sprintf("%s/%s/%s@%s", d.client.ref.dockerReference.Hostname(), d.client.ref.namespace, d.client.ref.stream, manifestDigest) - ism := imageStreamMapping{ - typeMeta: typeMeta{ - Kind: "ImageStreamMapping", - APIVersion: "v1", - }, - objectMeta: objectMeta{ - Namespace: d.client.ref.namespace, - Name: d.client.ref.stream, - }, - Image: image{ - objectMeta: objectMeta{ - Name: manifestDigest, - }, - DockerImageReference: dockerImageReference, - DockerImageManifest: string(m), - }, - Tag: d.client.ref.dockerReference.Tag(), - } - body, err := json.Marshal(ism) - if err != nil { - return err - } + d.imageStreamImageName = manifestDigest.String() - // FIXME: validate components per validation.IsValidPathSegmentName? - path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreammappings", d.client.ref.namespace) - body, err = d.client.doRequest("POST", path, body) - if err != nil { - return err - } - - return nil + return d.docker.PutManifest(m) } func (d *openshiftImageDestination) PutSignatures(signatures [][]byte) error { @@ -505,12 +496,6 @@ type imageSignature struct { // IssuedBy SignatureIssuer `json:"issuedBy,omitempty"` // IssuedTo SignatureSubject `json:"issuedTo,omitempty"` } -type imageStreamMapping struct { - typeMeta `json:",inline"` - objectMeta `json:"metadata,omitempty"` - Image image `json:"image"` - Tag string `json:"tag"` -} type typeMeta struct { Kind string `json:"kind,omitempty"` APIVersion string `json:"apiVersion,omitempty"` diff --git a/vendor/src/github.com/containers/image/openshift/openshift_transport.go b/vendor/src/github.com/containers/image/openshift/openshift_transport.go index 8e53b1f9..9e2b106c 100644 --- a/vendor/src/github.com/containers/image/openshift/openshift_transport.go +++ b/vendor/src/github.com/containers/image/openshift/openshift_transport.go @@ -1,14 +1,14 @@ package openshift import ( - "errors" "fmt" "regexp" "strings" "github.com/containers/image/docker/policyconfiguration" + "github.com/containers/image/docker/reference" + genericImage "github.com/containers/image/image" "github.com/containers/image/types" - "github.com/docker/docker/reference" ) // Transport is an ImageTransport for OpenShift registry-hosted images. @@ -118,10 +118,16 @@ func (ref openshiftReference) PolicyConfigurationNamespaces() []string { return policyconfiguration.DockerReferenceNamespaces(ref.dockerReference) } -// NewImage returns a types.Image for this reference. +// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. // The caller must call .Close() on the returned Image. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. 
func (ref openshiftReference) NewImage(ctx *types.SystemContext) (types.Image, error) { - return nil, errors.New("Full Image support not implemented for atomic: image names") + src, err := newImageSource(ctx, ref, nil) + if err != nil { + return nil, err + } + return genericImage.FromSource(src) } // NewImageSource returns a types.ImageSource for this reference, diff --git a/vendor/src/github.com/containers/image/signature/docker.go b/vendor/src/github.com/containers/image/signature/docker.go new file mode 100644 index 00000000..b90da5ff --- /dev/null +++ b/vendor/src/github.com/containers/image/signature/docker.go @@ -0,0 +1,61 @@ +// Note: Consider the API unstable until the code supports at least three different image formats or transports. + +package signature + +import ( + "fmt" + + "github.com/containers/image/manifest" + "github.com/docker/distribution/digest" +) + +// SignDockerManifest returns a signature for manifest as the specified dockerReference, +// using mech and keyIdentity. +func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) { + manifestDigest, err := manifest.Digest(m) + if err != nil { + return nil, err + } + sig := privateSignature{ + Signature{ + DockerManifestDigest: manifestDigest, + DockerReference: dockerReference, + }, + } + return sig.sign(mech, keyIdentity) +} + +// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference, +// using mech. +func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte, + expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) { + sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{ + validateKeyIdentity: func(keyIdentity string) error { + if keyIdentity != expectedKeyIdentity { + return InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)} + } + return nil + }, + validateSignedDockerReference: func(signedDockerReference string) error { + if signedDockerReference != expectedDockerReference { + return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s", + signedDockerReference, expectedDockerReference)} + } + return nil + }, + validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error { + matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest) + if err != nil { + return err + } + if !matches { + return InvalidSignatureError{msg: fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)} + } + return nil + }, + }) + if err != nil { + return nil, err + } + return sig, nil +} diff --git a/vendor/src/github.com/containers/image/signature/json.go b/vendor/src/github.com/containers/image/signature/json.go new file mode 100644 index 00000000..12d14c02 --- /dev/null +++ b/vendor/src/github.com/containers/image/signature/json.go @@ -0,0 +1,107 @@ +package signature + +import ( + "bytes" + "encoding/json" + "fmt" + "io" +) + +// jsonFormatError is returned when JSON does not match expected format. 
+type jsonFormatError string
+
+func (err jsonFormatError) Error() string {
+    return string(err)
+}
+
+// validateExactMapKeys returns an error if the keys of m are not exactly expectedKeys, which must be pairwise distinct
+func validateExactMapKeys(m map[string]interface{}, expectedKeys ...string) error {
+    if len(m) != len(expectedKeys) {
+        return jsonFormatError("Unexpected keys in a JSON object")
+    }
+
+    for _, k := range expectedKeys {
+        if _, ok := m[k]; !ok {
+            return jsonFormatError(fmt.Sprintf("Key %s missing in a JSON object", k))
+        }
+    }
+    // Assuming expectedKeys are pairwise distinct, we know m contains len(expectedKeys) different values in expectedKeys.
+    return nil
+}
+
+// mapField returns a member fieldName of m, if it is a JSON map, or an error.
+func mapField(m map[string]interface{}, fieldName string) (map[string]interface{}, error) {
+    untyped, ok := m[fieldName]
+    if !ok {
+        return nil, jsonFormatError(fmt.Sprintf("Field %s missing", fieldName))
+    }
+    v, ok := untyped.(map[string]interface{})
+    if !ok {
+        return nil, jsonFormatError(fmt.Sprintf("Field %s is not a JSON object", fieldName))
+    }
+    return v, nil
+}
+
+// stringField returns a member fieldName of m, if it is a string, or an error.
+func stringField(m map[string]interface{}, fieldName string) (string, error) {
+    untyped, ok := m[fieldName]
+    if !ok {
+        return "", jsonFormatError(fmt.Sprintf("Field %s missing", fieldName))
+    }
+    v, ok := untyped.(string)
+    if !ok {
+        return "", jsonFormatError(fmt.Sprintf("Field %s is not a string", fieldName))
+    }
+    return v, nil
+}
+
+// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but fails on the slightest unexpected aspect
+// (including duplicated keys, unrecognized keys, and non-matching types). It uses fieldResolver to
+// determine the destination for a field value; fieldResolver should return a pointer to the destination if the key is valid, or nil if the key is rejected.
+//
+// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy,
+// we could use reflection to automate this. Later?
+func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error {
+    seenKeys := map[string]struct{}{}
+
+    dec := json.NewDecoder(bytes.NewReader(data))
+    t, err := dec.Token()
+    if err != nil {
+        return jsonFormatError(err.Error())
+    }
+    if t != json.Delim('{') {
+        return jsonFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t))
+    }
+    for {
+        t, err := dec.Token()
+        if err != nil {
+            return jsonFormatError(err.Error())
+        }
+        if t == json.Delim('}') {
+            break
+        }
+
+        key, ok := t.(string)
+        if !ok {
+            // Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
+            return jsonFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
+        }
+        if _, ok := seenKeys[key]; ok {
+            return jsonFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
+        }
+        seenKeys[key] = struct{}{}
+
+        valuePtr := fieldResolver(key)
+        if valuePtr == nil {
+            return jsonFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
+        }
+        // This works like json.Unmarshal; in particular it allows us to use UnmarshalJSON to implement strict parsing of the field value.
+        if err := dec.Decode(valuePtr); err != nil {
+            return jsonFormatError(err.Error())
+        }
+    }
+    if _, err := dec.Token(); err != io.EOF {
+        return jsonFormatError("Unexpected data after JSON object")
+    }
+    return nil
+}
diff --git a/vendor/src/github.com/containers/image/signature/mechanism.go b/vendor/src/github.com/containers/image/signature/mechanism.go
new file mode 100644
index 00000000..9b1b47ff
--- /dev/null
+++ b/vendor/src/github.com/containers/image/signature/mechanism.go
@@ -0,0 +1,124 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+package signature
+
+import (
+    "bytes"
+    "fmt"
+
+    "github.com/mtrmac/gpgme"
+)
+
+// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
+// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to
+// eliminate ambiguities, support CA signatures and perhaps other key properties)
+type SigningMechanism interface {
+    // ImportKeysFromBytes imports public keys from the supplied blob and returns their identities.
+    // The blob is assumed to have an appropriate format (the caller is expected to know which one).
+    // NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism).
+    ImportKeysFromBytes(blob []byte) ([]string, error)
+    // Sign creates a (non-detached) signature of input using keyIdentity
+    Sign(input []byte, keyIdentity string) ([]byte, error)
+    // Verify parses unverifiedSignature and returns the content and the signer's identity
+    Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error)
+}
+
+// A GPG/OpenPGP signing mechanism.
+type gpgSigningMechanism struct {
+    ctx *gpgme.Context
+}
+
+// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism.
+func NewGPGSigningMechanism() (SigningMechanism, error) {
+    return newGPGSigningMechanismInDirectory("")
+}
+
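A quick sketch of how a caller is expected to use this mechanism for verification; this is hypothetical caller code, not part of the patch, where `sigBlob` is a signature blob read from storage and key import is elided:

    // Hypothetical caller code, not part of this patch.
    mech, err := signature.NewGPGSigningMechanism()
    if err != nil {
        return err
    }
    contents, keyIdentity, err := mech.Verify(sigBlob)
    if err != nil {
        return err // the signature did not verify
    }
    // contents is the signed payload; keyIdentity is the signing key's fingerprint.
    _, _ = contents, keyIdentity

+// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.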
+func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
+    ctx, err := gpgme.New()
+    if err != nil {
+        return nil, err
+    }
+    if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil {
+        return nil, err
+    }
+    if optionalDir != "" {
+        err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir)
+        if err != nil {
+            return nil, err
+        }
+    }
+    ctx.SetArmor(false)
+    ctx.SetTextMode(false)
+    return gpgSigningMechanism{ctx: ctx}, nil
+}
+
+// ImportKeysFromBytes implements SigningMechanism.ImportKeysFromBytes
+func (m gpgSigningMechanism) ImportKeysFromBytes(blob []byte) ([]string, error) {
+    inputData, err := gpgme.NewDataBytes(blob)
+    if err != nil {
+        return nil, err
+    }
+    res, err := m.ctx.Import(inputData)
+    if err != nil {
+        return nil, err
+    }
+    keyIdentities := []string{}
+    for _, i := range res.Imports {
+        if i.Result == nil {
+            keyIdentities = append(keyIdentities, i.Fingerprint)
+        }
+    }
+    return keyIdentities, nil
+}
+
+// Sign implements SigningMechanism.Sign
+func (m gpgSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+    key, err := m.ctx.GetKey(keyIdentity, true)
+    if err != nil {
+        if e, ok := err.(gpgme.Error); ok && e.Code() == gpgme.ErrorEOF {
+            return nil, fmt.Errorf("key %q not found", keyIdentity)
+        }
+        return nil, err
+    }
+    inputData, err := gpgme.NewDataBytes(input)
+    if err != nil {
+        return nil, err
+    }
+    var sigBuffer bytes.Buffer
+    sigData, err := gpgme.NewDataWriter(&sigBuffer)
+    if err != nil {
+        return nil, err
+    }
+    if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil {
+        return nil, err
+    }
+    return sigBuffer.Bytes(), nil
+}
+
+// Verify implements SigningMechanism.Verify
+func (m gpgSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
+    signedBuffer := bytes.Buffer{}
+    signedData, err := gpgme.NewDataWriter(&signedBuffer)
+    if err != nil {
+        return nil, "", err
+    }
+    unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature)
+    if err != nil {
+        return nil, "", err
+    }
+    _, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData)
+    if err != nil {
+        return nil, "", err
+    }
+    if len(sigs) != 1 {
+        return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))}
+    }
+    sig := sigs[0]
+    // This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves
+    if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage {
+        // FIXME: Better error reporting eventually
+        return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)}
+    }
+    return signedBuffer.Bytes(), sig.Fingerprint, nil
+}
diff --git a/vendor/src/github.com/containers/image/signature/policy_config.go b/vendor/src/github.com/containers/image/signature/policy_config.go
new file mode 100644
index 00000000..adac6888
--- /dev/null
+++ b/vendor/src/github.com/containers/image/signature/policy_config.go
@@ -0,0 +1,732 @@
+// policy_config.go handles creation of policy objects, either by parsing JSON
+// or by programs building them programmatically.

+// The New* constructors are intended to be a stable API. FIXME: after an independent review.

+// Do not invoke the internals of the JSON marshaling/unmarshaling directly.

+// We can't just blindly call json.Unmarshal because that would silently ignore
+// typos, and that would just not do for security policy.

+// FIXME? This is by no means a user-friendly parser: no location information in error messages, no other context.
+// But at least it is not worse than blind json.Unmarshal()…
+
+package signature
+
+import (
+    "encoding/json"
+    "errors"
+    "fmt"
+    "io/ioutil"
+    "path/filepath"
+
+    "github.com/containers/image/docker/reference"
+    "github.com/containers/image/transports"
+    "github.com/containers/image/types"
+)
+
+// systemDefaultPolicyPath is the policy path used for DefaultPolicy().
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path'
+var systemDefaultPolicyPath = builtinDefaultPolicyPath
+
+// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
+// DO NOT change this, instead see systemDefaultPolicyPath above.
+const builtinDefaultPolicyPath = "/etc/containers/policy.json"
+
+// InvalidPolicyFormatError is returned when parsing an invalid policy configuration.
+type InvalidPolicyFormatError string
+
+func (err InvalidPolicyFormatError) Error() string {
+    return string(err)
+}
+
+// DefaultPolicy returns the default policy of the system.
+// Most applications should be using this method to get the policy configured
+// by the system administrator.
+// ctx should usually be nil; it can be set to override the default.
+// NOTE: When this function returns an error, report it to the user and abort.
+// DO NOT hard-code fallback policies in your application.
+func DefaultPolicy(ctx *types.SystemContext) (*Policy, error) {
+    return NewPolicyFromFile(defaultPolicyPath(ctx))
+}
+
+// defaultPolicyPath returns a path to the default policy of the system.
+func defaultPolicyPath(ctx *types.SystemContext) string {
+    if ctx != nil {
+        if ctx.SignaturePolicyPath != "" {
+            return ctx.SignaturePolicyPath
+        }
+        if ctx.RootForImplicitAbsolutePaths != "" {
+            return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemDefaultPolicyPath)
+        }
+    }
+    return systemDefaultPolicyPath
+}
+
+// NewPolicyFromFile returns a policy configured in the specified file.
+func NewPolicyFromFile(fileName string) (*Policy, error) {
+    contents, err := ioutil.ReadFile(fileName)
+    if err != nil {
+        return nil, err
+    }
+    return NewPolicyFromBytes(contents)
+}
+
+// NewPolicyFromBytes returns a policy parsed from the specified blob.
+// Use this function instead of calling json.Unmarshal directly.
+func NewPolicyFromBytes(data []byte) (*Policy, error) {
+    p := Policy{}
+    if err := json.Unmarshal(data, &p); err != nil {
+        return nil, InvalidPolicyFormatError(err.Error())
+    }
+    return &p, nil
+}
+
+// Compile-time check that Policy implements json.Unmarshaler.
+var _ json.Unmarshaler = (*Policy)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (p *Policy) UnmarshalJSON(data []byte) error {
+    *p = Policy{}
+    transports := policyTransportsMap{}
+    if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+        switch key {
+        case "default":
+            return &p.Default
+        case "transports":
+            return &transports
+        default:
+            return nil
+        }
+    }); err != nil {
+        return err
+    }
+
+    if p.Default == nil {
+        return InvalidPolicyFormatError("Default policy is missing")
+    }
+    p.Transports = map[string]PolicyTransportScopes(transports)
+    return nil
+}
+
+// policyTransportsMap is a specialization of this map type for the strict JSON parsing semantics appropriate for the Policy.Transports member.
+type policyTransportsMap map[string]PolicyTransportScopes
+
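Because policy.json is the security-critical input here, a short illustration of what NewPolicyFromBytes accepts may help reviewers. This is hypothetical caller code, not part of the patch; the registry scope and key path are made-up examples, not defaults shipped by this change:

    // Hypothetical caller code: a policy that rejects everything except
    // images from one docker namespace signed by one GPG key.
    policyJSON := []byte(`{
        "default": [{"type": "reject"}],
        "transports": {
            "docker": {
                "docker.io/example": [
                    {
                        "type": "signedBy",
                        "keyType": "GPGKeys",
                        "keyPath": "/etc/containers/example-pubkey.gpg"
                    }
                ]
            }
        }
    }`)
    policy, err := signature.NewPolicyFromBytes(policyJSON)
    if err != nil {
        return err // any unknown or duplicated key in the blob fails hard, by design
    }
    _ = policy

Note that the strict parsing machinery below is what turns a typo like "defualt" into a hard error rather than a silently ignored field.

+// Compile-time check that policyTransportsMap implements json.Unmarshaler.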
+var _ json.Unmarshaler = (*policyTransportsMap)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *policyTransportsMap) UnmarshalJSON(data []byte) error {
+    // We can't unmarshal directly into map values because it is not possible to take an address of a map value.
+    // So, use a temporary map of pointers-to-slices and convert.
+    tmpMap := map[string]*PolicyTransportScopes{}
+    if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+        transport, ok := transports.KnownTransports[key]
+        if !ok {
+            return nil
+        }
+        // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+        if _, ok := tmpMap[key]; ok {
+            return nil
+        }
+        ptsWithTransport := policyTransportScopesWithTransport{
+            transport: transport,
+            dest:      &PolicyTransportScopes{}, // This allocates a new instance on each call.
+        }
+        tmpMap[key] = ptsWithTransport.dest
+        return &ptsWithTransport
+    }); err != nil {
+        return err
+    }
+    for key, ptr := range tmpMap {
+        (*m)[key] = *ptr
+    }
+    return nil
+}
+
+// Compile-time check that PolicyTransportScopes "implements" json.Unmarshaler;
+// we want to only use policyTransportScopesWithTransport.
+var _ json.Unmarshaler = (*PolicyTransportScopes)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *PolicyTransportScopes) UnmarshalJSON(data []byte) error {
+    return errors.New("Do not try to unmarshal PolicyTransportScopes directly")
+}
+
+// policyTransportScopesWithTransport is a way to unmarshal a PolicyTransportScopes
+// while validating using a specific ImageTransport.
+type policyTransportScopesWithTransport struct {
+    transport types.ImageTransport
+    dest      *PolicyTransportScopes
+}
+
+// Compile-time check that policyTransportScopesWithTransport implements json.Unmarshaler.
+var _ json.Unmarshaler = (*policyTransportScopesWithTransport)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error {
+    // We can't unmarshal directly into map values because it is not possible to take an address of a map value.
+    // So, use a temporary map of pointers-to-slices and convert.
+    tmpMap := map[string]*PolicyRequirements{}
+    if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+        // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+        if _, ok := tmpMap[key]; ok {
+            return nil
+        }
+        if key != "" {
+            if err := m.transport.ValidatePolicyConfigurationScope(key); err != nil {
+                return nil
+            }
+        }
+        ptr := &PolicyRequirements{} // This allocates a new instance on each call.
+        tmpMap[key] = ptr
+        return ptr
+    }); err != nil {
+        return err
+    }
+    for key, ptr := range tmpMap {
+        (*m.dest)[key] = *ptr
+    }
+    return nil
+}
+
+// Compile-time check that PolicyRequirements implements json.Unmarshaler.
+var _ json.Unmarshaler = (*PolicyRequirements)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *PolicyRequirements) UnmarshalJSON(data []byte) error { + reqJSONs := []json.RawMessage{} + if err := json.Unmarshal(data, &reqJSONs); err != nil { + return err + } + if len(reqJSONs) == 0 { + return InvalidPolicyFormatError("List of verification policy requirements must not be empty") + } + res := make([]PolicyRequirement, len(reqJSONs)) + for i, reqJSON := range reqJSONs { + req, err := newPolicyRequirementFromJSON(reqJSON) + if err != nil { + return err + } + res[i] = req + } + *m = res + return nil +} + +// newPolicyRequirementFromJSON parses JSON data into a PolicyRequirement implementation. +func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) { + var typeField prCommon + if err := json.Unmarshal(data, &typeField); err != nil { + return nil, err + } + var res PolicyRequirement + switch typeField.Type { + case prTypeInsecureAcceptAnything: + res = &prInsecureAcceptAnything{} + case prTypeReject: + res = &prReject{} + case prTypeSignedBy: + res = &prSignedBy{} + case prTypeSignedBaseLayer: + res = &prSignedBaseLayer{} + default: + return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type)) + } + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return res, nil +} + +// newPRInsecureAcceptAnything is NewPRInsecureAcceptAnything, except it returns the private type. +func newPRInsecureAcceptAnything() *prInsecureAcceptAnything { + return &prInsecureAcceptAnything{prCommon{Type: prTypeInsecureAcceptAnything}} +} + +// NewPRInsecureAcceptAnything returns a new "insecureAcceptAnything" PolicyRequirement. +func NewPRInsecureAcceptAnything() PolicyRequirement { + return newPRInsecureAcceptAnything() +} + +// Compile-time check that prInsecureAcceptAnything implements json.Unmarshaler. +var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error { + *pr = prInsecureAcceptAnything{} + var tmp prInsecureAcceptAnything + if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { + switch key { + case "type": + return &tmp.Type + default: + return nil + } + }); err != nil { + return err + } + + if tmp.Type != prTypeInsecureAcceptAnything { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + *pr = *newPRInsecureAcceptAnything() + return nil +} + +// newPRReject is NewPRReject, except it returns the private type. +func newPRReject() *prReject { + return &prReject{prCommon{Type: prTypeReject}} +} + +// NewPRReject returns a new "reject" PolicyRequirement. +func NewPRReject() PolicyRequirement { + return newPRReject() +} + +// Compile-time check that prReject implements json.Unmarshaler. +var _ json.Unmarshaler = (*prReject)(nil) + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (pr *prReject) UnmarshalJSON(data []byte) error { + *pr = prReject{} + var tmp prReject + if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { + switch key { + case "type": + return &tmp.Type + default: + return nil + } + }); err != nil { + return err + } + + if tmp.Type != prTypeReject { + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + } + *pr = *newPRReject() + return nil +} + +// newPRSignedBy returns a new prSignedBy if parameters are valid. 
+func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+    if !keyType.IsValid() {
+        return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType))
+    }
+    if len(keyPath) > 0 && len(keyData) > 0 {
+        return nil, InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
+    }
+    if signedIdentity == nil {
+        return nil, InvalidPolicyFormatError("signedIdentity not specified")
+    }
+    return &prSignedBy{
+        prCommon:       prCommon{Type: prTypeSignedBy},
+        KeyType:        keyType,
+        KeyPath:        keyPath,
+        KeyData:        keyData,
+        SignedIdentity: signedIdentity,
+    }, nil
+}
+
+// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type.
+func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+    return newPRSignedBy(keyType, keyPath, nil, signedIdentity)
+}
+
+// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath.
+func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+    return newPRSignedByKeyPath(keyType, keyPath, signedIdentity)
+}
+
+// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type.
+func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+    return newPRSignedBy(keyType, "", keyData, signedIdentity)
+}
+
+// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData.
+func NewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+    return newPRSignedByKeyData(keyType, keyData, signedIdentity)
+}
+
+// Compile-time check that prSignedBy implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSignedBy)(nil)
+
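For completeness, a sketch of building the same kind of requirement programmatically through the stable constructors above; hypothetical caller code, with a placeholder key path:

    // Hypothetical caller code: a "signedBy" requirement equivalent to the
    // JSON form, with the default matchRepoDigestOrExact identity matching.
    prm := signature.NewPRMMatchRepoDigestOrExact()
    req, err := signature.NewPRSignedByKeyPath(signature.SBKeyTypeGPGKeys,
        "/etc/containers/example-pubkey.gpg", prm)
    if err != nil {
        return err
    }
    reqs := signature.PolicyRequirements{req}
    _ = reqs

+// UnmarshalJSON implements the json.Unmarshaler interface.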
+func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
+    *pr = prSignedBy{}
+    var tmp prSignedBy
+    var gotKeyPath, gotKeyData = false, false
+    var signedIdentity json.RawMessage
+    if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+        switch key {
+        case "type":
+            return &tmp.Type
+        case "keyType":
+            return &tmp.KeyType
+        case "keyPath":
+            gotKeyPath = true
+            return &tmp.KeyPath
+        case "keyData":
+            gotKeyData = true
+            return &tmp.KeyData
+        case "signedIdentity":
+            return &signedIdentity
+        default:
+            return nil
+        }
+    }); err != nil {
+        return err
+    }
+
+    if tmp.Type != prTypeSignedBy {
+        return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+    }
+    if signedIdentity == nil {
+        tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
+    } else {
+        si, err := newPolicyReferenceMatchFromJSON(signedIdentity)
+        if err != nil {
+            return err
+        }
+        tmp.SignedIdentity = si
+    }
+
+    var res *prSignedBy
+    var err error
+    switch {
+    case gotKeyPath && gotKeyData:
+        return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
+    case gotKeyPath && !gotKeyData:
+        res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity)
+    case !gotKeyPath && gotKeyData:
+        res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity)
+    case !gotKeyPath && !gotKeyData:
+        return InvalidPolicyFormatError("At least one of keyPath and keyData must be specified")
+    default: // Coverage: This should never happen
+        return fmt.Errorf("Impossible keyPath/keyData presence combination!?")
+    }
+    if err != nil {
+        return err
+    }
+    *pr = *res
+
+    return nil
+}
+
+// IsValid returns true iff kt is a recognized value.
+func (kt sbKeyType) IsValid() bool {
+    switch kt {
+    case SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys,
+        SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
+        return true
+    default:
+        return false
+    }
+}
+
+// Compile-time check that sbKeyType implements json.Unmarshaler.
+var _ json.Unmarshaler = (*sbKeyType)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (kt *sbKeyType) UnmarshalJSON(data []byte) error {
+    *kt = sbKeyType("")
+    var s string
+    if err := json.Unmarshal(data, &s); err != nil {
+        return err
+    }
+    if !sbKeyType(s).IsValid() {
+        return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s))
+    }
+    *kt = sbKeyType(s)
+    return nil
+}
+
+// newPRSignedBaseLayer is NewPRSignedBaseLayer, except it returns the private type.
+func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) {
+    if baseLayerIdentity == nil {
+        return nil, InvalidPolicyFormatError("baseLayerIdentity not specified")
+    }
+    return &prSignedBaseLayer{
+        prCommon:          prCommon{Type: prTypeSignedBaseLayer},
+        BaseLayerIdentity: baseLayerIdentity,
+    }, nil
+}
+
+// NewPRSignedBaseLayer returns a new "signedBaseLayer" PolicyRequirement.
+func NewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+    return newPRSignedBaseLayer(baseLayerIdentity)
+}
+
+// Compile-time check that prSignedBaseLayer implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSignedBaseLayer)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
+    *pr = prSignedBaseLayer{}
+    var tmp prSignedBaseLayer
+    var baseLayerIdentity json.RawMessage
+    if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+        switch key {
+        case "type":
+            return &tmp.Type
+        case "baseLayerIdentity":
+            return &baseLayerIdentity
+        default:
+            return nil
+        }
+    }); err != nil {
+        return err
+    }
+
+    if tmp.Type != prTypeSignedBaseLayer {
+        return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+    }
+    if baseLayerIdentity == nil {
+        return InvalidPolicyFormatError("baseLayerIdentity not specified")
+    }
+    bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity)
+    if err != nil {
+        return err
+    }
+    res, err := newPRSignedBaseLayer(bli)
+    if err != nil {
+        // Coverage: This should never happen, newPolicyReferenceMatchFromJSON has ensured bli is valid.
+        return err
+    }
+    *pr = *res
+    return nil
+}
+
+// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation.
+func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) {
+    var typeField prmCommon
+    if err := json.Unmarshal(data, &typeField); err != nil {
+        return nil, err
+    }
+    var res PolicyReferenceMatch
+    switch typeField.Type {
+    case prmTypeMatchExact:
+        res = &prmMatchExact{}
+    case prmTypeMatchRepoDigestOrExact:
+        res = &prmMatchRepoDigestOrExact{}
+    case prmTypeMatchRepository:
+        res = &prmMatchRepository{}
+    case prmTypeExactReference:
+        res = &prmExactReference{}
+    case prmTypeExactRepository:
+        res = &prmExactRepository{}
+    default:
+        return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type))
+    }
+    if err := json.Unmarshal(data, &res); err != nil {
+        return nil, err
+    }
+    return res, nil
+}
+
+// newPRMMatchExact is NewPRMMatchExact, except it returns the private type.
+func newPRMMatchExact() *prmMatchExact {
+    return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}}
+}
+
+// NewPRMMatchExact returns a new "matchExact" PolicyReferenceMatch.
+func NewPRMMatchExact() PolicyReferenceMatch {
+    return newPRMMatchExact()
+}
+
+// Compile-time check that prmMatchExact implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchExact)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
+    *prm = prmMatchExact{}
+    var tmp prmMatchExact
+    if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+        switch key {
+        case "type":
+            return &tmp.Type
+        default:
+            return nil
+        }
+    }); err != nil {
+        return err
+    }
+
+    if tmp.Type != prmTypeMatchExact {
+        return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+    }
+    *prm = *newPRMMatchExact()
+    return nil
+}
+
+// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it returns the private type.
+func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact {
+    return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}}
+}
+
+// NewPRMMatchRepoDigestOrExact returns a new "matchRepoDigestOrExact" PolicyReferenceMatch.
+func NewPRMMatchRepoDigestOrExact() PolicyReferenceMatch {
+    return newPRMMatchRepoDigestOrExact()
+}
+
+// Compile-time check that prmMatchRepoDigestOrExact implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
+    *prm = prmMatchRepoDigestOrExact{}
+    var tmp prmMatchRepoDigestOrExact
+    if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+        switch key {
+        case "type":
+            return &tmp.Type
+        default:
+            return nil
+        }
+    }); err != nil {
+        return err
+    }
+
+    if tmp.Type != prmTypeMatchRepoDigestOrExact {
+        return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+    }
+    *prm = *newPRMMatchRepoDigestOrExact()
+    return nil
+}
+
+// newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type.
+func newPRMMatchRepository() *prmMatchRepository {
+    return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}}
+}
+
+// NewPRMMatchRepository returns a new "matchRepository" PolicyReferenceMatch.
+func NewPRMMatchRepository() PolicyReferenceMatch {
+    return newPRMMatchRepository()
+}
+
+// Compile-time check that prmMatchRepository implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchRepository)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
+    *prm = prmMatchRepository{}
+    var tmp prmMatchRepository
+    if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+        switch key {
+        case "type":
+            return &tmp.Type
+        default:
+            return nil
+        }
+    }); err != nil {
+        return err
+    }
+
+    if tmp.Type != prmTypeMatchRepository {
+        return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+    }
+    *prm = *newPRMMatchRepository()
+    return nil
+}
+
+// newPRMExactReference is NewPRMExactReference, except it returns the private type.
+func newPRMExactReference(dockerReference string) (*prmExactReference, error) {
+    ref, err := reference.ParseNamed(dockerReference)
+    if err != nil {
+        return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error()))
+    }
+    if reference.IsNameOnly(ref) {
+        return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference))
+    }
+    return &prmExactReference{
+        prmCommon:       prmCommon{Type: prmTypeExactReference},
+        DockerReference: dockerReference,
+    }, nil
+}
+
+// NewPRMExactReference returns a new "exactReference" PolicyReferenceMatch.
+func NewPRMExactReference(dockerReference string) (PolicyReferenceMatch, error) {
+    return newPRMExactReference(dockerReference)
+}
+
+// Compile-time check that prmExactReference implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactReference)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
+    *prm = prmExactReference{}
+    var tmp prmExactReference
+    if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+        switch key {
+        case "type":
+            return &tmp.Type
+        case "dockerReference":
+            return &tmp.DockerReference
+        default:
+            return nil
+        }
+    }); err != nil {
+        return err
+    }
+
+    if tmp.Type != prmTypeExactReference {
+        return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+    }
+
+    res, err := newPRMExactReference(tmp.DockerReference)
+    if err != nil {
+        return err
+    }
+    *prm = *res
+    return nil
+}
+
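A brief illustration of the distinction enforced above: exactReference requires a tag or digest, while exactRepository (below) accepts a bare repository name. Hypothetical caller code, not part of the patch:

    // A name-only reference is rejected for exactReference...
    if _, err := signature.NewPRMExactReference("docker.io/library/busybox"); err == nil {
        panic("a name-only reference should have been rejected")
    }
    // ...while a tagged reference is accepted.
    prm, err := signature.NewPRMExactReference("docker.io/library/busybox:1.25")
    if err != nil {
        return err
    }
    _ = prm

+// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.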
+func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
+ if _, err := reference.ParseNamed(dockerRepository); err != nil {
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
+ }
+ return &prmExactRepository{
+ prmCommon: prmCommon{Type: prmTypeExactRepository},
+ DockerRepository: dockerRepository,
+ }, nil
+}
+
+// NewPRMExactRepository returns a new "exactRepository" PolicyReferenceMatch.
+func NewPRMExactRepository(dockerRepository string) (PolicyReferenceMatch, error) {
+ return newPRMExactRepository(dockerRepository)
+}
+
+// Compile-time check that prmExactRepository implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactRepository)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
+ *prm = prmExactRepository{}
+ var tmp prmExactRepository
+ if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ switch key {
+ case "type":
+ return &tmp.Type
+ case "dockerRepository":
+ return &tmp.DockerRepository
+ default:
+ return nil
+ }
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeExactRepository {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+
+ res, err := newPRMExactRepository(tmp.DockerRepository)
+ if err != nil {
+ return err
+ }
+ *prm = *res
+ return nil
+}
diff --git a/vendor/src/github.com/containers/image/signature/policy_eval.go b/vendor/src/github.com/containers/image/signature/policy_eval.go
new file mode 100644
index 00000000..4828ecc8
--- /dev/null
+++ b/vendor/src/github.com/containers/image/signature/policy_eval.go
@@ -0,0 +1,287 @@
+// This defines the top-level policy evaluation API.
+// To the extent possible, the interface of the functions provided
+// here is intended to be completely unambiguous, and stable for users
+// to rely on.
+
+package signature
+
+import (
+ "fmt"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/containers/image/types"
+)
+
+// PolicyRequirementError is explanatory text for rejecting a signature or an image.
+type PolicyRequirementError string
+
+func (err PolicyRequirementError) Error() string {
+ return string(err)
+}
+
+// signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted.
+type signatureAcceptanceResult string
+
+const (
+ sarAccepted signatureAcceptanceResult = "sarAccepted"
+ sarRejected signatureAcceptanceResult = "sarRejected"
+ sarUnknown signatureAcceptanceResult = "sarUnknown"
+)
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+type PolicyRequirement interface {
+ // FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache
+ // costly initialization like creating temporary GPG home directories and reading files.
+ // Setup() (someState, error)
+ // Then, the operations below would be done on the someState object, not directly on a PolicyRequirement.
+
+ // isSignatureAuthorAccepted, given an image and a signature blob, returns:
+ // - sarAccepted if the signature has been verified against the appropriate public key
+ // (where "appropriate public key" may depend on the contents of the signature);
+ // in that case a parsed Signature should be returned.
+ // - sarRejected if the signature has not been verified;
+ // in that case error must be non-nil, and should be a PolicyRequirementError if evaluation
+ // succeeded but the result was rejection.
+ // - sarUnknown if this PolicyRequirement does not deal with signatures.
+ // NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
+ // Returning sarUnknown and a non-nil error value is invalid.
+ // WARNING: This makes the signature contents acceptable for further processing,
+ // but it does not necessarily mean that the contents of the signature are
+ // consistent with local policy.
+ // For example:
+ // - Do not use a true value to determine whether to run
+ // a container based on this image; use IsRunningImageAllowed instead.
+ // - Just because a signature is accepted does not automatically mean the contents of the
+ // signature are authorized to run code as root, or to affect system or cluster configuration.
+ isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
+
+ // isRunningImageAllowed returns true if the requirement allows running an image.
+ // If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
+ // succeeded but the result was rejection.
+ // WARNING: This validates signatures and the manifest, but does not download or validate the
+ // layers. Users must validate that the layers match their expected digests.
+ isRunningImageAllowed(image types.UnparsedImage) (bool, error)
+}
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+type PolicyReferenceMatch interface {
+ // matchesDockerReference decides whether a specific image identity is accepted for an image
+ // (or, usually, for the image's Reference().DockerReference()). Note that
+ // image.Reference().DockerReference() may be nil.
+ matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool
+}
+
+// PolicyContext encapsulates a policy and possible cached state
+// for speeding up its evaluation.
+type PolicyContext struct {
+ Policy *Policy
+ state policyContextState // Internal consistency checking
+}
+
+// policyContextState is used internally to verify the users are not misusing a PolicyContext.
+type policyContextState string
+
+const (
+ pcInvalid policyContextState = ""
+ pcInitializing policyContextState = "Initializing"
+ pcReady policyContextState = "Ready"
+ pcInUse policyContextState = "InUse"
+ pcDestroying policyContextState = "Destroying"
+ pcDestroyed policyContextState = "Destroyed"
+)
+
+// changeState changes pc.state, or fails if the state is unexpected
+func (pc *PolicyContext) changeState(expected, new policyContextState) error {
+ if pc.state != expected {
+ return fmt.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
+ }
+ pc.state = new
+ return nil
+}
+
+// NewPolicyContext sets up and initializes a context for the specified policy.
+// The policy must not be modified while the context exists. FIXME: make a deep copy?
+// If this function succeeds, the caller should call PolicyContext.Destroy() when done.
+func NewPolicyContext(policy *Policy) (*PolicyContext, error) {
+ pc := &PolicyContext{Policy: policy, state: pcInitializing}
+ // FIXME: initialize
+ if err := pc.changeState(pcInitializing, pcReady); err != nil {
+ // Huh?! This should never fail, we didn't give the pointer to anybody.
+ // Just give up and leave unclean state around.
+ return nil, err
+ }
+ return pc, nil
+}
+
+// Destroy should be called when the user of the context is done with it.
+func (pc *PolicyContext) Destroy() error {
+ if err := pc.changeState(pcReady, pcDestroying); err != nil {
+ return err
+ }
+ // FIXME: destroy
+ return pc.changeState(pcDestroying, pcDestroyed)
+}
+
+// policyIdentityLogName returns a string description of the image identity for policy purposes.
+// ONLY use this for log messages, not for any decisions!
+func policyIdentityLogName(ref types.ImageReference) string {
+ return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity()
+}
+
+// requirementsForImageRef selects the appropriate requirements for ref.
+func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements {
+ // Do we have a PolicyTransportScopes for this transport?
+ transportName := ref.Transport().Name()
+ if transportScopes, ok := pc.Policy.Transports[transportName]; ok {
+ // Look for a full match.
+ identity := ref.PolicyConfigurationIdentity()
+ if req, ok := transportScopes[identity]; ok {
+ logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity)
+ return req
+ }
+
+ // Look for a match of the possible parent namespaces.
+ for _, name := range ref.PolicyConfigurationNamespaces() {
+ if req, ok := transportScopes[name]; ok {
+ logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name)
+ return req
+ }
+ }
+
+ // Look for a default match for the transport.
+ if req, ok := transportScopes[""]; ok {
+ logrus.Debugf(` Using transport "%s" policy section ""`, transportName)
+ return req
+ }
+ }
+
+ logrus.Debugf(" Using default policy section")
+ return pc.Policy.Default
+}
+
+// GetSignaturesWithAcceptedAuthor returns those signatures from an image
+// for which the policy accepts the author (and which have been successfully
+// verified).
+// NOTE: This may legitimately return an empty list and no error, if the image
+// has no signatures or only invalid signatures.
+// WARNING: This makes the signature contents acceptable for further processing,
+// but it does not necessarily mean that the contents of the signature are
+// consistent with local policy.
+// For example:
+// - Do not use the existence of an accepted signature to determine whether to run
+// a container based on this image; use IsRunningImageAllowed instead.
+// - Just because a signature is accepted does not automatically mean the contents of the
+// signature are authorized to run code as root, or to affect system or cluster configuration.
+func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(image types.UnparsedImage) (sigs []*Signature, finalErr error) {
+ if err := pc.changeState(pcReady, pcInUse); err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err := pc.changeState(pcInUse, pcReady); err != nil {
+ sigs = nil
+ finalErr = err
+ }
+ }()
+
+ logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference()))
+ reqs := pc.requirementsForImageRef(image.Reference())
+
+ // FIXME: rename Signatures to UnverifiedSignatures
+ unverifiedSignatures, err := image.Signatures()
+ if err != nil {
+ return nil, err
+ }
+
+ res := make([]*Signature, 0, len(unverifiedSignatures))
+ for sigNumber, sig := range unverifiedSignatures {
+ var acceptedSig *Signature // non-nil if accepted
+ rejected := false
+ // FIXME? Say more about the contents of the signature, i.e. parse it even before verification?!
+ logrus.Debugf("Evaluating signature %d:", sigNumber)
+ interpretingReqs:
+ for reqNumber, req := range reqs {
+ // FIXME: Log the requirement itself? For now, we use just the number.
+ // FIXME: supply state
+ switch res, as, err := req.isSignatureAuthorAccepted(image, sig); res {
+ case sarAccepted:
+ if as == nil { // Coverage: this should never happen
+ logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber)
+ rejected = true
+ break interpretingReqs
+ }
+ logrus.Debugf(" Requirement %d: signature accepted", reqNumber)
+ if acceptedSig == nil {
+ acceptedSig = as
+ } else if *as != *acceptedSig { // Coverage: this should never happen
+ // Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents?
+ logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber)
+ rejected = true
+ acceptedSig = nil
+ break interpretingReqs
+ }
+ case sarRejected:
+ logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error())
+ rejected = true
+ break interpretingReqs
+ case sarUnknown:
+ if err != nil { // Coverage: this should never happen
+ logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error())
+ rejected = true
+ break interpretingReqs
+ }
+ logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber)
+ default: // Coverage: this should never happen
+ logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res))
+ rejected = true
+ break interpretingReqs
+ }
+ }
+ // This also handles the (invalid) case of empty reqs, by rejecting the signature.
+ if acceptedSig != nil && !rejected {
+ logrus.Debugf(" Overall: OK, signature accepted")
+ res = append(res, acceptedSig)
+ } else {
+ logrus.Debugf(" Overall: Signature not accepted")
+ }
+ }
+ return res, nil
+}
+
+// IsRunningImageAllowed returns true iff the policy allows running the image.
+// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
+// succeeded but the result was rejection.
+// WARNING: This validates signatures and the manifest, but does not download or validate the
+// layers. Users must validate that the layers match their expected digests.
+func (pc *PolicyContext) IsRunningImageAllowed(image types.UnparsedImage) (res bool, finalErr error) {
+ if err := pc.changeState(pcReady, pcInUse); err != nil {
+ return false, err
+ }
+ defer func() {
+ if err := pc.changeState(pcInUse, pcReady); err != nil {
+ res = false
+ finalErr = err
+ }
+ }()
+
+ logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference()))
+ reqs := pc.requirementsForImageRef(image.Reference())
+
+ if len(reqs) == 0 {
+ return false, PolicyRequirementError("List of verification policy requirements must not be empty")
+ }
+
+ for reqNumber, req := range reqs {
+ // FIXME: supply state
+ allowed, err := req.isRunningImageAllowed(image)
+ if !allowed {
+ logrus.Debugf("Requirement %d: denied, done", reqNumber)
+ return false, err
+ }
+ logrus.Debugf(" Requirement %d: allowed", reqNumber)
+ }
+ // We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image.
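+ // In other words, the requirements are ANDed: the loop above returns early on the
+ // first denial, so reaching this point means every requirement allowed the image.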
+ logrus.Debugf("Overall: allowed") + return true, nil +} diff --git a/vendor/src/github.com/containers/image/signature/policy_eval_baselayer.go b/vendor/src/github.com/containers/image/signature/policy_eval_baselayer.go new file mode 100644 index 00000000..dec84c93 --- /dev/null +++ b/vendor/src/github.com/containers/image/signature/policy_eval_baselayer.go @@ -0,0 +1,18 @@ +// Policy evaluation for prSignedBaseLayer. + +package signature + +import ( + "github.com/Sirupsen/logrus" + "github.com/containers/image/types" +) + +func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + return sarUnknown, nil, nil +} + +func (pr *prSignedBaseLayer) isRunningImageAllowed(image types.UnparsedImage) (bool, error) { + // FIXME? Reject this at policy parsing time already? + logrus.Errorf("signedBaseLayer not implemented yet!") + return false, PolicyRequirementError("signedBaseLayer not implemented yet!") +} diff --git a/vendor/src/github.com/containers/image/signature/policy_eval_signedby.go b/vendor/src/github.com/containers/image/signature/policy_eval_signedby.go new file mode 100644 index 00000000..595634ce --- /dev/null +++ b/vendor/src/github.com/containers/image/signature/policy_eval_signedby.go @@ -0,0 +1,138 @@ +// Policy evaluation for prSignedBy. + +package signature + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/containers/image/manifest" + "github.com/containers/image/types" + "github.com/docker/distribution/digest" +) + +func (pr *prSignedBy) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + switch pr.KeyType { + case SBKeyTypeGPGKeys: + case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: + // FIXME? Reject this at policy parsing time already? + return sarRejected, nil, fmt.Errorf(`"Unimplemented "keyType" value "%s"`, string(pr.KeyType)) + default: + // This should never happen, newPRSignedBy ensures KeyType.IsValid() + return sarRejected, nil, fmt.Errorf(`"Unknown "keyType" value "%s"`, string(pr.KeyType)) + } + + if pr.KeyPath != "" && pr.KeyData != nil { + return sarRejected, nil, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`) + } + // FIXME: move this to per-context initialization + var data []byte + if pr.KeyData != nil { + data = pr.KeyData + } else { + d, err := ioutil.ReadFile(pr.KeyPath) + if err != nil { + return sarRejected, nil, err + } + data = d + } + + // FIXME: move this to per-context initialization + dir, err := ioutil.TempDir("", "skopeo-signedBy-") + if err != nil { + return sarRejected, nil, err + } + defer os.RemoveAll(dir) + mech, err := newGPGSigningMechanismInDirectory(dir) + if err != nil { + return sarRejected, nil, err + } + + trustedIdentities, err := mech.ImportKeysFromBytes(data) + if err != nil { + return sarRejected, nil, err + } + if len(trustedIdentities) == 0 { + return sarRejected, nil, PolicyRequirementError("No public keys imported") + } + + signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{ + validateKeyIdentity: func(keyIdentity string) error { + for _, trustedIdentity := range trustedIdentities { + if keyIdentity == trustedIdentity { + return nil + } + } + // Coverage: We use a private GPG home directory and only import trusted keys, so this should + // not be reachable. 
+ return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity)) + }, + validateSignedDockerReference: func(ref string) error { + if !pr.SignedIdentity.matchesDockerReference(image, ref) { + return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref)) + } + return nil + }, + validateSignedDockerManifestDigest: func(digest digest.Digest) error { + m, _, err := image.Manifest() + if err != nil { + return err + } + digestMatches, err := manifest.MatchesDigest(m, digest) + if err != nil { + return err + } + if !digestMatches { + return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest)) + } + return nil + }, + }) + if err != nil { + return sarRejected, nil, err + } + + return sarAccepted, signature, nil +} + +func (pr *prSignedBy) isRunningImageAllowed(image types.UnparsedImage) (bool, error) { + sigs, err := image.Signatures() + if err != nil { + return false, err + } + var rejections []error + for _, s := range sigs { + var reason error + switch res, _, err := pr.isSignatureAuthorAccepted(image, s); res { + case sarAccepted: + // One accepted signature is enough. + return true, nil + case sarRejected: + reason = err + case sarUnknown: + // Huh?! This should not happen at all; treat it as any other invalid value. + fallthrough + default: + reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res)) + } + rejections = append(rejections, reason) + } + var summary error + switch len(rejections) { + case 0: + summary = PolicyRequirementError("A signature was required, but no signature exists") + case 1: + summary = rejections[0] + default: + var msgs []string + for _, e := range rejections { + msgs = append(msgs, e.Error()) + } + summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s", + strings.Join(msgs, "; "))) + } + return false, summary +} diff --git a/vendor/src/github.com/containers/image/signature/policy_eval_simple.go b/vendor/src/github.com/containers/image/signature/policy_eval_simple.go new file mode 100644 index 00000000..19a71e6d --- /dev/null +++ b/vendor/src/github.com/containers/image/signature/policy_eval_simple.go @@ -0,0 +1,28 @@ +// Policy evaluation for the various simple PolicyRequirement types. + +package signature + +import ( + "fmt" + + "github.com/containers/image/transports" + "github.com/containers/image/types" +) + +func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { + // prInsecureAcceptAnything semantics: Every image is allowed to run, + // but this does not consider the signature as verified. 
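+ // Returning sarUnknown with a nil error defers the signature decision to any
+ // other requirements in the policy, per the PolicyRequirement contract.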
+ return sarUnknown, nil, nil
+}
+
+func (pr *prInsecureAcceptAnything) isRunningImageAllowed(image types.UnparsedImage) (bool, error) {
+ return true, nil
+}
+
+func (pr *prReject) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+ return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference())))
+}
+
+func (pr *prReject) isRunningImageAllowed(image types.UnparsedImage) (bool, error) {
+ return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference())))
+}
diff --git a/vendor/src/github.com/containers/image/signature/policy_reference_match.go b/vendor/src/github.com/containers/image/signature/policy_reference_match.go
new file mode 100644
index 00000000..ced51e6e
--- /dev/null
+++ b/vendor/src/github.com/containers/image/signature/policy_reference_match.go
@@ -0,0 +1,101 @@
+// PolicyReferenceMatch implementations.
+
+package signature
+
+import (
+ "fmt"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+)
+
+// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images.
+func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
+ r1 := image.Reference().DockerReference()
+ if r1 == nil {
+ return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
+ transports.ImageName(image.Reference())))
+ }
+ r2, err := reference.ParseNamed(s2)
+ if err != nil {
+ return nil, nil, err
+ }
+ return r1, r2, nil
+}
+
+func (prm *prmMatchExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+ intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+ if err != nil {
+ return false
+ }
+ // Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+ if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
+ return false
+ }
+ return signature.String() == intended.String()
+}
+
+func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+ intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+ if err != nil {
+ return false
+ }
+
+ // Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+ if reference.IsNameOnly(signature) {
+ return false
+ }
+ switch intended.(type) {
+ case reference.NamedTagged: // Includes the case when intended has both a tag and a digest.
+ return signature.String() == intended.String()
+ case reference.Canonical:
+ // We don’t actually compare the manifest digest against the signature here; that happens in prSignedBy, via UnparsedImage.Manifest.
+ // Because UnparsedImage.Manifest verifies the intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest,
+ // we know that signature digest matches intended.Digest() (but intended.Digest() and signature digest may use different algorithms)
+ return signature.Name() == intended.Name()
+ default: // !reference.IsNameOnly(intended)
+ return false
+ }
+}
+
+func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+ intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+ if err != nil {
+ return false
+ }
+ return signature.Name() == intended.Name()
+}
+
+// parseDockerReferences converts two reference strings into parsed entities, failing on any error.
+func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) {
+ r1, err := reference.ParseNamed(s1)
+ if err != nil {
+ return nil, nil, err
+ }
+ r2, err := reference.ParseNamed(s2)
+ if err != nil {
+ return nil, nil, err
+ }
+ return r1, r2, nil
+}
+
+func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+ intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference)
+ if err != nil {
+ return false
+ }
+ // prm.DockerReference and signatureDockerReference should be exact; so, verify that now.
+ if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
+ return false
+ }
+ return signature.String() == intended.String()
+}
+
+func (prm *prmExactRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+ intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference)
+ if err != nil {
+ return false
+ }
+ return signature.Name() == intended.Name()
+}
diff --git a/vendor/src/github.com/containers/image/signature/policy_types.go b/vendor/src/github.com/containers/image/signature/policy_types.go
new file mode 100644
index 00000000..4cd770f1
--- /dev/null
+++ b/vendor/src/github.com/containers/image/signature/policy_types.go
@@ -0,0 +1,152 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+// This defines types used to represent a signature verification policy in memory.
+// Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements
+// built using the constructor functions provided in policy_config.go.
+
+package signature
+
+// NOTE: Keep this in sync with docs/policy.json.md!
+
+// Policy defines requirements for considering a signature, or an image, valid.
+type Policy struct {
+ // Default applies to any image which does not have a matching policy in Transports.
+ // Note that this can happen even if a matching PolicyTransportScopes exists in Transports
+ // if the image matches none of the scopes.
+ Default PolicyRequirements `json:"default"`
+ Transports map[string]PolicyTransportScopes `json:"transports"`
+}
+
+// PolicyTransportScopes defines policies for images for a specific transport,
+// for various scopes, the map keys.
+// Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.);
+// there is one scope precisely matching a single image, and namespace scopes as prefixes
+// of the single-image scope. (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]])
+// The empty scope, if it exists, is considered a parent namespace of all other scopes.
+// The most specific scope wins; duplication is prohibited (hard failure).
+type PolicyTransportScopes map[string]PolicyRequirements
+
+// PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature).
+// Must not be empty, frequently will only contain a single element.
+type PolicyRequirements []PolicyRequirement
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+
+// prCommon is the common type field in a JSON encoding of PolicyRequirement.
+type prCommon struct {
+ Type prTypeIdentifier `json:"type"`
+}
+
+// prTypeIdentifier is a string designating a kind of a PolicyRequirement.
+type prTypeIdentifier string
+
+const (
+ prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything"
+ prTypeReject prTypeIdentifier = "reject"
+ prTypeSignedBy prTypeIdentifier = "signedBy"
+ prTypeSignedBaseLayer prTypeIdentifier = "signedBaseLayer"
+)
+
+// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
+// every image is allowed to run.
+// Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit).
+// NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted).
+// FIXME? Better name?
+type prInsecureAcceptAnything struct {
+ prCommon
+}
+
+// prReject is a PolicyRequirement with type = prTypeReject: every image is rejected.
+type prReject struct {
+ prCommon
+}
+
+// prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity
+type prSignedBy struct {
+ prCommon
+
+ // KeyType specifies what kind of key reference KeyPath/KeyData is.
+ // Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X.509Certificates” | “signedByX.509CAs”
+ // FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
+ KeyType sbKeyType `json:"keyType"`
+
+ // KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath and KeyData must be specified.
+ KeyPath string `json:"keyPath,omitempty"`
+ // KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath and KeyData must be specified.
+ KeyData []byte `json:"keyData,omitempty"`
+
+ // SignedIdentity specifies what image identity the signature must be claiming about the image.
+ // Defaults to "match-exact" if not specified.
+ SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
+}
+
+// sbKeyType are the allowed values for prSignedBy.KeyType
+type sbKeyType string
+
+const (
+ // SBKeyTypeGPGKeys refers to keys contained in a GPG keyring
+ SBKeyTypeGPGKeys sbKeyType = "GPGKeys"
+ // SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring
+ SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys"
+ // SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates
+ // FIXME: PEM, DER?
+ SBKeyTypeX509Certificates sbKeyType = "X509Certificates"
+ // SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs
+ // FIXME: PEM, DER?
+ SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs"
+)
+
+// prSignedBaseLayer is a PolicyRequirement with type = prSignedBaseLayer: the image has a specified, correctly signed, base image.
+type prSignedBaseLayer struct {
+ prCommon
+ // BaseLayerIdentity specifies the base image to look for. "match-exact" is rejected, "match-repository" is unlikely to be useful.
+ BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
+}
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+
+// prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch.
+type prmCommon struct {
+ Type prmTypeIdentifier `json:"type"`
+}
+
+// prmTypeIdentifier is a string designating a kind of a PolicyReferenceMatch.
+type prmTypeIdentifier string
+
+const (
+ prmTypeMatchExact prmTypeIdentifier = "matchExact"
+ prmTypeMatchRepoDigestOrExact prmTypeIdentifier = "matchRepoDigestOrExact"
+ prmTypeMatchRepository prmTypeIdentifier = "matchRepository"
+ prmTypeExactReference prmTypeIdentifier = "exactReference"
+ prmTypeExactRepository prmTypeIdentifier = "exactRepository"
+)
+
+// prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly.
+type prmMatchExact struct {
+ prmCommon
+}
+
+// prmMatchRepoDigestOrExact is a PolicyReferenceMatch with type = prmMatchRepoDigestOrExact: the two references must match exactly,
+// except that digest references are also accepted if the repository name matches (regardless of tag/digest) and the signature applies to the referenced digest
+type prmMatchRepoDigestOrExact struct {
+ prmCommon
+}
+
+// prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository, may differ in the tag.
+type prmMatchRepository struct {
+ prmCommon
+}
+
+// prmExactReference is a PolicyReferenceMatch with type = prmExactReference: matches a specified reference exactly.
+type prmExactReference struct {
+ prmCommon
+ DockerReference string `json:"dockerReference"`
+}
+
+// prmExactRepository is a PolicyReferenceMatch with type = prmExactRepository: matches a specified repository, with any tag.
+type prmExactRepository struct {
+ prmCommon
+ DockerRepository string `json:"dockerRepository"`
+}
diff --git a/vendor/src/github.com/containers/image/signature/signature.go b/vendor/src/github.com/containers/image/signature/signature.go
new file mode 100644
index 00000000..b0c6e444
--- /dev/null
+++ b/vendor/src/github.com/containers/image/signature/signature.go
@@ -0,0 +1,192 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+package signature
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/containers/image/version"
+ "github.com/docker/distribution/digest"
+)
+
+const (
+ signatureType = "atomic container signature"
+)
+
+// InvalidSignatureError is returned when parsing an invalid signature.
+type InvalidSignatureError struct {
+ msg string
+}
+
+func (err InvalidSignatureError) Error() string {
+ return err.msg
+}
+
+// Signature is the parsed content of a signature.
+type Signature struct {
+ DockerManifestDigest digest.Digest
+ DockerReference string // FIXME: more precise type?
+}
+
+// privateSignature wraps Signature to add methods which we don't want to make public.
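+// For illustration, MarshalJSON below produces a document of this shape (the
+// digest and reference values here are hypothetical):
+//
+// {
+//   "critical": {
+//     "type": "atomic container signature",
+//     "image": {"docker-manifest-digest": "sha256:..."},
+//     "identity": {"docker-reference": "docker.io/library/busybox:latest"}
+//   },
+//   "optional": {"creator": "atomic <version>", "timestamp": 1479859200}
+// }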
+type privateSignature struct {
+ Signature
+}
+
+// Compile-time check that privateSignature implements json.Marshaler
+var _ json.Marshaler = (*privateSignature)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s privateSignature) MarshalJSON() ([]byte, error) {
+ return s.marshalJSONWithVariables(time.Now().UTC().Unix(), "atomic "+version.Version)
+}
+
+// Implementation of MarshalJSON, with caller-chosen values of the variable items to help testing.
+func (s privateSignature) marshalJSONWithVariables(timestamp int64, creatorID string) ([]byte, error) {
+ if s.DockerManifestDigest == "" || s.DockerReference == "" {
+ return nil, errors.New("Unexpected empty signature content")
+ }
+ critical := map[string]interface{}{
+ "type": signatureType,
+ "image": map[string]string{"docker-manifest-digest": s.DockerManifestDigest.String()},
+ "identity": map[string]string{"docker-reference": s.DockerReference},
+ }
+ optional := map[string]interface{}{
+ "creator": creatorID,
+ "timestamp": timestamp,
+ }
+ signature := map[string]interface{}{
+ "critical": critical,
+ "optional": optional,
+ }
+ return json.Marshal(signature)
+}
+
+// Compile-time check that privateSignature implements json.Unmarshaler
+var _ json.Unmarshaler = (*privateSignature)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *privateSignature) UnmarshalJSON(data []byte) error {
+ err := s.strictUnmarshalJSON(data)
+ if err != nil {
+ if _, ok := err.(jsonFormatError); ok {
+ err = InvalidSignatureError{msg: err.Error()}
+ }
+ }
+ return err
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type.
+// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError conversion in a single place, the caller.
+func (s *privateSignature) strictUnmarshalJSON(data []byte) error {
+ var untyped interface{}
+ if err := json.Unmarshal(data, &untyped); err != nil {
+ return err
+ }
+ o, ok := untyped.(map[string]interface{})
+ if !ok {
+ return InvalidSignatureError{msg: "Invalid signature format"}
+ }
+ if err := validateExactMapKeys(o, "critical", "optional"); err != nil {
+ return err
+ }
+
+ c, err := mapField(o, "critical")
+ if err != nil {
+ return err
+ }
+ if err := validateExactMapKeys(c, "type", "image", "identity"); err != nil {
+ return err
+ }
+
+ optional, err := mapField(o, "optional")
+ if err != nil {
+ return err
+ }
+ _ = optional // We don't use anything from here for now.
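+ // (The "optional" members emitted by MarshalJSON, "creator" and "timestamp",
+ // are deliberately neither required nor validated here.)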
+
+ t, err := stringField(c, "type")
+ if err != nil {
+ return err
+ }
+ if t != signatureType {
+ return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
+ }
+
+ image, err := mapField(c, "image")
+ if err != nil {
+ return err
+ }
+ if err := validateExactMapKeys(image, "docker-manifest-digest"); err != nil {
+ return err
+ }
+ digestString, err := stringField(image, "docker-manifest-digest")
+ if err != nil {
+ return err
+ }
+ s.DockerManifestDigest = digest.Digest(digestString)
+
+ identity, err := mapField(c, "identity")
+ if err != nil {
+ return err
+ }
+ if err := validateExactMapKeys(identity, "docker-reference"); err != nil {
+ return err
+ }
+ reference, err := stringField(identity, "docker-reference")
+ if err != nil {
+ return err
+ }
+ s.DockerReference = reference
+
+ return nil
+}
+
+// sign formats the signature and returns a blob signed using mech and keyIdentity
+func (s privateSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) {
+ json, err := json.Marshal(s)
+ if err != nil {
+ return nil, err
+ }
+
+ return mech.Sign(json, keyIdentity)
+}
+
+// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
+// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
+// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
+// because all of the functions have the same type, so there is a risk of exchanging the functions;
+// named members of this struct are more explicit.
+type signatureAcceptanceRules struct {
+ validateKeyIdentity func(string) error
+ validateSignedDockerReference func(string) error
+ validateSignedDockerManifestDigest func(digest.Digest) error
+}
+
+// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components
+// match expected values, both as specified by rules, and returns it
+func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) {
+ signed, keyIdentity, err := mech.Verify(unverifiedSignature)
+ if err != nil {
+ return nil, err
+ }
+ if err := rules.validateKeyIdentity(keyIdentity); err != nil {
+ return nil, err
+ }
+
+ var unmatchedSignature privateSignature
+ if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
+ return nil, InvalidSignatureError{msg: err.Error()}
+ }
+ if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.DockerManifestDigest); err != nil {
+ return nil, err
+ }
+ if err := rules.validateSignedDockerReference(unmatchedSignature.DockerReference); err != nil {
+ return nil, err
+ }
+ signature := unmatchedSignature.Signature // Policy OK.
+ return &signature, nil +} diff --git a/vendor/src/github.com/containers/image/storage/storage_image.go b/vendor/src/github.com/containers/image/storage/storage_image.go new file mode 100644 index 00000000..132c5ad5 --- /dev/null +++ b/vendor/src/github.com/containers/image/storage/storage_image.go @@ -0,0 +1,570 @@ +package storage + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "time" + + "github.com/Sirupsen/logrus" + "github.com/containers/image/image" + "github.com/containers/image/manifest" + "github.com/containers/image/types" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/storage" + ddigest "github.com/docker/distribution/digest" +) + +var ( + // ErrBlobDigestMismatch is returned when PutBlob() is given a blob + // with a digest-based name that doesn't match its contents. + ErrBlobDigestMismatch = errors.New("blob digest mismatch") + // ErrBlobSizeMismatch is returned when PutBlob() is given a blob + // with an expected size that doesn't match the reader. + ErrBlobSizeMismatch = errors.New("blob size mismatch") + // ErrNoManifestLists is returned when GetTargetManifest() is + // called. + ErrNoManifestLists = errors.New("manifest lists are not supported by this transport") + // ErrNoSuchImage is returned when we attempt to access an image which + // doesn't exist in the storage area. + ErrNoSuchImage = storage.ErrNotAnImage +) + +type storageImageSource struct { + imageRef storageReference + Tag string `json:"tag,omitempty"` + Created time.Time `json:"created-time,omitempty"` + ID string `json:"id"` + BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle + Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs + LayerPosition map[ddigest.Digest]int `json:"-"` // Where we are in reading a blob's layers + SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice +} + +type storageImageDestination struct { + imageRef storageReference + Tag string `json:"tag,omitempty"` + Created time.Time `json:"created-time,omitempty"` + ID string `json:"id"` + BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle + Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs + BlobData map[ddigest.Digest][]byte `json:"-"` // Map from names of blobs that aren't layers to contents, temporary + Manifest []byte `json:"-"` // Manifest contents, temporary + Signatures []byte `json:"-"` // Signature contents, temporary + SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice +} + +type storageLayerMetadata struct { + Digest string `json:"digest,omitempty"` + Size int64 `json:"size"` + CompressedSize int64 `json:"compressed-size,omitempty"` +} + +type storageImage struct { + types.Image + size int64 +} + +// newImageSource sets us up to read out an image, which needs to already exist. 
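+// It resolves the reference to an image ID in the store and decodes the
+// metadata that newImageDestination serialized there at Commit() time.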
+func newImageSource(imageRef storageReference) (*storageImageSource, error) { + id := imageRef.resolveID() + if id == "" { + logrus.Errorf("no image matching reference %q found", imageRef.StringWithinTransport()) + return nil, ErrNoSuchImage + } + img, err := imageRef.transport.store.GetImage(id) + if err != nil { + return nil, fmt.Errorf("error reading image %q: %v", id, err) + } + image := &storageImageSource{ + imageRef: imageRef, + Created: time.Now(), + ID: img.ID, + BlobList: []types.BlobInfo{}, + Layers: make(map[ddigest.Digest][]string), + LayerPosition: make(map[ddigest.Digest]int), + SignatureSizes: []int{}, + } + if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { + return nil, fmt.Errorf("error decoding metadata for source image: %v", err) + } + return image, nil +} + +// newImageDestination sets us up to write a new image. +func newImageDestination(imageRef storageReference) (*storageImageDestination, error) { + image := &storageImageDestination{ + imageRef: imageRef, + Tag: imageRef.reference, + Created: time.Now(), + ID: imageRef.id, + BlobList: []types.BlobInfo{}, + Layers: make(map[ddigest.Digest][]string), + BlobData: make(map[ddigest.Digest][]byte), + SignatureSizes: []int{}, + } + return image, nil +} + +func (s storageImageSource) Reference() types.ImageReference { + return s.imageRef +} + +func (s storageImageDestination) Reference() types.ImageReference { + return s.imageRef +} + +func (s storageImageSource) Close() { +} + +func (s storageImageDestination) Close() { +} + +func (s storageImageDestination) ShouldCompressLayers() bool { + // We ultimately have to decompress layers to populate trees on disk, + // so callers shouldn't bother compressing them before handing them to + // us, if they're not already compressed. + return false +} + +// PutBlob is used to both store filesystem layers and binary data that is part +// of the image. Filesystem layers are assumed to be imported in order, as +// that is required by some of the underlying storage drivers. +func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) { + blobSize := int64(-1) + digest := blobinfo.Digest + errorBlobInfo := types.BlobInfo{ + Digest: "", + Size: -1, + } + // Try to read an initial snippet of the blob. + header := make([]byte, 10240) + n, err := stream.Read(header) + if err != nil && err != io.EOF { + return errorBlobInfo, err + } + // Set up to read the whole blob (the initial snippet, plus the rest) + // while digesting it with either the default, or the passed-in digest, + // if one was specified. + hasher := ddigest.Canonical.New() + if digest.Validate() == nil { + if a := digest.Algorithm(); a.Available() { + hasher = a.New() + } + } + hash := "" + counter := ioutils.NewWriteCounter(hasher.Hash()) + defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), stream) + multi := io.TeeReader(defragmented, counter) + if (n > 0) && archive.IsArchive(header[:n]) { + // It's a filesystem layer. If it's not the first one in the + // image, we assume that the most recently added layer is its + // parent. + parentLayer := "" + for _, blob := range s.BlobList { + if layerList, ok := s.Layers[blob.Digest]; ok { + parentLayer = layerList[len(layerList)-1] + } + } + // If we have an expected content digest, generate a layer ID + // based on the parent's ID and the expected content digest. 
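+ // The generated ID is the hex SHA-256 of "<parentLayer>+<digest>", so storing
+ // the same blob on top of the same parent deterministically reproduces the
+ // same layer ID (and is caught as storage.ErrDuplicateID below).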
+ id := "" + if digest.Validate() == nil { + id = ddigest.Canonical.FromBytes([]byte(parentLayer + "+" + digest.String())).Hex() + } + // Attempt to create the identified layer and import its contents. + layer, uncompressedSize, err := s.imageRef.transport.store.PutLayer(id, parentLayer, nil, "", true, multi) + if err != nil && err != storage.ErrDuplicateID { + logrus.Debugf("error importing layer blob %q as %q: %v", blobinfo.Digest, id, err) + return errorBlobInfo, err + } + if err == storage.ErrDuplicateID { + // We specified an ID, and there's already a layer with + // the same ID. Drain the input so that we can look at + // its length and digest. + _, err := io.Copy(ioutil.Discard, multi) + if err != nil && err != io.EOF { + logrus.Debugf("error digesting layer blob %q: %v", blobinfo.Digest, id, err) + return errorBlobInfo, err + } + hash = hasher.Digest().String() + } else { + // Applied the layer with the specified ID. Note the + // size info and computed digest. + hash = hasher.Digest().String() + layerMeta := storageLayerMetadata{ + Digest: hash, + CompressedSize: counter.Count, + Size: uncompressedSize, + } + if metadata, err := json.Marshal(&layerMeta); len(metadata) != 0 && err == nil { + s.imageRef.transport.store.SetMetadata(layer.ID, string(metadata)) + } + // Hang on to the new layer's ID. + id = layer.ID + } + blobSize = counter.Count + // Check if the size looks right. + if blobinfo.Size >= 0 && blobSize != blobinfo.Size { + logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, blobSize, blobinfo.Size) + if layer != nil { + // Something's wrong; delete the newly-created layer. + s.imageRef.transport.store.DeleteLayer(layer.ID) + } + return errorBlobInfo, ErrBlobSizeMismatch + } + // If the content digest was specified, verify it. + if digest.Validate() == nil && digest.String() != hash { + logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash) + if layer != nil { + // Something's wrong; delete the newly-created layer. + s.imageRef.transport.store.DeleteLayer(layer.ID) + } + return errorBlobInfo, ErrBlobDigestMismatch + } + // If we didn't get a digest, construct one. + if digest == "" { + digest = ddigest.Digest(hash) + } + // Record that this layer blob is a layer, and the layer ID it + // ended up having. This is a list, in case the same blob is + // being applied more than once. + s.Layers[digest] = append(s.Layers[digest], id) + s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: blobSize}) + if layer != nil { + logrus.Debugf("blob %q imported as a filesystem layer %q", blobinfo.Digest, id) + } else { + logrus.Debugf("layer blob %q already present as layer %q", blobinfo.Digest, id) + } + } else { + // It's just data. Finish scanning it in, check that our + // computed digest matches the passed-in digest, and store it, + // but leave it out of the blob-to-layer-ID map so that we can + // tell that it's not a layer. + blob, err := ioutil.ReadAll(multi) + if err != nil && err != io.EOF { + return errorBlobInfo, err + } + blobSize = int64(len(blob)) + hash = hasher.Digest().String() + if blobinfo.Size >= 0 && blobSize != blobinfo.Size { + logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, blobSize, blobinfo.Size) + return errorBlobInfo, ErrBlobSizeMismatch + } + // If we were given a digest, verify that the content matches + // it. 
+ if digest.Validate() == nil && digest.String() != hash {
+ logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash)
+ return errorBlobInfo, ErrBlobDigestMismatch
+ }
+ // If we didn't get a digest, construct one.
+ if digest == "" {
+ digest = ddigest.Digest(hash)
+ }
+ // Save the blob for when we Commit().
+ s.BlobData[digest] = blob
+ s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: blobSize})
+ logrus.Debugf("blob %q imported as opaque data %q", blobinfo.Digest, digest)
+ }
+ return types.BlobInfo{
+ Digest: digest,
+ Size: blobSize,
+ }, nil
+}
+
+func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
+ if blobinfo.Digest == "" {
+ return false, -1, fmt.Errorf(`Cannot check for a blob with unknown digest`)
+ }
+ for _, blob := range s.BlobList {
+ if blob.Digest == blobinfo.Digest {
+ return true, blob.Size, nil
+ }
+ }
+ return false, -1, types.ErrBlobNotFound
+}
+
+func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.BlobInfo, error) {
+ err := blobinfo.Digest.Validate()
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ if layerList, ok := s.Layers[blobinfo.Digest]; !ok || len(layerList) < 1 {
+ b, err := s.imageRef.transport.store.GetImageBigData(s.ID, blobinfo.Digest.String())
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ return types.BlobInfo{Digest: blobinfo.Digest, Size: int64(len(b))}, nil
+ }
+ layerList := s.Layers[blobinfo.Digest]
+ rc, _, err := diffLayer(s.imageRef.transport.store, layerList[len(layerList)-1])
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ return s.PutBlob(rc, blobinfo)
+}
+
+func (s *storageImageDestination) Commit() error {
+ // Create the image record.
+ lastLayer := ""
+ for _, blob := range s.BlobList {
+ if layerList, ok := s.Layers[blob.Digest]; ok {
+ lastLayer = layerList[len(layerList)-1]
+ }
+ }
+ img, err := s.imageRef.transport.store.CreateImage(s.ID, nil, lastLayer, "", nil)
+ if err != nil {
+ logrus.Debugf("error creating image: %q", err)
+ return err
+ }
+ logrus.Debugf("created new image ID %q", img.ID)
+ s.ID = img.ID
+ if s.Tag != "" {
+ // We have a name to set, so move the name to this image.
+ if err := s.imageRef.transport.store.SetNames(img.ID, []string{s.Tag}); err != nil {
+ if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
+ logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
+ }
+ logrus.Debugf("error setting names on image %q: %v", img.ID, err)
+ return err
+ }
+ logrus.Debugf("set name of image %q to %q", img.ID, s.Tag)
+ }
+ // Save the data blobs to disk, and drop their contents from memory.
+ keys := []ddigest.Digest{}
+ for k, v := range s.BlobData {
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, k.String(), v); err != nil {
+ if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
+ logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
+ }
+ logrus.Debugf("error saving big data %q for image %q: %v", k, img.ID, err)
+ return err
+ }
+ keys = append(keys, k)
+ }
+ for _, key := range keys {
+ delete(s.BlobData, key)
+ }
+ // Save the manifest, if we have one.
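+ // It is stored as a big-data item named "manifest", the same name that
+ // GetManifest() on the source side reads back.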
+ if err := s.imageRef.transport.store.SetImageBigData(s.ID, "manifest", s.Manifest); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) + return err + } + // Save the signatures, if we have any. + if err := s.imageRef.transport.store.SetImageBigData(s.ID, "signatures", s.Signatures); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) + return err + } + // Save our metadata. + metadata, err := json.Marshal(s) + if err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err) + return err + } + if len(metadata) != 0 { + if err = s.imageRef.transport.store.SetMetadata(s.ID, string(metadata)); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving metadata for image %q: %v", img.ID, err) + return err + } + logrus.Debugf("saved image metadata %q", string(metadata)) + } + return nil +} + +func (s *storageImageDestination) SupportedManifestMIMETypes() []string { + return nil +} + +func (s *storageImageDestination) PutManifest(manifest []byte) error { + s.Manifest = make([]byte, len(manifest)) + copy(s.Manifest, manifest) + return nil +} + +// SupportsSignatures returns an error if we can't expect GetSignatures() to +// return data that was previously supplied to PutSignatures(). +func (s *storageImageDestination) SupportsSignatures() error { + return nil +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (s *storageImageDestination) AcceptsForeignLayerURLs() bool { + return false +} + +func (s *storageImageDestination) PutSignatures(signatures [][]byte) error { + sizes := []int{} + sigblob := []byte{} + for _, sig := range signatures { + sizes = append(sizes, len(sig)) + newblob := make([]byte, len(sigblob)+len(sig)) + copy(newblob, sigblob) + copy(newblob[len(sigblob):], sig) + sigblob = newblob + } + s.Signatures = sigblob + s.SignatureSizes = sizes + return nil +} + +func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) { + rc, n, _, err = s.getBlobAndLayerID(info) + return rc, n, err +} + +func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { + err = info.Digest.Validate() + if err != nil { + return nil, -1, "", err + } + if layerList, ok := s.Layers[info.Digest]; !ok || len(layerList) < 1 { + b, err := s.imageRef.transport.store.GetImageBigData(s.ID, info.Digest.String()) + if err != nil { + return nil, -1, "", err + } + r := bytes.NewReader(b) + logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) + return ioutil.NopCloser(r), int64(r.Len()), "", nil + } + // If the blob was "put" more than once, we have multiple layer IDs + // which should all produce the same diff. 
+ // For the sake of tests that
+ // want to make sure we created different layers each time the blob was
+ // "put", though, cycle through the layers.
+ layerList := s.Layers[info.Digest]
+ position, ok := s.LayerPosition[info.Digest]
+ if !ok {
+ position = 0
+ }
+ s.LayerPosition[info.Digest] = (position + 1) % len(layerList)
+ logrus.Debugf("exporting filesystem layer %q for blob %q", layerList[position], info.Digest)
+ rc, n, err = diffLayer(s.imageRef.transport.store, layerList[position])
+ return rc, n, layerList[position], err
+}
+
+func diffLayer(store storage.Store, layerID string) (rc io.ReadCloser, n int64, err error) {
+ layer, err := store.GetLayer(layerID)
+ if err != nil {
+ return nil, -1, err
+ }
+ layerMeta := storageLayerMetadata{
+ CompressedSize: -1,
+ }
+ if layer.Metadata != "" {
+ if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
+ return nil, -1, fmt.Errorf("error decoding metadata for layer %q: %v", layerID, err)
+ }
+ }
+ if layerMeta.CompressedSize <= 0 {
+ n = -1
+ } else {
+ n = layerMeta.CompressedSize
+ }
+ diff, err := store.Diff("", layer.ID)
+ if err != nil {
+ return nil, -1, err
+ }
+ return diff, n, nil
+}
+
+func (s *storageImageSource) GetManifest() (manifestBlob []byte, MIMEType string, err error) {
+ manifestBlob, err = s.imageRef.transport.store.GetImageBigData(s.ID, "manifest")
+ return manifestBlob, manifest.GuessMIMEType(manifestBlob), err
+}
+
+func (s *storageImageSource) GetTargetManifest(digest ddigest.Digest) (manifestBlob []byte, MIMEType string, err error) {
+ return nil, "", ErrNoManifestLists
+}
+
+func (s *storageImageSource) GetSignatures() (signatures [][]byte, err error) {
+ var offset int
+ signature, err := s.imageRef.transport.store.GetImageBigData(s.ID, "signatures")
+ if err != nil {
+ return nil, err
+ }
+ sigslice := [][]byte{}
+ for _, length := range s.SignatureSizes {
+ sigslice = append(sigslice, signature[offset:offset+length])
+ offset += length
+ }
+ if offset != len(signature) {
+ return nil, fmt.Errorf("signatures data contained %d extra bytes", len(signature)-offset)
+ }
+ return sigslice, nil
+}
+
+func (s *storageImageSource) getSize() (int64, error) {
+ var sum int64
+ names, err := s.imageRef.transport.store.ListImageBigData(s.imageRef.id)
+ if err != nil {
+ return -1, fmt.Errorf("error reading image %q: %v", s.imageRef.id, err)
+ }
+ for _, name := range names {
+ bigSize, err := s.imageRef.transport.store.GetImageBigDataSize(s.imageRef.id, name)
+ if err != nil {
+ return -1, fmt.Errorf("error reading data blob size %q for %q: %v", name, s.imageRef.id, err)
+ }
+ sum += bigSize
+ }
+ for _, sigSize := range s.SignatureSizes {
+ sum += int64(sigSize)
+ }
+ for _, layerList := range s.Layers {
+ for _, layerID := range layerList {
+ layer, err := s.imageRef.transport.store.GetLayer(layerID)
+ if err != nil {
+ return -1, err
+ }
+ layerMeta := storageLayerMetadata{
+ Size: -1,
+ }
+ if layer.Metadata != "" {
+ if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
+ return -1, fmt.Errorf("error decoding metadata for layer %q: %v", layerID, err)
+ }
+ }
+ if layerMeta.Size < 0 {
+ return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID)
+ }
+ sum += layerMeta.Size
+ }
+ }
+ return sum, nil
+}
+
+func (s *storageImage) Size() (int64, error) {
+ return s.size, nil
+}
+
+// newImage creates an image that also knows its size
+func newImage(s storageReference) (types.Image, error) {
+ src, err := newImageSource(s)
+ if err != nil {
+ return nil, err
+ } + img, err := image.FromSource(src) + if err != nil { + return nil, err + } + size, err := src.getSize() + if err != nil { + return nil, err + } + return &storageImage{Image: img, size: size}, nil +} diff --git a/vendor/src/github.com/containers/image/storage/storage_reference.go b/vendor/src/github.com/containers/image/storage/storage_reference.go new file mode 100644 index 00000000..13413df1 --- /dev/null +++ b/vendor/src/github.com/containers/image/storage/storage_reference.go @@ -0,0 +1,127 @@ +package storage + +import ( + "strings" + + "github.com/Sirupsen/logrus" + "github.com/containers/image/docker/reference" + "github.com/containers/image/types" +) + +// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte +// value hex-encoded into a 64-character string, and a reference to a Store +// where an image is, or would be, kept. +type storageReference struct { + transport storageTransport + reference string + id string + name reference.Named +} + +func newReference(transport storageTransport, reference, id string, name reference.Named) *storageReference { + // We take a copy of the transport, which contains a pointer to the + // store that it used for resolving this reference, so that the + // transport that we'll return from Transport() won't be affected by + // further calls to the original transport's SetStore() method. + return &storageReference{ + transport: transport, + reference: reference, + id: id, + name: name, + } +} + +// Resolve the reference's name to an image ID in the store, if there's already +// one present with the same name or ID. +func (s *storageReference) resolveID() string { + if s.id == "" { + image, err := s.transport.store.GetImage(s.reference) + if image != nil && err == nil { + s.id = image.ID + } + } + return s.id +} + +// Return a Transport object that defaults to using the same store that we used +// to build this reference object. +func (s storageReference) Transport() types.ImageTransport { + return &storageTransport{ + store: s.transport.store, + } +} + +// Return a name with a tag, if we have a name to base them on. +func (s storageReference) DockerReference() reference.Named { + return s.name +} + +// Return a name with a tag, prefixed with the graph root and driver name, to +// disambiguate between images which may be present in multiple stores and +// share only their names. +func (s storageReference) StringWithinTransport() string { + storeSpec := "[" + s.transport.store.GetGraphDriverName() + "@" + s.transport.store.GetGraphRoot() + "]" + if s.name == nil { + return storeSpec + "@" + s.id + } + if s.id == "" { + return storeSpec + s.reference + } + return storeSpec + s.reference + "@" + s.id +} + +func (s storageReference) PolicyConfigurationIdentity() string { + return s.StringWithinTransport() +} + +// Also accept policy that's tied to the combination of the graph root and +// driver name, to apply to all images stored in the Store, and to just the +// graph root, in case we're using multiple drivers in the same directory for +// some reason. +func (s storageReference) PolicyConfigurationNamespaces() []string { + storeSpec := "[" + s.transport.store.GetGraphDriverName() + "@" + s.transport.store.GetGraphRoot() + "]" + driverlessStoreSpec := "[" + s.transport.store.GetGraphRoot() + "]" + namespaces := []string{} + if s.name != nil { + if s.id != "" { + // The reference without the ID is also a valid namespace. 
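+			// Illustration (assumed driver "overlay", graph root
+			// "/var/lib/storage", name "docker.io/library/busybox:latest"):
+			// the namespaces built below, most specific first, would be
+			//   [overlay@/var/lib/storage]docker.io/library/busybox:latest
+			//   [overlay@/var/lib/storage]docker.io/library/busybox
+			//   [overlay@/var/lib/storage]docker.io/library
+			//   [overlay@/var/lib/storage]docker.io
+			//   [overlay@/var/lib/storage]
+			//   [/var/lib/storage]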
+ namespaces = append(namespaces, storeSpec+s.reference) + } + components := strings.Split(s.name.FullName(), "/") + for len(components) > 0 { + namespaces = append(namespaces, storeSpec+strings.Join(components, "/")) + components = components[:len(components)-1] + } + } + namespaces = append(namespaces, storeSpec) + namespaces = append(namespaces, driverlessStoreSpec) + return namespaces +} + +func (s storageReference) NewImage(ctx *types.SystemContext) (types.Image, error) { + return newImage(s) +} + +func (s storageReference) DeleteImage(ctx *types.SystemContext) error { + id := s.resolveID() + if id == "" { + logrus.Errorf("reference %q does not resolve to an image ID", s.StringWithinTransport()) + return ErrNoSuchImage + } + layers, err := s.transport.store.DeleteImage(id, true) + if err == nil { + logrus.Debugf("deleted image %q", id) + for _, layer := range layers { + logrus.Debugf("deleted layer %q", layer) + } + } + return err +} + +func (s storageReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { + return newImageSource(s) +} + +func (s storageReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(s) +} diff --git a/vendor/src/github.com/containers/image/storage/storage_transport.go b/vendor/src/github.com/containers/image/storage/storage_transport.go new file mode 100644 index 00000000..71056ff1 --- /dev/null +++ b/vendor/src/github.com/containers/image/storage/storage_transport.go @@ -0,0 +1,285 @@ +package storage + +import ( + "errors" + "path/filepath" + "regexp" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/containers/image/docker/reference" + "github.com/containers/image/types" + "github.com/containers/storage/storage" + "github.com/docker/distribution/digest" + ddigest "github.com/docker/distribution/digest" +) + +var ( + // Transport is an ImageTransport that uses either a default + // storage.Store or one that's it's explicitly told to use. + Transport StoreTransport = &storageTransport{} + // ErrInvalidReference is returned when ParseReference() is passed an + // empty reference. + ErrInvalidReference = errors.New("invalid reference") + // ErrPathNotAbsolute is returned when a graph root is not an absolute + // path name. + ErrPathNotAbsolute = errors.New("path name is not absolute") + idRegexp = regexp.MustCompile("^(sha256:)?([0-9a-fA-F]{64})$") +) + +// StoreTransport is an ImageTransport that uses a storage.Store to parse +// references, either its own default or one that it's told to use. +type StoreTransport interface { + types.ImageTransport + // SetStore sets the default store for this transport. + SetStore(storage.Store) + // GetImage retrieves the image from the transport's store that's named + // by the reference. + GetImage(types.ImageReference) (*storage.Image, error) + // GetStoreImage retrieves the image from a specified store that's named + // by the reference. + GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error) + // ParseStoreReference parses a reference, overriding any store + // specification that it may contain. + ParseStoreReference(store storage.Store, reference string) (*storageReference, error) +} + +type storageTransport struct { + store storage.Store +} + +func (s *storageTransport) Name() string { + // Still haven't really settled on a name. 
+ return "containers-storage" +} + +// SetStore sets the Store object which the Transport will use for parsing +// references when information about a Store is not directly specified as part +// of the reference. If one is not set, the library will attempt to initialize +// one with default settings when a reference needs to be parsed. Calling +// SetStore does not affect previously parsed references. +func (s *storageTransport) SetStore(store storage.Store) { + s.store = store +} + +// ParseStoreReference takes a name or an ID, tries to figure out which it is +// relative to the given store, and returns it in a reference object. +func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { + var name reference.Named + var sum digest.Digest + var err error + if ref == "" { + return nil, ErrInvalidReference + } + if ref[0] == '[' { + // Ignore the store specifier. + closeIndex := strings.IndexRune(ref, ']') + if closeIndex < 1 { + return nil, ErrInvalidReference + } + ref = ref[closeIndex+1:] + } + refInfo := strings.SplitN(ref, "@", 2) + if len(refInfo) == 1 { + // A name. + name, err = reference.ParseNamed(refInfo[0]) + if err != nil { + return nil, err + } + } else if len(refInfo) == 2 { + // An ID, possibly preceded by a name. + if refInfo[0] != "" { + name, err = reference.ParseNamed(refInfo[0]) + if err != nil { + return nil, err + } + } + sum, err = digest.ParseDigest("sha256:" + refInfo[1]) + if err != nil { + return nil, err + } + } else { // Coverage: len(refInfo) is always 1 or 2 + // Anything else: store specified in a form we don't + // recognize. + return nil, ErrInvalidReference + } + storeSpec := "[" + store.GetGraphDriverName() + "@" + store.GetGraphRoot() + "]" + id := "" + if sum.Validate() == nil { + id = sum.Hex() + } + refname := "" + if name != nil { + name = reference.WithDefaultTag(name) + refname = verboseName(name) + } + if refname == "" { + logrus.Debugf("parsed reference into %q", storeSpec+"@"+id) + } else if id == "" { + logrus.Debugf("parsed reference into %q", storeSpec+refname) + } else { + logrus.Debugf("parsed reference into %q", storeSpec+refname+"@"+id) + } + return newReference(storageTransport{store: store}, refname, id, name), nil +} + +func (s *storageTransport) GetStore() (storage.Store, error) { + // Return the transport's previously-set store. If we don't have one + // of those, initialize one now. + if s.store == nil { + store, err := storage.GetStore(storage.DefaultStoreOptions) + if err != nil { + return nil, err + } + s.store = store + } + return s.store, nil +} + +// ParseReference takes a name and/or an ID ("_name_"/"@_id_"/"_name_@_id_"), +// possibly prefixed with a store specifier in the form "[_graphroot_]" or +// "[_driver_@_graphroot_]", tries to figure out which it is, and returns it in +// a reference object. If the _graphroot_ is a location other than the default, +// it needs to have been previously opened using storage.GetStore(), so that it +// can figure out which run root goes with the graph root. +func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) { + store, err := s.GetStore() + if err != nil { + return nil, err + } + // Check if there's a store location prefix. If there is, then it + // needs to match a store that was previously initialized using + // storage.GetStore(), or be enough to let the storage library fill out + // the rest using knowledge that it has from elsewhere. 
+ if reference[0] == '[' { + closeIndex := strings.IndexRune(reference, ']') + if closeIndex < 1 { + return nil, ErrInvalidReference + } + storeSpec := reference[1:closeIndex] + reference = reference[closeIndex+1:] + storeInfo := strings.SplitN(storeSpec, "@", 2) + if len(storeInfo) == 1 && storeInfo[0] != "" { + // One component: the graph root. + if !filepath.IsAbs(storeInfo[0]) { + return nil, ErrPathNotAbsolute + } + store2, err := storage.GetStore(storage.StoreOptions{ + GraphRoot: storeInfo[0], + }) + if err != nil { + return nil, err + } + store = store2 + } else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" { + // Two components: the driver type and the graph root. + if !filepath.IsAbs(storeInfo[1]) { + return nil, ErrPathNotAbsolute + } + store2, err := storage.GetStore(storage.StoreOptions{ + GraphDriverName: storeInfo[0], + GraphRoot: storeInfo[1], + }) + if err != nil { + return nil, err + } + store = store2 + } else { + // Anything else: store specified in a form we don't + // recognize. + return nil, ErrInvalidReference + } + } + return s.ParseStoreReference(store, reference) +} + +func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) { + dref := ref.DockerReference() + if dref == nil { + if sref, ok := ref.(*storageReference); ok { + if sref.id != "" { + if img, err := store.GetImage(sref.id); err == nil { + return img, nil + } + } + } + return nil, ErrInvalidReference + } + return store.GetImage(verboseName(dref)) +} + +func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) { + store, err := s.GetStore() + if err != nil { + return nil, err + } + return s.GetStoreImage(store, ref) +} + +func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { + // Check that there's a store location prefix. Values we're passed are + // expected to come from PolicyConfigurationIdentity or + // PolicyConfigurationNamespaces, so if there's no store location, + // something's wrong. + if scope[0] != '[' { + return ErrInvalidReference + } + // Parse the store location prefix. + closeIndex := strings.IndexRune(scope, ']') + if closeIndex < 1 { + return ErrInvalidReference + } + storeSpec := scope[1:closeIndex] + scope = scope[closeIndex+1:] + storeInfo := strings.SplitN(storeSpec, "@", 2) + if len(storeInfo) == 1 && storeInfo[0] != "" { + // One component: the graph root. + if !filepath.IsAbs(storeInfo[0]) { + return ErrPathNotAbsolute + } + } else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" { + // Two components: the driver type and the graph root. + if !filepath.IsAbs(storeInfo[1]) { + return ErrPathNotAbsolute + } + } else { + // Anything else: store specified in a form we don't + // recognize. + return ErrInvalidReference + } + // That might be all of it, and that's okay. + if scope == "" { + return nil + } + // But if there is anything left, it has to be a name, with or without + // a tag, with or without an ID, since we don't return namespace values + // that are just bare IDs. 
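+	// For example (illustrative), "docker.io/library/busybox",
+	// "docker.io/library/busybox:latest", and
+	// "docker.io/library/busybox:latest@<64-hex-digit-ID>" all pass this
+	// check, while a bare "@<ID>" does not.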
+ scopeInfo := strings.SplitN(scope, "@", 2) + if len(scopeInfo) == 1 && scopeInfo[0] != "" { + _, err := reference.ParseNamed(scopeInfo[0]) + if err != nil { + return err + } + } else if len(scopeInfo) == 2 && scopeInfo[0] != "" && scopeInfo[1] != "" { + _, err := reference.ParseNamed(scopeInfo[0]) + if err != nil { + return err + } + _, err = ddigest.ParseDigest("sha256:" + scopeInfo[1]) + if err != nil { + return err + } + } else { + return ErrInvalidReference + } + return nil +} + +func verboseName(name reference.Named) string { + name = reference.WithDefaultTag(name) + tag := "" + if tagged, ok := name.(reference.NamedTagged); ok { + tag = tagged.Tag() + } + return name.FullName() + ":" + tag +} diff --git a/vendor/src/github.com/containers/image/transports/transports.go b/vendor/src/github.com/containers/image/transports/transports.go index 2b7e4f13..d4141df0 100644 --- a/vendor/src/github.com/containers/image/transports/transports.go +++ b/vendor/src/github.com/containers/image/transports/transports.go @@ -6,8 +6,10 @@ import ( "github.com/containers/image/directory" "github.com/containers/image/docker" + "github.com/containers/image/docker/daemon" ociLayout "github.com/containers/image/oci/layout" "github.com/containers/image/openshift" + "github.com/containers/image/storage" "github.com/containers/image/types" ) @@ -16,11 +18,15 @@ var KnownTransports map[string]types.ImageTransport func init() { KnownTransports = make(map[string]types.ImageTransport) + // NOTE: Make sure docs/policy.json.md is updated when adding or updating + // a transport. for _, t := range []types.ImageTransport{ directory.Transport, docker.Transport, + daemon.Transport, ociLayout.Transport, openshift.Transport, + storage.Transport, } { name := t.Name() if _, ok := KnownTransports[name]; ok { diff --git a/vendor/src/github.com/containers/image/types/types.go b/vendor/src/github.com/containers/image/types/types.go index 5d3de3b0..a05a7064 100644 --- a/vendor/src/github.com/containers/image/types/types.go +++ b/vendor/src/github.com/containers/image/types/types.go @@ -1,10 +1,12 @@ package types import ( + "errors" "io" "time" - "github.com/docker/docker/reference" + "github.com/containers/image/docker/reference" + "github.com/docker/distribution/digest" ) // ImageTransport is a top-level namespace for ways to to store/load an image. @@ -70,8 +72,10 @@ type ImageReference interface { // and each following element to be a prefix of the element preceding it. PolicyConfigurationNamespaces() []string - // NewImage returns a types.Image for this reference. + // NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. // The caller must call .Close() on the returned Image. + // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, + // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. NewImage(ctx *SystemContext) (Image, error) // NewImageSource returns a types.ImageSource for this reference, // asking the backend to use a manifest from requestedManifestMIMETypes if possible. @@ -86,21 +90,36 @@ type ImageReference interface { DeleteImage(ctx *SystemContext) error } +// BlobInfo collects known information about a blob (layer/config). +// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that. +type BlobInfo struct { + Digest digest.Digest // "" if unknown. 
+ Size int64 // -1 if unknown + URLs []string +} + // ImageSource is a service, possibly remote (= slow), to download components of a single image. // This is primarily useful for copying images around; for examining their properties, Image (below) // is usually more useful. // Each ImageSource should eventually be closed by calling Close(). +// +// WARNING: Various methods which return an object identified by digest generally do not +// validate that the returned data actually matches that digest; this is the caller’s responsibility. type ImageSource interface { // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. Reference() ImageReference // Close removes resources associated with an initialized ImageSource, if any. Close() - // GetManifest returns the image's manifest along with its MIME type. The empty string is returned if the MIME type is unknown. + // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. GetManifest() ([]byte, string, error) + // GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest + // out of a manifest list. + GetTargetManifest(digest digest.Digest) ([]byte, string, error) // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). - GetBlob(digest string) (io.ReadCloser, int64, error) + // The Digest field in BlobInfo is guaranteed to be provided; Size may be -1. + GetBlob(BlobInfo) (io.ReadCloser, int64, error) // GetSignatures returns the image's signatures. It may use a remote (= slow) service. GetSignatures() ([][]byte, error) } @@ -109,6 +128,7 @@ type ImageSource interface { // // There is a specific required order for some of the calls: // PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time) +// ReapplyBlob, if used, MUST only be called if HasBlob returned true for the same blob digest // PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents) // Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist. // @@ -126,14 +146,24 @@ type ImageDestination interface { // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. SupportsSignatures() error + // ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. + ShouldCompressLayers() bool - // PutBlob writes contents of stream and returns its computed digest and size. - // A digest can be optionally provided if known, the specific image destination can decide to play with it or not. - // The length of stream is expected to be expectedSize; if expectedSize == -1, it is not known. + // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually + // uploaded to the image destination, true otherwise. + AcceptsForeignLayerURLs() bool + + // PutBlob writes contents of stream and returns data representing the result (with all data filled in). 
+ // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. + // inputInfo.Size is the expected length of stream, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. - PutBlob(stream io.Reader, digest string, expectedSize int64) (string, int64, error) + PutBlob(stream io.Reader, inputInfo BlobInfo) (BlobInfo, error) + // HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. A false result will often be accompanied by an ErrBlobNotFound error. + HasBlob(info BlobInfo) (bool, int64, error) + // ReapplyBlob informs the image destination that a blob for which HasBlob previously returned true would have been passed to PutBlob if it had returned false. Like HasBlob and unlike PutBlob, the digest can not be empty. If the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree. + ReapplyBlob(info BlobInfo) (BlobInfo, error) // FIXME? This should also receive a MIME type if known, to differentiate between schema versions. PutManifest([]byte) error PutSignatures(signatures [][]byte) error @@ -144,26 +174,69 @@ type ImageDestination interface { Commit() error } -// Image is the primary API for inspecting properties of images. -// Each Image should eventually be closed by calling Close(). -type Image interface { +// UnparsedImage is an Image-to-be; until it is verified and accepted, it only caries its identity and caches manifest and signature blobs. +// Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them, +// allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else. +// This also makes the UnparsedImage→Image conversion an explicitly visible step. +// Each UnparsedImage should eventually be closed by calling Close(). +type UnparsedImage interface { // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. Reference() ImageReference - // Close removes resources associated with an initialized Image, if any. + // Close removes resources associated with an initialized UnparsedImage, if any. Close() - // ref to repository? // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. - // NOTE: It is essential for signature verification that Manifest returns the manifest from which BlobDigests is computed. Manifest() ([]byte, string, error) // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. Signatures() ([][]byte, error) - // BlobDigests returns a list of blob digests referenced by this image. - // The list will not contain duplicates; it is not intended to correspond to the "history" or "parent chain" of a Docker image. 
- // NOTE: It is essential for signature verification that BlobDigests is computed from the same manifest which is returned by Manifest(). - BlobDigests() ([]string, error) +} + +// Image is the primary API for inspecting properties of images. +// Each Image should eventually be closed by calling Close(). +type Image interface { + // Note that Reference may return nil in the return value of UpdatedImage! + UnparsedImage + // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. + // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. + ConfigInfo() BlobInfo + // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. + // The result is cached; it is OK to call this however often you need. + ConfigBlob() ([]byte, error) + // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfos() []BlobInfo // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. Inspect() (*ImageInspectInfo, error) + // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. + // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute + // (most importantly it forces us to download the full layers even if they are already present at the destination). + UpdatedImageNeedsLayerDiffIDs(options ManifestUpdateOptions) bool + // UpdatedImage returns a types.Image modified according to options. + // Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired. + // This does not change the state of the original Image object. + UpdatedImage(options ManifestUpdateOptions) (Image, error) + // IsMultiImage returns true if the image's manifest is a list of images, false otherwise. + IsMultiImage() bool + // Size returns an approximation of the amount of disk space which is consumed by the image in its current + // location. If the size is not known, -1 will be returned. + Size() (int64, error) +} + +// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest +type ManifestUpdateOptions struct { + LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls) which should replace the originals, in order (the root layer first, and then successive layered layers) + ManifestMIMEType string + // The values below are NOT requests to modify the image; they provide optional context which may or may not be used. + InformationOnly ManifestUpdateInformation +} + +// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here +// only to make writing struct literals possible. 
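+// (Illustration: a schema2 → schema1 conversion would read the uncompressed
+// digests from LayerDiffIDs, in the same layer order as LayerInfos.)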
+type ManifestUpdateInformation struct { + Destination ImageDestination // and yes, UpdatedManifest may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go) + LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers) + LayerDiffIDs []digest.Digest // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order. } // ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration. @@ -179,6 +252,12 @@ type ImageInspectInfo struct { Layers []string } +// DockerAuthConfig contains authorization information for connecting to a registry. +type DockerAuthConfig struct { + Username string + Password string +} + // SystemContext allows parametrizing access to implicitly-accessed resources, // like configuration files in /etc and users' login state in their home directory. // Various components can share the same field only if their semantics is exactly @@ -200,6 +279,22 @@ type SystemContext struct { RegistriesDirPath string // === docker.Transport overrides === - DockerCertPath string // If not "", a directory containing "cert.pem" and "key.pem" used when talking to a Docker Registry - DockerInsecureSkipTLSVerify bool // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. + // If not "", a directory containing a CA certificate (ending with ".crt"), + // a client certificate (ending with ".cert") and a client ceritificate key + // (ending with ".key") used when talking to a Docker Registry. + DockerCertPath string + DockerInsecureSkipTLSVerify bool // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. + // if nil, the library tries to parse ~/.docker/config.json to retrieve credentials + DockerAuthConfig *DockerAuthConfig + // if not "", an User-Agent header is added to each request when contacting a registry. + DockerRegistryUserAgent string + // if true, a V1 ping attempt isn't done to give users a better error. Default is false. + // Note that this field is used mainly to integrate containers/image into projectatomic/docker + // in order to not break any existing docker's integration tests. + DockerDisableV1Ping bool } + +var ( + // ErrBlobNotFound can be returned by an ImageDestination's HasBlob() method + ErrBlobNotFound = errors.New("no such blob present") +) diff --git a/vendor/src/github.com/containers/storage/LICENSE b/vendor/src/github.com/containers/storage/LICENSE new file mode 100644 index 00000000..8f3fee62 --- /dev/null +++ b/vendor/src/github.com/containers/storage/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/src/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/src/github.com/containers/storage/drivers/aufs/aufs.go new file mode 100644 index 00000000..f9b58bf5 --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/aufs/aufs.go @@ -0,0 +1,586 @@ +// +build linux + +/* + +aufs driver directory structure + + . 
+ ├── layers // Metadata of layers + │ ├── 1 + │ ├── 2 + │ └── 3 + ├── diff // Content of the layer + │ ├── 1 // Contains layers that need to be mounted for the id + │ ├── 2 + │ └── 3 + └── mnt // Mount points for the rw layers to be mounted + ├── 1 + ├── 2 + └── 3 + +*/ + +package aufs + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "sync" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + + "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + mountpk "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/stringid" + + "github.com/opencontainers/runc/libcontainer/label" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +var ( + // ErrAufsNotSupported is returned if aufs is not supported by the host. + ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") + // ErrAufsNested means aufs cannot be used bc we are in a user namespace + ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace") + backingFs = "" + + enableDirpermLock sync.Once + enableDirperm bool +) + +func init() { + graphdriver.Register("aufs", Init) +} + +// Driver contains information about the filesystem mounted. +type Driver struct { + sync.Mutex + root string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + pathCacheLock sync.Mutex + pathCache map[string]string +} + +// Init returns a new AUFS driver. +// An error is returned if AUFS is not supported. +func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + // Try to load the aufs kernel module + if err := supportsAufs(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + fsMagic, err := graphdriver.GetFSMagic(root) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs: + logrus.Errorf("AUFS is not supported over %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + a := &Driver{ + root: root, + uidMaps: uidMaps, + gidMaps: gidMaps, + pathCache: make(map[string]string), + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the root aufs driver dir and return + // if it already exists + // If not populate the dir structure + if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil { + if os.IsExist(err) { + return a, nil + } + return nil, err + } + + if err := mountpk.MakePrivate(root); err != nil { + return nil, err + } + + // Populate the dir structure + for _, p := range paths { + if err := idtools.MkdirAllAs(path.Join(root, p), 0700, rootUID, rootGID); err != nil { + return nil, err + } + } + return a, nil +} + +// Return a nil error if the kernel supports aufs +// We cannot modprobe because inside dind modprobe fails +// to run +func supportsAufs() error { + // We can try to modprobe aufs first before looking at + // proc/filesystems for when aufs is supported + exec.Command("modprobe", "aufs").Run() + + if 
rsystem.RunningInUserNS() { + return ErrAufsNested + } + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.Contains(s.Text(), "aufs") { + return nil + } + } + return ErrAufsNotSupported +} + +func (a *Driver) rootPath() string { + return a.root +} + +func (*Driver) String() string { + return "aufs" +} + +// Status returns current information about the filesystem such as root directory, number of directories mounted, etc. +func (a *Driver) Status() [][2]string { + ids, _ := loadIds(path.Join(a.rootPath(), "layers")) + return [][2]string{ + {"Root Dir", a.rootPath()}, + {"Backing Filesystem", backingFs}, + {"Dirs", fmt.Sprintf("%d", len(ids))}, + {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, + } +} + +// GetMetadata not implemented +func (a *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Exists returns true if the given id is registered with +// this driver +func (a *Driver) Exists(id string) bool { + if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { + return false + } + return true +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (a *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + return a.Create(id, parent, mountLabel, storageOpt) +} + +// Create three folders for each id +// mnt, layers, and diff +func (a *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { + + if len(storageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for aufs") + } + + if err := a.createDirsFor(id); err != nil { + return err + } + // Write the layers metadata + f, err := os.Create(path.Join(a.rootPath(), "layers", id)) + if err != nil { + return err + } + defer f.Close() + + if parent != "" { + ids, err := getParentIds(a.rootPath(), parent) + if err != nil { + return err + } + + if _, err := fmt.Fprintln(f, parent); err != nil { + return err + } + for _, i := range ids { + if _, err := fmt.Fprintln(f, i); err != nil { + return err + } + } + } + + return nil +} + +// createDirsFor creates two directories for the given id. +// mnt and diff +func (a *Driver) createDirsFor(id string) error { + paths := []string{ + "mnt", + "diff", + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(a.uidMaps, a.gidMaps) + if err != nil { + return err + } + // Directory permission is 0755. + // The path of directories are /mnt/ + // and /diff/ + for _, p := range paths { + if err := idtools.MkdirAllAs(path.Join(a.rootPath(), p, id), 0755, rootUID, rootGID); err != nil { + return err + } + } + return nil +} + +// Remove will unmount and remove the given id. +func (a *Driver) Remove(id string) error { + a.pathCacheLock.Lock() + mountpoint, exists := a.pathCache[id] + a.pathCacheLock.Unlock() + if !exists { + mountpoint = a.getMountpoint(id) + } + if err := a.unmount(mountpoint); err != nil { + // no need to return here, we can still try to remove since the `Rename` will fail below if still mounted + logrus.Debugf("aufs: error while unmounting %s: %v", mountpoint, err) + } + + // Atomically remove each directory in turn by first moving it out of the + // way (so that docker doesn't find it anymore) before doing removal of + // the whole tree. 
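+	// Renaming within the same backing filesystem keeps each move atomic,
+	// so a partially-removed tree is never left at its original path.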
+ tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id)) + if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) { + return err + } + defer os.RemoveAll(tmpMntPath) + + tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id)) + if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) { + return err + } + defer os.RemoveAll(tmpDiffpath) + + // Remove the layers file for the id + if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { + return err + } + + a.pathCacheLock.Lock() + delete(a.pathCache, id) + a.pathCacheLock.Unlock() + return nil +} + +// Get returns the rootfs path for the id. +// This will mount the dir at it's given path +func (a *Driver) Get(id, mountLabel string) (string, error) { + parents, err := a.getParentLayerPaths(id) + if err != nil && !os.IsNotExist(err) { + return "", err + } + + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + a.pathCacheLock.Unlock() + + if !exists { + m = a.getDiffPath(id) + if len(parents) > 0 { + m = a.getMountpoint(id) + } + } + if count := a.ctr.Increment(m); count > 1 { + return m, nil + } + + // If a dir does not have a parent ( no layers )do not try to mount + // just return the diff path to the data + if len(parents) > 0 { + if err := a.mount(id, m, mountLabel, parents); err != nil { + return "", err + } + } + + a.pathCacheLock.Lock() + a.pathCache[id] = m + a.pathCacheLock.Unlock() + return m, nil +} + +// Put unmounts and updates list of active mounts. +func (a *Driver) Put(id string) error { + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + if !exists { + m = a.getMountpoint(id) + a.pathCache[id] = m + } + a.pathCacheLock.Unlock() + if count := a.ctr.Decrement(m); count > 0 { + return nil + } + + err := a.unmount(m) + if err != nil { + logrus.Debugf("Failed to unmount %s aufs: %v", id, err) + } + return err +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (a *Driver) Diff(id, parent string) (archive.Archive, error) { + // AUFS doesn't need the parent layer to produce a diff. + return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + Compression: archive.Uncompressed, + ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir}, + UIDMaps: a.uidMaps, + GIDMaps: a.gidMaps, + }) +} + +type fileGetNilCloser struct { + storage.FileGetter +} + +func (f fileGetNilCloser) Close() error { + return nil +} + +// DiffGetter returns a FileGetCloser that can read files from the directory that +// contains files for the layer differences. Used for direct access for tar-split. +func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p := path.Join(a.rootPath(), "diff", id) + return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil +} + +func (a *Driver) applyDiff(id string, diff archive.Reader) error { + return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + UIDMaps: a.uidMaps, + GIDMaps: a.gidMaps, + }) +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (a *Driver) DiffSize(id, parent string) (size int64, err error) { + // AUFS doesn't need the parent layer to calculate the diff size. 
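+	// A layer's own writes all land in its diff/<id> directory, so sizing
+	// that directory alone yields this layer's contribution.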
+ return directory.Size(path.Join(a.rootPath(), "diff", id)) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (a *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { + // AUFS doesn't need the parent id to apply the diff. + if err = a.applyDiff(id, diff); err != nil { + return + } + + return a.DiffSize(id, parent) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { + // AUFS doesn't have snapshots, so we need to get changes from all parent + // layers. + layers, err := a.getParentLayerPaths(id) + if err != nil { + return nil, err + } + return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) +} + +func (a *Driver) getParentLayerPaths(id string) ([]string, error) { + parentIds, err := getParentIds(a.rootPath(), id) + if err != nil { + return nil, err + } + layers := make([]string, len(parentIds)) + + // Get the diff paths for all the parent ids + for i, p := range parentIds { + layers[i] = path.Join(a.rootPath(), "diff", p) + } + return layers, nil +} + +func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error { + a.Lock() + defer a.Unlock() + + // If the id is mounted or we get an error return + if mounted, err := a.mounted(target); err != nil || mounted { + return err + } + + rw := a.getDiffPath(id) + + if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { + return fmt.Errorf("error creating aufs mount to %s: %v", target, err) + } + return nil +} + +func (a *Driver) unmount(mountPath string) error { + a.Lock() + defer a.Unlock() + + if mounted, err := a.mounted(mountPath); err != nil || !mounted { + return err + } + if err := Unmount(mountPath); err != nil { + return err + } + return nil +} + +func (a *Driver) mounted(mountpoint string) (bool, error) { + return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint) +} + +// Cleanup aufs and unmount all mountpoints +func (a *Driver) Cleanup() error { + var dirs []string + if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + return nil + } + dirs = append(dirs, path) + return nil + }); err != nil { + return err + } + + for _, m := range dirs { + if err := a.unmount(m); err != nil { + logrus.Debugf("aufs error unmounting %s: %s", stringid.TruncateID(m), err) + } + } + return mountpk.Unmount(a.root) +} + +func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { + defer func() { + if err != nil { + Unmount(target) + } + }() + + // Mount options are clipped to page size(4096 bytes). If there are more + // layers then these are remounted individually using append. 
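+	// (Reading of the hard-coded value below: the 54 bytes leave headroom
+	// for the fixed parts of the mount data, such as the ",dio,xino=..."
+	// options and the SELinux context formatting, beyond the label and
+	// branch strings themselves.)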
+ + offset := 54 + if useDirperm() { + offset += len("dirperm1") + } + b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel + bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) + + firstMount := true + i := 0 + + for { + for ; i < len(ro); i++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[i]) + + if firstMount { + if bp+len(layer) > len(b) { + break + } + bp += copy(b[bp:], layer) + } else { + data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) + if err = mount("none", target, "aufs", syscall.MS_REMOUNT, data); err != nil { + return + } + } + } + + if firstMount { + opts := "dio,xino=/dev/shm/aufs.xino" + if useDirperm() { + opts += ",dirperm1" + } + data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) + if err = mount("none", target, "aufs", 0, data); err != nil { + return + } + firstMount = false + } + + if i == len(ro) { + break + } + } + + return +} + +// useDirperm checks dirperm1 mount option can be used with the current +// version of aufs. +func useDirperm() bool { + enableDirpermLock.Do(func() { + base, err := ioutil.TempDir("", "docker-aufs-base") + if err != nil { + logrus.Errorf("error checking dirperm1: %v", err) + return + } + defer os.RemoveAll(base) + + union, err := ioutil.TempDir("", "docker-aufs-union") + if err != nil { + logrus.Errorf("error checking dirperm1: %v", err) + return + } + defer os.RemoveAll(union) + + opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base) + if err := mount("none", union, "aufs", 0, opts); err != nil { + return + } + enableDirperm = true + if err := Unmount(union); err != nil { + logrus.Errorf("error checking dirperm1: failed to unmount %v", err) + } + }) + return enableDirperm +} diff --git a/vendor/src/github.com/containers/storage/drivers/aufs/dirs.go b/vendor/src/github.com/containers/storage/drivers/aufs/dirs.go new file mode 100644 index 00000000..eb298d9e --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/aufs/dirs.go @@ -0,0 +1,64 @@ +// +build linux + +package aufs + +import ( + "bufio" + "io/ioutil" + "os" + "path" +) + +// Return all the directories +func loadIds(root string) ([]string, error) { + dirs, err := ioutil.ReadDir(root) + if err != nil { + return nil, err + } + out := []string{} + for _, d := range dirs { + if !d.IsDir() { + out = append(out, d.Name()) + } + } + return out, nil +} + +// Read the layers file for the current id and return all the +// layers represented by new lines in the file +// +// If there are no lines in the file then the id has no parent +// and an empty slice is returned. 
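+// (Illustration: a layers file containing the lines "b" and "c" yields
+// ["b", "c"], nearest ancestor first, matching the order Create writes.)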
+func getParentIds(root, id string) ([]string, error) { + f, err := os.Open(path.Join(root, "layers", id)) + if err != nil { + return nil, err + } + defer f.Close() + + out := []string{} + s := bufio.NewScanner(f) + + for s.Scan() { + if t := s.Text(); t != "" { + out = append(out, s.Text()) + } + } + return out, s.Err() +} + +func (a *Driver) getMountpoint(id string) string { + return path.Join(a.mntPath(), id) +} + +func (a *Driver) mntPath() string { + return path.Join(a.rootPath(), "mnt") +} + +func (a *Driver) getDiffPath(id string) string { + return path.Join(a.diffPath(), id) +} + +func (a *Driver) diffPath() string { + return path.Join(a.rootPath(), "diff") +} diff --git a/vendor/src/github.com/containers/storage/drivers/aufs/mount.go b/vendor/src/github.com/containers/storage/drivers/aufs/mount.go new file mode 100644 index 00000000..da1e892f --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/aufs/mount.go @@ -0,0 +1,21 @@ +// +build linux + +package aufs + +import ( + "os/exec" + "syscall" + + "github.com/Sirupsen/logrus" +) + +// Unmount the target specified. +func Unmount(target string) error { + if err := exec.Command("auplink", target, "flush").Run(); err != nil { + logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err) + } + if err := syscall.Unmount(target, 0); err != nil { + return err + } + return nil +} diff --git a/vendor/src/github.com/containers/storage/drivers/aufs/mount_linux.go b/vendor/src/github.com/containers/storage/drivers/aufs/mount_linux.go new file mode 100644 index 00000000..8062bae4 --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/aufs/mount_linux.go @@ -0,0 +1,7 @@ +package aufs + +import "syscall" + +func mount(source string, target string, fstype string, flags uintptr, data string) error { + return syscall.Mount(source, target, fstype, flags, data) +} diff --git a/vendor/src/github.com/containers/storage/drivers/aufs/mount_unsupported.go b/vendor/src/github.com/containers/storage/drivers/aufs/mount_unsupported.go new file mode 100644 index 00000000..d030b066 --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/aufs/mount_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package aufs + +import "errors" + +// MsRemount declared to specify a non-linux system mount. 
+const MsRemount = 0
+
+func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
+	return errors.New("mount is not implemented on this platform")
+}
diff --git a/vendor/src/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/src/github.com/containers/storage/drivers/btrfs/btrfs.go
new file mode 100644
index 00000000..a85c3c77
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/drivers/btrfs/btrfs.go
@@ -0,0 +1,520 @@
+// +build linux
+
+package btrfs
+
+/*
+#include <stdlib.h>
+#include <dirent.h>
+#include <btrfs/ioctl.h>
+#include <btrfs/ctree.h>
+
+static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) {
+	snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value);
+}
+*/
+import "C"
+
+import (
+	"fmt"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+	"syscall"
+	"unsafe"
+
+	"github.com/containers/storage/drivers"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/mount"
+	"github.com/containers/storage/pkg/parsers"
+	"github.com/docker/go-units"
+	"github.com/opencontainers/runc/libcontainer/label"
+)
+
+func init() {
+	graphdriver.Register("btrfs", Init)
+}
+
+var (
+	quotaEnabled  = false
+	userDiskQuota = false
+)
+
+type btrfsOptions struct {
+	minSpace uint64
+	size     uint64
+}
+
+// Init returns a new BTRFS driver.
+// An error is returned if BTRFS is not supported.
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+
+	fsMagic, err := graphdriver.GetFSMagic(home)
+	if err != nil {
+		return nil, err
+	}
+
+	if fsMagic != graphdriver.FsMagicBtrfs {
+		return nil, graphdriver.ErrPrerequisites
+	}
+
+	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+	if err != nil {
+		return nil, err
+	}
+	if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil {
+		return nil, err
+	}
+
+	if err := mount.MakePrivate(home); err != nil {
+		return nil, err
+	}
+
+	opt, err := parseOptions(options)
+	if err != nil {
+		return nil, err
+	}
+
+	if userDiskQuota {
+		if err := subvolEnableQuota(home); err != nil {
+			return nil, err
+		}
+		quotaEnabled = true
+	}
+
+	driver := &Driver{
+		home:    home,
+		uidMaps: uidMaps,
+		gidMaps: gidMaps,
+		options: opt,
+	}
+
+	return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil
+}
+
+func parseOptions(opt []string) (btrfsOptions, error) {
+	var options btrfsOptions
+	for _, option := range opt {
+		key, val, err := parsers.ParseKeyValueOpt(option)
+		if err != nil {
+			return options, err
+		}
+		key = strings.ToLower(key)
+		switch key {
+		case "btrfs.min_space":
+			minSpace, err := units.RAMInBytes(val)
+			if err != nil {
+				return options, err
+			}
+			userDiskQuota = true
+			options.minSpace = uint64(minSpace)
+		default:
+			return options, fmt.Errorf("Unknown option %s", key)
+		}
+	}
+	return options, nil
+}
+
+// Driver contains information about the filesystem mounted.
+type Driver struct {
+	// root of the file system
+	home    string
+	uidMaps []idtools.IDMap
+	gidMaps []idtools.IDMap
+	options btrfsOptions
+}
+
+// String prints the name of the driver (btrfs).
+func (d *Driver) String() string {
+	return "btrfs"
+}
+
+// Status returns current driver information in a two dimensional string array.
+// Output contains "Build Version" and "Library Version" of the btrfs libraries used.
+// Version information can be used to check compatibility with your kernel.
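Driver options arrive as key=value strings (for example `btrfs.min_space=10G`), with sizes decoded by go-units. A self-contained sketch of the same parsing, substituting strings.SplitN for the vendored parsers.ParseKeyValueOpt helper (the option key is the one parseOptions actually accepts; everything else is illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/docker/go-units"
)

// parseMinSpace mirrors parseOptions above for the one option it
// recognizes; unknown keys are rejected the same way.
func parseMinSpace(opts []string) (uint64, error) {
	var minSpace uint64
	for _, o := range opts {
		kv := strings.SplitN(o, "=", 2)
		if len(kv) != 2 {
			return 0, fmt.Errorf("malformed option %q", o)
		}
		switch strings.ToLower(kv[0]) {
		case "btrfs.min_space":
			n, err := units.RAMInBytes(kv[1])
			if err != nil {
				return 0, err
			}
			minSpace = uint64(n)
		default:
			return 0, fmt.Errorf("unknown option %s", kv[0])
		}
	}
	return minSpace, nil
}

func main() {
	n, err := parseMinSpace([]string{"btrfs.min_space=10G"})
	fmt.Println(n, err) // 10737418240 <nil>
}
```

Status, below, reports the btrfs build and library versions collected through cgo.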
+func (d *Driver) Status() [][2]string { + status := [][2]string{} + if bv := btrfsBuildVersion(); bv != "-" { + status = append(status, [2]string{"Build Version", bv}) + } + if lv := btrfsLibVersion(); lv != -1 { + status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) + } + return status +} + +// GetMetadata returns empty metadata for this driver. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup unmounts the home directory. +func (d *Driver) Cleanup() error { + if quotaEnabled { + if err := subvolDisableQuota(d.home); err != nil { + return err + } + } + + return mount.Unmount(d.home) +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +func subvolCreate(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) + } + return nil +} + +func subvolSnapshot(src, dest, name string) error { + srcDir, err := openDir(src) + if err != nil { + return err + } + defer closeDir(srcDir) + + destDir, err := openDir(dest) + if err != nil { + return err + } + defer closeDir(destDir) + + var args C.struct_btrfs_ioctl_vol_args_v2 + args.fd = C.__s64(getDirFd(srcDir)) + + var cs = C.CString(name) + C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) + C.free(unsafe.Pointer(cs)) + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func isSubvolume(p string) (bool, error) { + var bufStat syscall.Stat_t + if err := syscall.Lstat(p, &bufStat); err != nil { + return false, err + } + + // return true if it is a btrfs subvolume + return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil +} + +func subvolDelete(dirpath, name string) error { + dir, err := openDir(dirpath) + if err != nil { + return err + } + defer closeDir(dir) + fullPath := path.Join(dirpath, name) + + var args C.struct_btrfs_ioctl_vol_args + + // walk the btrfs subvolumes + walkSubvolumes := func(p string, f os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) && p != fullPath { + // missing most likely because the path was a subvolume that got removed in the previous iteration + // since it's gone anyway, we don't care + return nil + } + return fmt.Errorf("error walking subvolumes: %v", err) + } + // we want to check children only so skip itself + // it will be removed after the filepath walk anyways + if f.IsDir() && p != fullPath { + sv, err := isSubvolume(p) + if err != nil { + return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) + } + if sv { + if err := subvolDelete(path.Dir(p), f.Name()); err != nil { + return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, 
err) + } + } + } + return nil + } + if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { + return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) + } + + // all subvolumes have been removed + // now remove the one originally passed in + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) + } + return nil +} + +func subvolEnableQuota(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_ctl_args + args.cmd = C.BTRFS_QUOTA_CTL_ENABLE + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolDisableQuota(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_ctl_args + args.cmd = C.BTRFS_QUOTA_CTL_DISABLE + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolRescanQuota(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_rescan_args + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolLimitQgroup(path string, size uint64) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_qgroup_limit_args + args.lim.max_referenced = C.__u64(size) + args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error()) + } + + return nil +} + +func (d *Driver) subvolumesDir() string { + return path.Join(d.home, "subvolumes") +} + +func (d *Driver) subvolumesDirID(id string) string { + return path.Join(d.subvolumesDir(), id) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + return d.Create(id, parent, mountLabel, storageOpt) +} + +// Create the filesystem with given id. 
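The ioctl helpers above have a direct command-line analogue, which can make the control flow easier to see. The sketch below mirrors the create-or-snapshot decision that Create, next, makes through BTRFS_IOC_SUBVOL_CREATE and BTRFS_IOC_SNAP_CREATE_V2, but shells out to btrfs(8) instead; the paths are hypothetical, and this is not how the vendored driver operates:

```go
package main

import (
	"fmt"
	"os/exec"
	"path/filepath"
)

// createLayer creates a fresh subvolume when there is no parent, and a
// writable snapshot of the parent otherwise, via the btrfs CLI.
func createLayer(subvolumes, id, parent string) error {
	var cmd *exec.Cmd
	if parent == "" {
		cmd = exec.Command("btrfs", "subvolume", "create",
			filepath.Join(subvolumes, id))
	} else {
		cmd = exec.Command("btrfs", "subvolume", "snapshot",
			filepath.Join(subvolumes, parent), filepath.Join(subvolumes, id))
	}
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("btrfs: %v: %s", err, out)
	}
	return nil
}

func main() {
	// Base layer, then a child snapshotted from it.
	fmt.Println(createLayer("/var/lib/storage/btrfs/subvolumes", "base", ""))
	fmt.Println(createLayer("/var/lib/storage/btrfs/subvolumes", "child", "base"))
}
```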
+func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { + subvolumes := path.Join(d.home, "subvolumes") + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil { + return err + } + if parent == "" { + if err := subvolCreate(subvolumes, id); err != nil { + return err + } + } else { + parentDir := d.subvolumesDirID(parent) + st, err := os.Stat(parentDir) + if err != nil { + return err + } + if !st.IsDir() { + return fmt.Errorf("%s: not a directory", parentDir) + } + if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { + return err + } + } + + if _, ok := storageOpt["size"]; ok { + driver := &Driver{} + if err := d.parseStorageOpt(storageOpt, driver); err != nil { + return err + } + if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { + return err + } + } + + // if we have a remapped root (user namespaces enabled), change the created snapshot + // dir ownership to match + if rootUID != 0 || rootGID != 0 { + if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil { + return err + } + } + + return label.Relabel(path.Join(subvolumes, id), mountLabel, false) +} + +// Parse btrfs storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to change the subvolume disk quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.size = uint64(size) + default: + return fmt.Errorf("Unknown option %s", key) + } + } + + return nil +} + +// Set btrfs storage size +func (d *Driver) setStorageSize(dir string, driver *Driver) error { + if driver.options.size <= 0 { + return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(driver.options.size))) + } + if d.options.minSpace > 0 && driver.options.size < d.options.minSpace { + return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace))) + } + + if !quotaEnabled { + if err := subvolEnableQuota(d.home); err != nil { + return err + } + quotaEnabled = true + } + + if err := subvolLimitQgroup(dir, driver.options.size); err != nil { + return err + } + + return nil +} + +// Remove the filesystem with given id. +func (d *Driver) Remove(id string) error { + dir := d.subvolumesDirID(id) + if _, err := os.Stat(dir); err != nil { + return err + } + if err := subvolDelete(d.subvolumesDir(), id); err != nil { + return err + } + if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { + return err + } + if err := subvolRescanQuota(d.home); err != nil { + return err + } + return nil +} + +// Get the requested filesystem id. +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.subvolumesDirID(id) + st, err := os.Stat(dir) + if err != nil { + return "", err + } + + if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + + return dir, nil +} + +// Put is not implemented for BTRFS as there is no cleanup required for the id. +func (d *Driver) Put(id string) error { + // Get() creates no runtime resources (like e.g. mounts) + // so this doesn't need to do anything. + return nil +} + +// Exists checks if the id exists in the filesystem. 
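The quota-related size checks in setStorageSize reduce to two comparisons; here they are distilled into a standalone sketch (the example values are invented):

```go
package main

import (
	"fmt"

	"github.com/docker/go-units"
)

// checkSize mirrors the validation in setStorageSize above: a size must
// be non-zero and may not undercut the daemon-wide btrfs.min_space floor.
func checkSize(size, minSpace uint64) error {
	if size == 0 {
		return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(size)))
	}
	if minSpace > 0 && size < minSpace {
		return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(minSpace)))
	}
	return nil
}

func main() {
	minSpace := uint64(1 << 30) // btrfs.min_space=1G
	fmt.Println(checkSize(10<<30, minSpace))  // <nil>
	fmt.Println(checkSize(512<<20, minSpace)) // below the floor: error
}
```

Exists, below, is the cheap stat-based membership test.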
+func (d *Driver) Exists(id string) bool {
+	dir := d.subvolumesDirID(id)
+	_, err := os.Stat(dir)
+	return err == nil
+}
diff --git a/vendor/src/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go b/vendor/src/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
new file mode 100644
index 00000000..f0708888
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux !cgo
+
+package btrfs
diff --git a/vendor/src/github.com/containers/storage/drivers/btrfs/version.go b/vendor/src/github.com/containers/storage/drivers/btrfs/version.go
new file mode 100644
index 00000000..73d90cdd
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/drivers/btrfs/version.go
@@ -0,0 +1,26 @@
+// +build linux,!btrfs_noversion
+
+package btrfs
+
+/*
+#include <btrfs/version.h>
+
+// around version 3.16, they did not define lib version yet
+#ifndef BTRFS_LIB_VERSION
+#define BTRFS_LIB_VERSION -1
+#endif
+
+// upstream had removed it, but now it will be coming back
+#ifndef BTRFS_BUILD_VERSION
+#define BTRFS_BUILD_VERSION "-"
+#endif
+*/
+import "C"
+
+func btrfsBuildVersion() string {
+	return string(C.BTRFS_BUILD_VERSION)
+}
+
+func btrfsLibVersion() int {
+	return int(C.BTRFS_LIB_VERSION)
+}
diff --git a/vendor/src/github.com/containers/storage/drivers/btrfs/version_none.go b/vendor/src/github.com/containers/storage/drivers/btrfs/version_none.go
new file mode 100644
index 00000000..f802fbc6
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/drivers/btrfs/version_none.go
@@ -0,0 +1,14 @@
+// +build linux,btrfs_noversion
+
+package btrfs
+
+// TODO(vbatts) remove this work-around once supported linux distros are on
+// btrfs utilities of >= 3.16.1
+
+func btrfsBuildVersion() string {
+	return "-"
+}
+
+func btrfsLibVersion() int {
+	return -1
+}
diff --git a/vendor/src/github.com/containers/storage/drivers/counter.go b/vendor/src/github.com/containers/storage/drivers/counter.go
new file mode 100644
index 00000000..5ea604f5
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/drivers/counter.go
@@ -0,0 +1,67 @@
+package graphdriver
+
+import "sync"
+
+type minfo struct {
+	check bool
+	count int
+}
+
+// RefCounter is a generic counter for use by graphdriver Get/Put calls
+type RefCounter struct {
+	counts  map[string]*minfo
+	mu      sync.Mutex
+	checker Checker
+}
+
+// NewRefCounter returns a new RefCounter
+func NewRefCounter(c Checker) *RefCounter {
+	return &RefCounter{
+		checker: c,
+		counts:  make(map[string]*minfo),
+	}
+}
+
+// Increment increases the ref count for the given id and returns the current count
+func (c *RefCounter) Increment(path string) int {
+	c.mu.Lock()
+	m := c.counts[path]
+	if m == nil {
+		m = &minfo{}
+		c.counts[path] = m
+	}
+	// If we are checking this path for the first time, check whether it is
+	// already mounted on the system; if it is, bump the count so it
+	// reflects the mount that is already in use.
+	if !m.check {
+		m.check = true
+		if c.checker.IsMounted(path) {
+			m.count++
+		}
+	}
+	m.count++
+	c.mu.Unlock()
+	return m.count
+}
+
+// Decrement decreases the ref count for the given id and returns the current count
+func (c *RefCounter) Decrement(path string) int {
+	c.mu.Lock()
+	m := c.counts[path]
+	if m == nil {
+		m = &minfo{}
+		c.counts[path] = m
+	}
+	// If we are checking this path for the first time, check whether it is
+	// already mounted on the system; if it is, bump the count so it
+	// reflects the mount that is already in use.
+	if !m.check {
+		m.check = true
+		if c.checker.IsMounted(path) {
+			m.count++
+		}
+	}
+	m.count--
+	c.mu.Unlock()
+	return m.count
+}
diff --git a/vendor/src/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/src/github.com/containers/storage/drivers/devmapper/deviceset.go
new file mode 100644
index 00000000..0accf31f
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/drivers/devmapper/deviceset.go
@@ -0,0 +1,2661 @@
+// +build linux
+
+package devmapper
+
+import (
+	"bufio"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+
+	"github.com/containers/storage/drivers"
+	"github.com/containers/storage/pkg/devicemapper"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/containers/storage/pkg/loopback"
+	"github.com/containers/storage/pkg/mount"
+	"github.com/containers/storage/pkg/parsers"
+	"github.com/containers/storage/storageversion"
+	"github.com/docker/go-units"
+
+	"github.com/opencontainers/runc/libcontainer/label"
+)
+
+var (
+	defaultDataLoopbackSize     int64  = 100 * 1024 * 1024 * 1024
+	defaultMetaDataLoopbackSize int64  = 2 * 1024 * 1024 * 1024
+	defaultBaseFsSize           uint64 = 10 * 1024 * 1024 * 1024
+	defaultThinpBlockSize       uint32 = 128 // 64K = 128 512b sectors
+	defaultUdevSyncOverride            = false
+	maxDeviceID                        = 0xffffff // 24 bit, pool limit
+	deviceIDMapSz                      = (maxDeviceID + 1) / 8
+	// We retry device removal so many times that even error messages
+	// will fill up the console during normal operation. So only log Fatal
+	// messages by default.
+	logLevel                            = devicemapper.LogLevelFatal
+	driverDeferredRemovalSupport        = false
+	enableDeferredRemoval               = false
+	enableDeferredDeletion              = false
+	userBaseSize                        = false
+	defaultMinFreeSpacePercent   uint32 = 10
+)
+
+const deviceSetMetaFile string = "deviceset-metadata"
+const transactionMetaFile string = "transaction-metadata"
+
+type transaction struct {
+	OpenTransactionID uint64 `json:"open_transaction_id"`
+	DeviceIDHash      string `json:"device_hash"`
+	DeviceID          int    `json:"device_id"`
+}
+
+type devInfo struct {
+	Hash          string `json:"-"`
+	DeviceID      int    `json:"device_id"`
+	Size          uint64 `json:"size"`
+	TransactionID uint64 `json:"transaction_id"`
+	Initialized   bool   `json:"initialized"`
+	Deleted       bool   `json:"deleted"`
+	devices       *DeviceSet
+
+	// The global DeviceSet lock guarantees that we serialize all
+	// the calls to libdevmapper (which is not threadsafe), but we
+	// sometimes release that lock while sleeping. In that case
+	// this per-device lock is still held, protecting against
+	// other accesses to the device that we're doing the wait on.
+	//
+	// WARNING: In order to avoid AB-BA deadlocks when releasing
+	// the global lock while holding per-device locks, all per-device
+	// locks must be acquired *before* the global lock, and multiple
+	// device locks should be acquired parent before child.
+ lock sync.Mutex +} + +type metaData struct { + Devices map[string]*devInfo `json:"Devices"` +} + +// DeviceSet holds information about list of devices +type DeviceSet struct { + metaData `json:"-"` + sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper + root string + devicePrefix string + TransactionID uint64 `json:"-"` + NextDeviceID int `json:"next_device_id"` + deviceIDMap []byte + + // Options + dataLoopbackSize int64 + metaDataLoopbackSize int64 + baseFsSize uint64 + filesystem string + mountOptions string + mkfsArgs []string + dataDevice string // block or loop dev + dataLoopFile string // loopback file, if used + metadataDevice string // block or loop dev + metadataLoopFile string // loopback file, if used + doBlkDiscard bool + thinpBlockSize uint32 + thinPoolDevice string + transaction `json:"-"` + overrideUdevSyncCheck bool + deferredRemove bool // use deferred removal + deferredDelete bool // use deferred deletion + BaseDeviceUUID string // save UUID of base device + BaseDeviceFilesystem string // save filesystem of base device + nrDeletedDevices uint // number of deleted devices + deletionWorkerTicker *time.Ticker + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + minFreeSpacePercent uint32 //min free space percentage in thinpool +} + +// DiskUsage contains information about disk usage and is used when reporting Status of a device. +type DiskUsage struct { + // Used bytes on the disk. + Used uint64 + // Total bytes on the disk. + Total uint64 + // Available bytes on the disk. + Available uint64 +} + +// Status returns the information about the device. +type Status struct { + // PoolName is the name of the data pool. + PoolName string + // DataFile is the actual block device for data. + DataFile string + // DataLoopback loopback file, if used. + DataLoopback string + // MetadataFile is the actual block device for metadata. + MetadataFile string + // MetadataLoopback is the loopback file, if used. + MetadataLoopback string + // Data is the disk used for data. + Data DiskUsage + // Metadata is the disk used for meta data. + Metadata DiskUsage + // BaseDeviceSize is base size of container and image + BaseDeviceSize uint64 + // BaseDeviceFS is backing filesystem. + BaseDeviceFS string + // SectorSize size of the vector. + SectorSize uint64 + // UdevSyncSupported is true if sync is supported. + UdevSyncSupported bool + // DeferredRemoveEnabled is true then the device is not unmounted. + DeferredRemoveEnabled bool + // True if deferred deletion is enabled. This is different from + // deferred removal. "removal" means that device mapper device is + // deactivated. Thin device is still in thin pool and can be activated + // again. But "deletion" means that thin device will be deleted from + // thin pool and it can't be activated again. + DeferredDeleteEnabled bool + DeferredDeletedDeviceCount uint + MinFreeSpace uint64 +} + +// Structure used to export image/container metadata in docker inspect. +type deviceMetadata struct { + deviceID int + deviceSize uint64 // size in bytes + deviceName string // Device name as used during activation +} + +// DevStatus returns information about device mounted containing its id, size and sector information. +type DevStatus struct { + // DeviceID is the id of the device. + DeviceID int + // Size is the size of the filesystem. + Size uint64 + // TransactionID is a unique integer per device set used to identify an operation on the file system, this number is incremental. 
+	TransactionID uint64
+	// SizeInSectors indicates the size of the sectors allocated.
+	SizeInSectors uint64
+	// MappedSectors indicates number of mapped sectors.
+	MappedSectors uint64
+	// HighestMappedSector is the pointer to the highest mapped sector.
+	HighestMappedSector uint64
+}
+
+func getDevName(name string) string {
+	return "/dev/mapper/" + name
+}
+
+func (info *devInfo) Name() string {
+	hash := info.Hash
+	if hash == "" {
+		hash = "base"
+	}
+	return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash)
+}
+
+func (info *devInfo) DevName() string {
+	return getDevName(info.Name())
+}
+
+func (devices *DeviceSet) loopbackDir() string {
+	return path.Join(devices.root, "devicemapper")
+}
+
+func (devices *DeviceSet) metadataDir() string {
+	return path.Join(devices.root, "metadata")
+}
+
+func (devices *DeviceSet) metadataFile(info *devInfo) string {
+	file := info.Hash
+	if file == "" {
+		file = "base"
+	}
+	return path.Join(devices.metadataDir(), file)
+}
+
+func (devices *DeviceSet) transactionMetaFile() string {
+	return path.Join(devices.metadataDir(), transactionMetaFile)
+}
+
+func (devices *DeviceSet) deviceSetMetaFile() string {
+	return path.Join(devices.metadataDir(), deviceSetMetaFile)
+}
+
+func (devices *DeviceSet) oldMetadataFile() string {
+	return path.Join(devices.loopbackDir(), "json")
+}
+
+func (devices *DeviceSet) getPoolName() string {
+	if devices.thinPoolDevice == "" {
+		return devices.devicePrefix + "-pool"
+	}
+	return devices.thinPoolDevice
+}
+
+func (devices *DeviceSet) getPoolDevName() string {
+	return getDevName(devices.getPoolName())
+}
+
+func (devices *DeviceSet) hasImage(name string) bool {
+	dirname := devices.loopbackDir()
+	filename := path.Join(dirname, name)
+
+	_, err := os.Stat(filename)
+	return err == nil
+}
+
+// ensureImage creates a sparse file of <size> bytes at the path
+// <root>/devicemapper/<name>.
+// If the file already exists and the new size is larger than its current
+// size, it grows to the new size. Either way it returns the full path.
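Sparse allocation is what makes the 100 GB default data file cheap: Truncate sets the logical size, and a filesystem with sparse-file support allocates blocks only as they are written. A minimal sketch of the technique that ensureImage, below, relies on (the path is an example):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	f, err := os.OpenFile("/tmp/sparse-example", os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	defer os.Remove(f.Name())

	// 100 GiB of logical size; almost no disk blocks are consumed
	// until data is actually written into the file.
	if err := f.Truncate(100 << 30); err != nil {
		panic(err)
	}

	fi, _ := f.Stat()
	fmt.Println(fi.Size()) // 107374182400
}
```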
+func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return "", err + } + if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil && !os.IsExist(err) { + return "", err + } + + if fi, err := os.Stat(filename); err != nil { + if !os.IsNotExist(err) { + return "", err + } + logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + + if err := file.Truncate(size); err != nil { + return "", err + } + } else { + if fi.Size() < size { + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + if err := file.Truncate(size); err != nil { + return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err) + } + } else if fi.Size() > size { + logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) + } + } + return filename, nil +} + +func (devices *DeviceSet) allocateTransactionID() uint64 { + devices.OpenTransactionID = devices.TransactionID + 1 + return devices.OpenTransactionID +} + +func (devices *DeviceSet) updatePoolTransactionID() error { + if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil { + return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err) + } + devices.TransactionID = devices.OpenTransactionID + return nil +} + +func (devices *DeviceSet) removeMetadata(info *devInfo) error { + if err := os.RemoveAll(devices.metadataFile(info)); err != nil { + return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err) + } + return nil +} + +// Given json data and file path, write it to disk +func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { + tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") + if err != nil { + return fmt.Errorf("devmapper: Error creating metadata file: %s", err) + } + + n, err := tmpFile.Write(jsonData) + if err != nil { + return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err) + } + if n < len(jsonData) { + return io.ErrShortWrite + } + if err := tmpFile.Sync(); err != nil { + return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err) + } + if err := tmpFile.Close(); err != nil { + return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err) + } + if err := os.Rename(tmpFile.Name(), filePath); err != nil { + return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err) + } + + return nil +} + +func (devices *DeviceSet) saveMetadata(info *devInfo) error { + jsonData, err := json.Marshal(info) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { + var mask byte + i := deviceID % 8 + mask = 1 << uint(i) + devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask +} + +func (devices *DeviceSet) markDeviceIDFree(deviceID int) { + var mask byte + i := deviceID % 8 + mask = ^(1 << uint(i)) + 
devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask +} + +func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { + var mask byte + i := deviceID % 8 + mask = (1 << uint(i)) + if (devices.deviceIDMap[deviceID/8] & mask) != 0 { + return false + } + return true +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) { + info := devices.Devices[hash] + if info == nil { + info = devices.loadMetadata(hash) + if info == nil { + return nil, fmt.Errorf("devmapper: Unknown device %s", hash) + } + + devices.Devices[hash] = info + } + return info, nil +} + +func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + info, err := devices.lookupDevice(hash) + return info, err +} + +// This function relies on that device hash map has been loaded in advance. +// Should be called with devices.Lock() held. +func (devices *DeviceSet) constructDeviceIDMap() { + logrus.Debugf("devmapper: constructDeviceIDMap()") + defer logrus.Debugf("devmapper: constructDeviceIDMap() END") + + for _, info := range devices.Devices { + devices.markDeviceIDUsed(info.DeviceID) + logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID) + } +} + +func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { + + // Skip some of the meta files which are not device files. + if strings.HasSuffix(finfo.Name(), ".migrated") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if strings.HasPrefix(finfo.Name(), ".") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == deviceSetMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == transactionMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + logrus.Debugf("devmapper: Loading data for file %s", path) + + hash := finfo.Name() + if hash == "base" { + hash = "" + } + + // Include deleted devices also as cleanup delete device logic + // will go through it and see if there are any deleted devices. + if _, err := devices.lookupDevice(hash); err != nil { + return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err) + } + + return nil +} + +func (devices *DeviceSet) loadDeviceFilesOnStart() error { + logrus.Debugf("devmapper: loadDeviceFilesOnStart()") + defer logrus.Debugf("devmapper: loadDeviceFilesOnStart() END") + + var scan = func(path string, info os.FileInfo, err error) error { + if err != nil { + logrus.Debugf("devmapper: Can't walk the file %s", path) + return nil + } + + // Skip any directories + if info.IsDir() { + return nil + } + + return devices.deviceFileWalkFunction(path, info) + } + + return filepath.Walk(devices.metadataDir(), scan) +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) unregisterDevice(id int, hash string) error { + logrus.Debugf("devmapper: unregisterDevice(%v, %v)", id, hash) + info := &devInfo{ + Hash: hash, + DeviceID: id, + } + + delete(devices.Devices, hash) + + if err := devices.removeMetadata(info); err != nil { + logrus.Debugf("devmapper: Error removing metadata: %s", err) + return err + } + + return nil +} + +// Should be called with devices.Lock() held. 
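The device-ID bookkeeping above is a plain bitmap: 2^24 possible thin-device ids, one bit apiece. The same masks used by markDeviceIDUsed, markDeviceIDFree, and isDeviceIDFree, distilled into a standalone sketch:

```go
package main

import "fmt"

const maxDeviceID = 0xffffff // 24-bit pool limit

var deviceIDMap = make([]byte, (maxDeviceID+1)/8)

func markUsed(id int) { deviceIDMap[id/8] |= 1 << uint(id%8) }
func markFree(id int) { deviceIDMap[id/8] &^= 1 << uint(id%8) }
func isFree(id int) bool {
	return deviceIDMap[id/8]&(1<<uint(id%8)) == 0
}

func main() {
	markUsed(42)
	fmt.Println(isFree(42), isFree(43)) // false true
	markFree(42)
	fmt.Println(isFree(42)) // true
}
```

registerDevice, which follows, pairs a freshly allocated id with its on-disk JSON record.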
+func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) { + logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash) + info := &devInfo{ + Hash: hash, + DeviceID: id, + Size: size, + TransactionID: transactionID, + Initialized: false, + devices: devices, + } + + devices.Devices[hash] = info + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, hash) + return nil, err + } + + return info, nil +} + +func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error { + logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash) + + if info.Deleted && !ignoreDeleted { + return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash) + } + + // Make sure deferred removal on device is canceled, if one was + // scheduled. + if err := devices.cancelDeferredRemoval(info); err != nil { + return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) + } + + if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { + return nil + } + + return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) +} + +// Return true only if kernel supports xfs and mkfs.xfs is available +func xfsSupported() bool { + // Make sure mkfs.xfs is available + if _, err := exec.LookPath("mkfs.xfs"); err != nil { + return false + } + + // Check if kernel supports xfs filesystem or not. + exec.Command("modprobe", "xfs").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + return false + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.HasSuffix(s.Text(), "\txfs") { + return true + } + } + + if err := s.Err(); err != nil { + logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + } + return false +} + +func determineDefaultFS() string { + if xfsSupported() { + return "xfs" + } + + logrus.Warn("devmapper: XFS is not supported in your system. Either the kernel doesn't support it or mkfs.xfs is not in your PATH. 
Defaulting to ext4 filesystem") + return "ext4" +} + +func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { + devname := info.DevName() + + args := []string{} + for _, arg := range devices.mkfsArgs { + args = append(args, arg) + } + + args = append(args, devname) + + if devices.filesystem == "" { + devices.filesystem = determineDefaultFS() + } + if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil { + return err + } + + logrus.Infof("devmapper: Creating filesystem %s on device %s", devices.filesystem, info.Name()) + defer func() { + if err != nil { + logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) + } else { + logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) + } + }() + + switch devices.filesystem { + case "xfs": + err = exec.Command("mkfs.xfs", args...).Run() + case "ext4": + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() + if err != nil { + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() + } + if err != nil { + return err + } + err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() + default: + err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem) + } + return +} + +func (devices *DeviceSet) migrateOldMetaData() error { + // Migrate old metadata file + jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) + if err != nil && !os.IsNotExist(err) { + return err + } + + if jsonData != nil { + m := metaData{Devices: make(map[string]*devInfo)} + + if err := json.Unmarshal(jsonData, &m); err != nil { + return err + } + + for hash, info := range m.Devices { + info.Hash = hash + devices.saveMetadata(info) + } + if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { + return err + } + + } + + return nil +} + +// Cleanup deleted devices. It assumes that all the devices have been +// loaded in the hash table. +func (devices *DeviceSet) cleanupDeletedDevices() error { + devices.Lock() + + // If there are no deleted devices, there is nothing to do. + if devices.nrDeletedDevices == 0 { + devices.Unlock() + return nil + } + + var deletedDevices []*devInfo + + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + logrus.Debugf("devmapper: Found deleted device %s.", info.Hash) + deletedDevices = append(deletedDevices, info) + } + + // Delete the deleted devices. DeleteDevice() first takes the info lock + // and then devices.Lock(). So drop it to avoid deadlock. + devices.Unlock() + + for _, info := range deletedDevices { + // This will again try deferred deletion. + if err := devices.DeleteDevice(info.Hash, false); err != nil { + logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) + } + } + + return nil +} + +func (devices *DeviceSet) countDeletedDevices() { + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + devices.nrDeletedDevices++ + } +} + +func (devices *DeviceSet) startDeviceDeletionWorker() { + // Deferred deletion is not enabled. Don't do anything. 
+ if !devices.deferredDelete { + return + } + + logrus.Debug("devmapper: Worker to cleanup deleted devices started") + for range devices.deletionWorkerTicker.C { + devices.cleanupDeletedDevices() + } +} + +func (devices *DeviceSet) initMetaData() error { + devices.Lock() + defer devices.Unlock() + + if err := devices.migrateOldMetaData(); err != nil { + return err + } + + _, transactionID, _, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + + devices.TransactionID = transactionID + + if err := devices.loadDeviceFilesOnStart(); err != nil { + return fmt.Errorf("devmapper: Failed to load device files:%v", err) + } + + devices.constructDeviceIDMap() + devices.countDeletedDevices() + + if err := devices.processPendingTransaction(); err != nil { + return err + } + + // Start a goroutine to cleanup Deleted Devices + go devices.startDeviceDeletionWorker() + return nil +} + +func (devices *DeviceSet) incNextDeviceID() { + // IDs are 24bit, so wrap around + devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID +} + +func (devices *DeviceSet) getNextFreeDeviceID() (int, error) { + devices.incNextDeviceID() + for i := 0; i <= maxDeviceID; i++ { + if devices.isDeviceIDFree(devices.NextDeviceID) { + devices.markDeviceIDUsed(devices.NextDeviceID) + return devices.NextDeviceID, nil + } + devices.incNextDeviceID() + } + + return 0, fmt.Errorf("devmapper: Unable to find a free device ID") +} + +func (devices *DeviceSet) poolHasFreeSpace() error { + if devices.minFreeSpacePercent == 0 { + return nil + } + + _, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err != nil { + return err + } + + minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeData < 1 { + minFreeData = 1 + } + dataFree := dataTotal - dataUsed + if dataFree < minFreeData { + return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData) + } + + minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeMetadata < 1 { + minFreeMetadata = 1 + } + + metadataFree := metadataTotal - metadataUsed + if metadataFree < minFreeMetadata { + return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata) + } + + return nil +} + +func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + for { + if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating device: %s", err) + devices.markDeviceIDFree(deviceID) + return nil, err + } + break + } + + logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) + info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) + if err != nil { + _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(deviceID, hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + return info, nil +} + +func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { + if err := devices.poolHasFreeSpace(); err != nil { + return err + } + + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + + for { + if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), deviceID, baseInfo.Name(), baseInfo.DeviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating snap device: %s", err) + devices.markDeviceIDFree(deviceID) + return err + } + break + } + + if _, err := devices.registerDevice(deviceID, hash, size, devices.OpenTransactionID); err != nil { + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + logrus.Debugf("devmapper: Error registering device: %s", err) + return err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(deviceID, hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + return nil +} + +func (devices *DeviceSet) loadMetadata(hash string) *devInfo { + info := &devInfo{Hash: hash, devices: devices} + + jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) + if err != nil { + logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if err := json.Unmarshal(jsonData, &info); err != nil { + logrus.Debugf("devmapper: Failed to unmarshal devInfo from %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if info.DeviceID > maxDeviceID { + logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID) + return nil + } + + return info +} + +func getDeviceUUID(device string) (string, error) { + out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() + if err != nil { + return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err) + } + + uuid := strings.TrimSuffix(string(out), "\n") + uuid = strings.TrimSpace(uuid) + logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid) + return uuid, nil +} + +func (devices *DeviceSet) getBaseDeviceSize() uint64 { + info, _ := devices.lookupDevice("") + if info == nil { + return 0 + } + return info.Size +} + +func (devices *DeviceSet) getBaseDeviceFS() string { + return devices.BaseDeviceFilesystem +} + +func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + if devices.BaseDeviceUUID != uuid { + return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID) + } + + if devices.BaseDeviceFilesystem == "" { + fsType, err := ProbeFsType(baseInfo.DevName()) + if err != nil { + return err + } + if err := devices.saveBaseDeviceFilesystem(fsType); err != nil { + return err + } + } + + // If user specified a filesystem using dm.fs option and current + // file system of base image is not same, warn user that dm.fs + // will be ignored. + if devices.BaseDeviceFilesystem != devices.filesystem { + logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. 
User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem) + devices.filesystem = devices.BaseDeviceFilesystem + } + return nil +} + +func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error { + devices.BaseDeviceFilesystem = fs + return devices.saveDeviceSetMetaData() +} + +func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error { + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { + return err + } + defer devices.deactivateDevice(baseInfo) + + uuid, err := getDeviceUUID(baseInfo.DevName()) + if err != nil { + return err + } + + devices.BaseDeviceUUID = uuid + return devices.saveDeviceSetMetaData() +} + +func (devices *DeviceSet) createBaseImage() error { + logrus.Debug("devmapper: Initializing base device-mapper thin volume") + + // Create initial device + info, err := devices.createRegisterDevice("") + if err != nil { + return err + } + + logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume") + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return err + } + + if err := devices.createFilesystem(info); err != nil { + return err + } + + info.Initialized = true + if err := devices.saveMetadata(info); err != nil { + info.Initialized = false + return err + } + + if err := devices.saveBaseDeviceUUID(info); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + } + + return nil +} + +// Returns if thin pool device exists or not. If device exists, also makes +// sure it is a thin pool device and not some other type of device. +func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) { + logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice) + + info, err := devicemapper.GetInfo(thinPoolDevice) + if err != nil { + return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err) + } + + // Device does not exist. + if info.Exists == 0 { + return false, nil + } + + _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice) + if err != nil { + return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err) + } + + if deviceType != "thin-pool" { + return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice) + } + + return true, nil +} + +func (devices *DeviceSet) checkThinPool() error { + _, transactionID, dataUsed, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + if dataUsed != 0 { + return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks", + devices.thinPoolDevice) + } + if transactionID != 0 { + return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID", + devices.thinPoolDevice) + } + return nil +} + +// Base image is initialized properly. Either save UUID for first time (for +// upgrade case or verify UUID. +func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error { + // If BaseDeviceUUID is nil (upgrade case), save it and return success. 
+ if devices.BaseDeviceUUID == "" { + if err := devices.saveBaseDeviceUUID(baseInfo); err != nil { + return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) + } + return nil + } + + if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil { + return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err) + } + + return nil +} + +func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { + + if !userBaseSize { + return nil + } + + if devices.baseFsSize < devices.getBaseDeviceSize() { + return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize()))) + } + + if devices.baseFsSize == devices.getBaseDeviceSize() { + return nil + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + info.Size = devices.baseFsSize + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, info.Hash) + return err + } + + return devices.growFS(info) +} + +func (devices *DeviceSet) growFS(info *devInfo) error { + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return fmt.Errorf("Error activating devmapper device: %s", err) + } + + defer devices.deactivateDevice(info) + + fsMountPoint := "/run/docker/mnt" + if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { + if err := os.MkdirAll(fsMountPoint, 0700); err != nil { + return err + } + defer os.RemoveAll(fsMountPoint) + } + + options := "" + if devices.BaseDeviceFilesystem == "xfs" { + // XFS needs nouuid or it can't mount filesystems with the same fs + options = joinMountOptions(options, "nouuid") + } + options = joinMountOptions(options, devices.mountOptions) + + if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { + return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err) + } + + defer syscall.Unmount(fsMountPoint, syscall.MNT_DETACH) + + switch devices.BaseDeviceFilesystem { + case "ext4": + if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + case "xfs": + if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil { + return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) + } + default: + return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem) + } + return nil +} + +func (devices *DeviceSet) setupBaseImage() error { + oldInfo, _ := devices.lookupDeviceWithLock("") + + // base image already exists. If it is initialized properly, do UUID + // verification and return. Otherwise remove image and set it up + // fresh. + + if oldInfo != nil { + if oldInfo.Initialized && !oldInfo.Deleted { + if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil { + return err + } + + if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil { + return err + } + + return nil + } + + logrus.Debug("devmapper: Removing uninitialized base image") + // If previous base device is in deferred delete state, + // that needs to be cleaned up first. So don't try + // deferred deletion. + if err := devices.DeleteDevice("", true); err != nil { + return err + } + } + + // If we are setting up base image for the first time, make sure + // thin pool is empty. 
+ if devices.thinPoolDevice != "" && oldInfo == nil { + if err := devices.checkThinPool(); err != nil { + return err + } + } + + // Create new base image device + if err := devices.createBaseImage(); err != nil { + return err + } + + return nil +} + +func setCloseOnExec(name string) { + if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { + for _, i := range fileInfos { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) + if link == name { + fd, err := strconv.Atoi(i.Name()) + if err == nil { + syscall.CloseOnExec(fd) + } + } + } + } +} + +// DMLog implements logging using DevMapperLogger interface. +func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) { + // By default libdm sends us all the messages including debug ones. + // We need to filter out messages here and figure out which one + // should be printed. + if level > logLevel { + return + } + + // FIXME(vbatts) push this back into ./pkg/devicemapper/ + if level <= devicemapper.LogLevelErr { + logrus.Errorf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } else if level <= devicemapper.LogLevelInfo { + logrus.Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } else { + // FIXME(vbatts) push this back into ./pkg/devicemapper/ + logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + } +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// ResizePool increases the size of the pool. +func (devices *DeviceSet) ResizePool(size int64) error { + dirname := devices.loopbackDir() + datafilename := path.Join(dirname, "data") + if len(devices.dataDevice) > 0 { + datafilename = devices.dataDevice + } + metadatafilename := path.Join(dirname, "metadata") + if len(devices.metadataDevice) > 0 { + metadatafilename = devices.metadataDevice + } + + datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) + if datafile == nil { + return err + } + defer datafile.Close() + + fi, err := datafile.Stat() + if fi == nil { + return err + } + + if fi.Size() > size { + return fmt.Errorf("devmapper: Can't shrink file") + } + + dataloopback := loopback.FindLoopDeviceFor(datafile) + if dataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename) + } + defer dataloopback.Close() + + metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) + if metadatafile == nil { + return err + } + defer metadatafile.Close() + + metadataloopback := loopback.FindLoopDeviceFor(metadatafile) + if metadataloopback == nil { + return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename) + } + defer metadataloopback.Close() + + // Grow loopback file + if err := datafile.Truncate(size); err != nil { + return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err) + } + + // Reload size for loopback device + if err := loopback.SetCapacity(dataloopback); err != nil { + return fmt.Errorf("Unable to update loopback capacity: %s", err) + } + + // Suspend the pool + if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to suspend pool: %s", err) + } + + // Reload with the new block sizes + if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { + return fmt.Errorf("devmapper: Unable to reload pool: 
%s", err) + } + + // Resume the pool + if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("devmapper: Unable to resume pool: %s", err) + } + + return nil +} + +func (devices *DeviceSet) loadTransactionMetaData() error { + jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) + if err != nil { + // There is no active transaction. This will be the case + // during upgrade. + if os.IsNotExist(err) { + devices.OpenTransactionID = devices.TransactionID + return nil + } + return err + } + + json.Unmarshal(jsonData, &devices.transaction) + return nil +} + +func (devices *DeviceSet) saveTransactionMetaData() error { + jsonData, err := json.Marshal(&devices.transaction) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) +} + +func (devices *DeviceSet) removeTransactionMetaData() error { + if err := os.RemoveAll(devices.transactionMetaFile()); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) rollbackTransaction() error { + logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) + + // A device id might have already been deleted before transaction + // closed. In that case this call will fail. Just leave a message + // in case of failure. + if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil { + logrus.Errorf("devmapper: Unable to delete device: %s", err) + } + + dinfo := &devInfo{Hash: devices.DeviceIDHash} + if err := devices.removeMetadata(dinfo); err != nil { + logrus.Errorf("devmapper: Unable to remove metadata: %s", err) + } else { + devices.markDeviceIDFree(devices.DeviceID) + } + + if err := devices.removeTransactionMetaData(); err != nil { + logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) + } + + return nil +} + +func (devices *DeviceSet) processPendingTransaction() error { + if err := devices.loadTransactionMetaData(); err != nil { + return err + } + + // If there was open transaction but pool transaction ID is same + // as open transaction ID, nothing to roll back. + if devices.TransactionID == devices.OpenTransactionID { + return nil + } + + // If open transaction ID is less than pool transaction ID, something + // is wrong. Bail out. + if devices.OpenTransactionID < devices.TransactionID { + logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID) + return nil + } + + // Pool transaction ID is not same as open transaction. There is + // a transaction which was not completed. + if err := devices.rollbackTransaction(); err != nil { + return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err) + } + + devices.OpenTransactionID = devices.TransactionID + return nil +} + +func (devices *DeviceSet) loadDeviceSetMetaData() error { + jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) + if err != nil { + // For backward compatibility return success if file does + // not exist. 
+ if os.IsNotExist(err) { + return nil + } + return err + } + + return json.Unmarshal(jsonData, devices) +} + +func (devices *DeviceSet) saveDeviceSetMetaData() error { + jsonData, err := json.Marshal(devices) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + + return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) +} + +func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error { + devices.allocateTransactionID() + devices.DeviceIDHash = hash + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) refreshTransaction(DeviceID int) error { + devices.DeviceID = DeviceID + if err := devices.saveTransactionMetaData(); err != nil { + return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) + } + return nil +} + +func (devices *DeviceSet) closeTransaction() error { + if err := devices.updatePoolTransactionID(); err != nil { + logrus.Debug("devmapper: Failed to close Transaction") + return err + } + return nil +} + +func determineDriverCapabilities(version string) error { + /* + * Driver version 4.27.0 and greater support deferred activation + * feature. + */ + + logrus.Debugf("devicemapper: driver version is %s", version) + + versionSplit := strings.Split(version, ".") + major, err := strconv.Atoi(versionSplit[0]) + if err != nil { + return graphdriver.ErrNotSupported + } + + if major > 4 { + driverDeferredRemovalSupport = true + return nil + } + + if major < 4 { + return nil + } + + minor, err := strconv.Atoi(versionSplit[1]) + if err != nil { + return graphdriver.ErrNotSupported + } + + /* + * If major is 4 and minor is 27, then there is no need to + * check for patch level as it can not be less than 0. + */ + if minor >= 27 { + driverDeferredRemovalSupport = true + return nil + } + + return nil +} + +// Determine the major and minor number of loopback device +func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { + stat, err := file.Stat() + if err != nil { + return 0, 0, err + } + + dev := stat.Sys().(*syscall.Stat_t).Rdev + majorNum := major(dev) + minorNum := minor(dev) + + logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) + return majorNum, minorNum, nil +} + +// Given a file which is backing file of a loop back device, find the +// loopback device name and its major/minor number. 
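+// An illustrative call (the path is hypothetical; loop devices always
+// carry major number 7 on Linux):
+//
+//	devName, maj, min, err := getLoopFileDeviceMajMin("/var/lib/storage/devicemapper/devicemapper/data")
+//	// devName == "/dev/loop0", maj == 7, min == 0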
+func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { + file, err := os.Open(filename) + if err != nil { + logrus.Debugf("devmapper: Failed to open file %s", filename) + return "", 0, 0, err + } + + defer file.Close() + loopbackDevice := loopback.FindLoopDeviceFor(file) + if loopbackDevice == nil { + return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename) + } + defer loopbackDevice.Close() + + Major, Minor, err := getDeviceMajorMinor(loopbackDevice) + if err != nil { + return "", 0, 0, err + } + return loopbackDevice.Name(), Major, Minor, nil +} + +// Get the major/minor numbers of thin pool data and metadata devices +func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) { + var params, poolDataMajMin, poolMetadataMajMin string + + _, _, _, params, err := devicemapper.GetTable(devices.getPoolName()) + if err != nil { + return 0, 0, 0, 0, err + } + + if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil { + return 0, 0, 0, 0, err + } + + logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) + + poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") + poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":") + poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32) + if err != nil { + return 0, 0, 0, 0, err + } + + return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil +} + +func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { + poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin() + if err != nil { + return err + } + + dirname := devices.loopbackDir() + + // data device has not been passed in. So there should be a data file + // which is being mounted as loop device. + if devices.dataDevice == "" { + datafilename := path.Join(dirname, "data") + dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename) + if err != nil { + return err + } + + // Compare the two + if poolDataMajor == dataMajor && poolDataMinor == dataMinor { + devices.dataDevice = dataLoopDevice + devices.dataLoopFile = datafilename + } + + } + + // metadata device has not been passed in. So there should be a + // metadata file which is being mounted as loop device. + if devices.metadataDevice == "" { + metadatafilename := path.Join(dirname, "metadata") + metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename) + if err != nil { + return err + } + if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor { + devices.metadataDevice = metadataLoopDevice + devices.metadataLoopFile = metadatafilename + } + } + + return nil +} + +func (devices *DeviceSet) enableDeferredRemovalDeletion() error { + + // If user asked for deferred removal then check both libdm library + // and kernel driver support deferred removal otherwise error out. 
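+	// For reference, both behaviors are driven by storage options parsed
+	// in NewDeviceSet; a sketch of a daemon invocation enabling them:
+	//
+	//	--storage-opt dm.use_deferred_removal=true
+	//	--storage-opt dm.use_deferred_deletion=true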
+	if enableDeferredRemoval {
+		if !driverDeferredRemovalSupport {
+			return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it")
+		}
+		if !devicemapper.LibraryDeferredRemovalSupport {
+			return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it")
+		}
+		logrus.Debug("devmapper: Deferred removal support enabled.")
+		devices.deferredRemove = true
+	}
+
+	if enableDeferredDeletion {
+		if !devices.deferredRemove {
+			return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter")
+		}
+		logrus.Debug("devmapper: Deferred deletion support enabled.")
+		devices.deferredDelete = true
+	}
+	return nil
+}
+
+func (devices *DeviceSet) initDevmapper(doInit bool) error {
+	// Give ourselves to libdm as a log handler.
+	devicemapper.LogInit(devices)
+
+	version, err := devicemapper.GetDriverVersion()
+	if err != nil {
+		// Can't even get driver version, assume not supported.
+		return graphdriver.ErrNotSupported
+	}
+
+	if err := determineDriverCapabilities(version); err != nil {
+		return graphdriver.ErrNotSupported
+	}
+
+	if err := devices.enableDeferredRemovalDeletion(); err != nil {
+		return err
+	}
+
+	// https://github.com/docker/docker/issues/4036
+	if supported := devicemapper.UdevSetSyncSupport(true); !supported {
+		if storageversion.IAmStatic == "true" {
+			logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
+		} else {
+			logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
+		}
+
+		if !devices.overrideUdevSyncCheck {
+			return graphdriver.ErrNotSupported
+		}
+	}
+
+	// Create the root dir of the devmapper driver with ownership matching
+	// this daemon's remapped root uid/gid so containers can start properly.
+	uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps)
+	if err != nil {
+		return err
+	}
+	if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil && !os.IsExist(err) {
+		return err
+	}
+	if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) {
+		return err
+	}
+
+	// Set the device prefix from the device id and inode of the docker root dir.
+
+	st, err := os.Stat(devices.root)
+	if err != nil {
+		return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err)
+	}
+	sysSt := st.Sys().(*syscall.Stat_t)
+	// "reg-" stands for "regular file".
+	// In the future we might use "dev-" for "device file", etc.
+	// docker-maj:min[-inode] stands for:
+	//	- Managed by docker
+	//	- The target of this device is at major <maj> and minor <min>
+	//	- If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
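+	// Illustrative example: a root directory on device 253:2 with inode
+	// 6556161 produces the prefix "docker-253:2-6556161".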
+ devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino) + logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix) + + // Check for the existence of the thin-pool device + poolExists, err := devices.thinPoolExists(devices.getPoolName()) + if err != nil { + return err + } + + // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files + // that are not Close-on-exec, + // so we add this badhack to make sure it closes itself + setCloseOnExec("/dev/mapper/control") + + // Make sure the sparse images exist in /devicemapper/data and + // /devicemapper/metadata + + createdLoopback := false + + // If the pool doesn't exist, create it + if !poolExists && devices.thinPoolDevice == "" { + logrus.Debug("devmapper: Pool doesn't exist. Creating it.") + + var ( + dataFile *os.File + metadataFile *os.File + ) + + if devices.dataDevice == "" { + // Make sure the sparse images exist in /devicemapper/data + + hasData := devices.hasImage("data") + + if !doInit && !hasData { + return errors.New("Loopback data file not found") + } + + if !hasData { + createdLoopback = true + } + + data, err := devices.ensureImage("data", devices.dataLoopbackSize) + if err != nil { + logrus.Debugf("devmapper: Error device ensureImage (data): %s", err) + return err + } + + dataFile, err = loopback.AttachLoopDevice(data) + if err != nil { + return err + } + devices.dataLoopFile = data + devices.dataDevice = dataFile.Name() + } else { + dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer dataFile.Close() + + if devices.metadataDevice == "" { + // Make sure the sparse images exist in /devicemapper/metadata + + hasMetadata := devices.hasImage("metadata") + + if !doInit && !hasMetadata { + return errors.New("Loopback metadata file not found") + } + + if !hasMetadata { + createdLoopback = true + } + + metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) + if err != nil { + logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err) + return err + } + + metadataFile, err = loopback.AttachLoopDevice(metadata) + if err != nil { + return err + } + devices.metadataLoopFile = metadata + devices.metadataDevice = metadataFile.Name() + } else { + metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) + if err != nil { + return err + } + } + defer metadataFile.Close() + + if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { + return err + } + } + + // Pool already exists and caller did not pass us a pool. That means + // we probably created pool earlier and could not remove it as some + // containers were still using it. Detect some of the properties of + // pool, like is it using loop devices. + if poolExists && devices.thinPoolDevice == "" { + if err := devices.loadThinPoolLoopBackInfo(); err != nil { + logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err) + return err + } + } + + // If we didn't just create the data or metadata image, we need to + // load the transaction id and migrate old metadata + if !createdLoopback { + if err := devices.initMetaData(); err != nil { + return err + } + } + + if devices.thinPoolDevice == "" { + if devices.metadataLoopFile != "" || devices.dataLoopFile != "" { + logrus.Warn("devmapper: Usage of loopback devices is strongly discouraged for production use. 
Please use `--storage-opt dm.thinpooldev` or use `man docker` to refer to dm.thinpooldev section.") + } + } + + // Right now this loads only NextDeviceID. If there is more metadata + // down the line, we might have to move it earlier. + if err := devices.loadDeviceSetMetaData(); err != nil { + return err + } + + // Setup the base image + if doInit { + if err := devices.setupBaseImage(); err != nil { + logrus.Debugf("devmapper: Error device setupBaseImage: %s", err) + return err + } + } + + return nil +} + +// AddDevice adds a device and registers in the hash. +func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error { + logrus.Debugf("devmapper: AddDevice(hash=%s basehash=%s)", hash, baseHash) + defer logrus.Debugf("devmapper: AddDevice(hash=%s basehash=%s) END", hash, baseHash) + + // If a deleted device exists, return error. + baseInfo, err := devices.lookupDeviceWithLock(baseHash) + if err != nil { + return err + } + + if baseInfo.Deleted { + return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash) + } + + baseInfo.lock.Lock() + defer baseInfo.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + // Also include deleted devices in case hash of new device is + // same as one of the deleted devices. + if info, _ := devices.lookupDevice(hash); info != nil { + return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted) + } + + size, err := devices.parseStorageOpt(storageOpt) + if err != nil { + return err + } + + if size == 0 { + size = baseInfo.Size + } + + if size < baseInfo.Size { + return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size))) + } + + if err := devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { + return err + } + + // Grow the container rootfs. + if size > baseInfo.Size { + info, err := devices.lookupDevice(hash) + if err != nil { + return err + } + + if err := devices.growFS(info); err != nil { + return err + } + } + + return nil +} + +func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) { + + // Read size to change the block device size per container. + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return 0, err + } + return uint64(size), nil + default: + return 0, fmt.Errorf("Unknown option %s", key) + } + } + + return 0, nil +} + +func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error { + // If device is already in deleted state, there is nothing to be done. + if info.Deleted { + return nil + } + + logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash) + + info.Deleted = true + + // save device metadata to reflect deleted state. + if err := devices.saveMetadata(info); err != nil { + info.Deleted = false + return err + } + + devices.nrDeletedDevices++ + return nil +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) error { + if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceId = %d", "", info.DeviceID) + return err + } + + defer devices.closeTransaction() + + err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID) + if err != nil { + // If syncDelete is true, we want to return error. 
If deferred
+		// deletion is not enabled, we return an error. If error is
+		// something other than EBUSY, return an error.
+		if syncDelete || !devices.deferredDelete || err != devicemapper.ErrBusy {
+			logrus.Debugf("devmapper: Error deleting device: %s", err)
+			return err
+		}
+	}
+
+	if err == nil {
+		if err := devices.unregisterDevice(info.DeviceID, info.Hash); err != nil {
+			return err
+		}
+		// If device was already in deferred delete state that means
+		// deletion was being tried again later. Reduce the deleted
+		// device count.
+		if info.Deleted {
+			devices.nrDeletedDevices--
+		}
+		devices.markDeviceIDFree(info.DeviceID)
+	} else {
+		if err := devices.markForDeferredDeletion(info); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Issue discard only if device open count is zero.
+func (devices *DeviceSet) issueDiscard(info *devInfo) error {
+	logrus.Debugf("devmapper: issueDiscard(device: %s). START", info.Hash)
+	defer logrus.Debugf("devmapper: issueDiscard(device: %s). END", info.Hash)
+	// This is a workaround for the kernel not discarding blocks on the
+	// thin pool when we remove a thinp device, so we do it manually.
+	// Even if the device is deferred deleted, activate it and issue
+	// discards.
+	if err := devices.activateDeviceIfNeeded(info, true); err != nil {
+		return err
+	}
+
+	devinfo, err := devicemapper.GetInfo(info.Name())
+	if err != nil {
+		return err
+	}
+
+	if devinfo.OpenCount != 0 {
+		logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount)
+		return nil
+	}
+
+	if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil {
+		logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err)
+	}
+	return nil
+}
+
+// Should be called with devices.Lock() held.
+func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error {
+	if devices.doBlkDiscard {
+		devices.issueDiscard(info)
+	}
+
+	// Try to deactivate device in case it is active.
+	if err := devices.deactivateDevice(info); err != nil {
+		logrus.Debugf("devmapper: Error deactivating device: %s", err)
+		return err
+	}
+
+	if err := devices.deleteTransaction(info, syncDelete); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// DeleteDevice returns success if the device has been marked for deferred
+// removal. To override that and have DeleteDevice() fail if the device was
+// busy and could not be deleted, set syncDelete=true.
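+//
+// A minimal usage sketch (hash is whatever id the device was registered
+// under):
+//
+//	_ = devices.DeleteDevice(hash, false)   // may defer deletion if busy
+//	err := devices.DeleteDevice(hash, true) // fails instead of deferring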
+func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error { + logrus.Debugf("devmapper: DeleteDevice(hash=%v syncDelete=%v) START", hash, syncDelete) + defer logrus.Debugf("devmapper: DeleteDevice(hash=%v syncDelete=%v) END", hash, syncDelete) + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + return devices.deleteDevice(info, syncDelete) +} + +func (devices *DeviceSet) deactivatePool() error { + logrus.Debug("devmapper: deactivatePool()") + defer logrus.Debug("devmapper: deactivatePool END") + devname := devices.getPoolDevName() + + devinfo, err := devicemapper.GetInfo(devname) + if err != nil { + return err + } + + if devinfo.Exists == 0 { + return nil + } + if err := devicemapper.RemoveDevice(devname); err != nil { + return err + } + + if d, err := devicemapper.GetDeps(devname); err == nil { + logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count) + } + + return nil +} + +func (devices *DeviceSet) deactivateDevice(info *devInfo) error { + logrus.Debugf("devmapper: deactivateDevice(%s)", info.Hash) + defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash) + + devinfo, err := devicemapper.GetInfo(info.Name()) + if err != nil { + return err + } + + if devinfo.Exists == 0 { + return nil + } + + if devices.deferredRemove { + if err := devicemapper.RemoveDeviceDeferred(info.Name()); err != nil { + return err + } + } else { + if err := devices.removeDevice(info.Name()); err != nil { + return err + } + } + return nil +} + +// Issues the underlying dm remove operation. +func (devices *DeviceSet) removeDevice(devname string) error { + var err error + + logrus.Debugf("devmapper: removeDevice START(%s)", devname) + defer logrus.Debugf("devmapper: removeDevice END(%s)", devname) + + for i := 0; i < 200; i++ { + err = devicemapper.RemoveDevice(devname) + if err == nil { + break + } + if err != devicemapper.ErrBusy { + return err + } + + // If we see EBUSY it may be a transient error, + // sleep a bit a retry a few times. + devices.Unlock() + time.Sleep(100 * time.Millisecond) + devices.Lock() + } + + return err +} + +func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { + if !devices.deferredRemove { + return nil + } + + logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name()) + defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name()) + + devinfo, err := devicemapper.GetInfoWithDeferred(info.Name()) + + if devinfo != nil && devinfo.DeferredRemove == 0 { + return nil + } + + // Cancel deferred remove + for i := 0; i < 100; i++ { + err = devicemapper.CancelDeferredRemove(info.Name()) + if err == nil { + break + } + + if err == devicemapper.ErrEnxio { + // Device is probably already gone. Return success. + return nil + } + + if err != devicemapper.ErrBusy { + return err + } + + // If we see EBUSY it may be a transient error, + // sleep a bit a retry a few times. + devices.Unlock() + time.Sleep(100 * time.Millisecond) + devices.Lock() + } + return err +} + +// Shutdown shuts down the device by unmounting the root. +func (devices *DeviceSet) Shutdown(home string) error { + logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix) + logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root) + defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix) + + // Stop deletion worker. 
This should stop delivering new events to
+	// the ticker channel. That means no new instance of cleanupDeletedDevice()
+	// will run after this call. If one instance is already running at
+	// the time of the call, it must be holding devices.Lock() and
+	// we will block on this lock till the cleanup function exits.
+	devices.deletionWorkerTicker.Stop()
+
+	devices.Lock()
+	// Save DeviceSet metadata first. Docker kills all threads if they
+	// don't finish in a certain time. It is possible that the Shutdown()
+	// routine does not finish in time as we loop trying to deactivate
+	// some devices while these are busy. In that case the Shutdown()
+	// routine will be killed and we will not get a chance to save deviceset
+	// metadata. Hence save this early before trying to deactivate devices.
+	devices.saveDeviceSetMetaData()
+
+	// Ignore the error since it's just a best effort to not try to unmount
+	// something that's not mounted.
+	mounts, _ := mount.GetMounts()
+	mounted := make(map[string]bool, len(mounts))
+	for _, mnt := range mounts {
+		mounted[mnt.Mountpoint] = true
+	}
+
+	if err := filepath.Walk(path.Join(home, "mnt"), func(p string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if p == path.Join(home, "mnt") {
+			return nil
+		}
+		if !info.IsDir() {
+			return nil
+		}
+
+		if mounted[p] {
+			// We use MNT_DETACH here in case it is still busy in some running
+			// container. This means it'll go away from the global scope directly,
+			// and the device will be released when that container dies.
+			if err := syscall.Unmount(p, syscall.MNT_DETACH); err != nil {
+				logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err)
+			}
+		}
+
+		if devInfo, err := devices.lookupDevice(path.Base(p)); err != nil {
+			logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", path.Base(p), err)
+		} else {
+			if err := devices.deactivateDevice(devInfo); err != nil {
+				logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", devInfo.Hash, err)
+			}
+		}
+
+		return nil
+	}); err != nil && !os.IsNotExist(err) {
+		devices.Unlock()
+		return err
+	}
+
+	devices.Unlock()
+
+	info, _ := devices.lookupDeviceWithLock("")
+	if info != nil {
+		info.lock.Lock()
+		devices.Lock()
+		if err := devices.deactivateDevice(info); err != nil {
+			logrus.Debugf("devmapper: Shutdown deactivate base , error: %s", err)
+		}
+		devices.Unlock()
+		info.lock.Unlock()
+	}
+
+	devices.Lock()
+	if devices.thinPoolDevice == "" {
+		if err := devices.deactivatePool(); err != nil {
+			logrus.Debugf("devmapper: Shutdown deactivate pool , error: %s", err)
+		}
+	}
+	devices.Unlock()
+
+	return nil
+}
+
+// MountDevice mounts the device if not already mounted.
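+// A hypothetical call from the driver, which mounts at its conventional
+// mnt path:
+//
+//	err := devices.MountDevice(id, path.Join(home, "mnt", id), "")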
+func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + if info.Deleted { + return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash) + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + fstype, err := ProbeFsType(info.DevName()) + if err != nil { + return err + } + + options := "" + + if fstype == "xfs" { + // XFS needs nouuid or it can't mount filesystems with the same fs + options = joinMountOptions(options, "nouuid") + } + + options = joinMountOptions(options, devices.mountOptions) + options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) + + if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { + return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err) + } + + return nil +} + +// UnmountDevice unmounts the device and removes it from hash. +func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { + logrus.Debugf("devmapper: UnmountDevice(hash=%s)", hash) + defer logrus.Debugf("devmapper: UnmountDevice(hash=%s) END", hash) + + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + logrus.Debugf("devmapper: Unmount(%s)", mountPath) + if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil { + return err + } + logrus.Debug("devmapper: Unmount done") + + if err := devices.deactivateDevice(info); err != nil { + return err + } + + return nil +} + +// HasDevice returns true if the device metadata exists. +func (devices *DeviceSet) HasDevice(hash string) bool { + info, _ := devices.lookupDeviceWithLock(hash) + return info != nil +} + +// List returns a list of device ids. 
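+// Note that the base device is registered under the empty hash, so the
+// result typically includes "" alongside the per-layer ids.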
+func (devices *DeviceSet) List() []string { + devices.Lock() + defer devices.Unlock() + + ids := make([]string, len(devices.Devices)) + i := 0 + for k := range devices.Devices { + ids[i] = k + i++ + } + return ids +} + +func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { + var params string + _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) + if err != nil { + return + } + if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { + return + } + return +} + +// GetDeviceStatus provides size, mapped sectors +func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + status := &DevStatus{ + DeviceID: info.DeviceID, + Size: info.Size, + TransactionID: info.TransactionID, + } + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) + + if err != nil { + return nil, err + } + + status.SizeInSectors = sizeInSectors + status.MappedSectors = mappedSectors + status.HighestMappedSector = highestMappedSector + + return status, nil +} + +func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { + var params string + if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { + _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) + } + return +} + +// DataDevicePath returns the path to the data storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) DataDevicePath() string { + return devices.dataDevice +} + +// MetadataDevicePath returns the path to the metadata storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) MetadataDevicePath() string { + return devices.metadataDevice +} + +func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { + buf := new(syscall.Statfs_t) + if err := syscall.Statfs(loopFile, buf); err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) + return 0, err + } + return buf.Bfree * uint64(buf.Bsize), nil +} + +func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { + if loopFile != "" { + fi, err := os.Stat(loopFile) + if err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err) + return false, err + } + return fi.Mode().IsRegular(), nil + } + return false, nil +} + +// Status returns the current status of this deviceset +func (devices *DeviceSet) Status() *Status { + devices.Lock() + defer devices.Unlock() + + status := &Status{} + + status.PoolName = devices.getPoolName() + status.DataFile = devices.DataDevicePath() + status.DataLoopback = devices.dataLoopFile + status.MetadataFile = devices.MetadataDevicePath() + status.MetadataLoopback = devices.metadataLoopFile + status.UdevSyncSupported = devicemapper.UdevSyncSupported() + status.DeferredRemoveEnabled = devices.deferredRemove + status.DeferredDeleteEnabled = devices.deferredDelete + 
status.DeferredDeletedDeviceCount = devices.nrDeletedDevices + status.BaseDeviceSize = devices.getBaseDeviceSize() + status.BaseDeviceFS = devices.getBaseDeviceFS() + + totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err == nil { + // Convert from blocks to bytes + blockSizeInSectors := totalSizeInSectors / dataTotal + + status.Data.Used = dataUsed * blockSizeInSectors * 512 + status.Data.Total = dataTotal * blockSizeInSectors * 512 + status.Data.Available = status.Data.Total - status.Data.Used + + // metadata blocks are always 4k + status.Metadata.Used = metadataUsed * 4096 + status.Metadata.Total = metadataTotal * 4096 + status.Metadata.Available = status.Metadata.Total - status.Metadata.Used + + status.SectorSize = blockSizeInSectors * 512 + + if check, _ := devices.isRealFile(devices.dataLoopFile); check { + actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile) + if err == nil && actualSpace < status.Data.Available { + status.Data.Available = actualSpace + } + } + + if check, _ := devices.isRealFile(devices.metadataLoopFile); check { + actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile) + if err == nil && actualSpace < status.Metadata.Available { + status.Metadata.Available = actualSpace + } + } + + minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 + status.MinFreeSpace = minFreeData * blockSizeInSectors * 512 + } + + return status +} + +// Status returns the current status of this deviceset +func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()} + return metadata, nil +} + +// NewDeviceSet creates the device set based on the options provided. +func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) { + devicemapper.SetDevDir("/dev") + + devices := &DeviceSet{ + root: root, + metaData: metaData{Devices: make(map[string]*devInfo)}, + dataLoopbackSize: defaultDataLoopbackSize, + metaDataLoopbackSize: defaultMetaDataLoopbackSize, + baseFsSize: defaultBaseFsSize, + overrideUdevSyncCheck: defaultUdevSyncOverride, + doBlkDiscard: true, + thinpBlockSize: defaultThinpBlockSize, + deviceIDMap: make([]byte, deviceIDMapSz), + deletionWorkerTicker: time.NewTicker(time.Second * 30), + uidMaps: uidMaps, + gidMaps: gidMaps, + minFreeSpacePercent: defaultMinFreeSpacePercent, + } + + // Pick up initialization settings, if any were saved before + defaultsFile := path.Join(root, "defaults") + defaultsBytes, err := ioutil.ReadFile(defaultsFile) + defaults := []string{} + settings := map[string]string{} + if err == nil && len(defaultsBytes) > 0 { + defaults = strings.Split(string(defaultsBytes), "\n") + } + + foundBlkDiscard := false + nthOption := 0 + for _, option := range append(defaults, options...) 
{ + nthOption = nthOption + 1 + if len(option) == 0 { + continue + } + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "dm.basesize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + userBaseSize = true + devices.baseFsSize = uint64(size) + case "dm.loopdatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.dataLoopbackSize = size + case "dm.loopmetadatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.metaDataLoopbackSize = size + case "dm.fs": + if val != "ext4" && val != "xfs" { + return nil, fmt.Errorf("devmapper: Unsupported filesystem %s\n", val) + } + devices.filesystem = val + case "dm.mkfsarg": + devices.mkfsArgs = append(devices.mkfsArgs, val) + case "dm.mountopt": + devices.mountOptions = joinMountOptions(devices.mountOptions, val) + case "dm.metadatadev": + devices.metadataDevice = val + case "dm.datadev": + devices.dataDevice = val + case "dm.thinpooldev": + devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") + case "dm.blkdiscard": + foundBlkDiscard = true + devices.doBlkDiscard, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.blocksize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + // convert to 512b sectors + devices.thinpBlockSize = uint32(size) >> 9 + case "dm.override_udev_sync_check": + devices.overrideUdevSyncCheck, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_removal": + enableDeferredRemoval, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_deletion": + enableDeferredDeletion, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.min_free_space": + if !strings.HasSuffix(val, "%") { + return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix") + } + + valstring := strings.TrimSuffix(val, "%") + minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32) + if err != nil { + return nil, err + } + + if minFreeSpacePercent >= 100 { + return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val) + } + + devices.minFreeSpacePercent = uint32(minFreeSpacePercent) + default: + if nthOption > len(defaults) { + return nil, fmt.Errorf("devmapper: Unknown option %s\n", key) + } + logrus.Errorf("devmapper: Unknown option %s, ignoring\n", key) + } + settings[key] = val + } + + // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive + if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { + devices.doBlkDiscard = false + } + + if err := devices.initDevmapper(doInit); err != nil { + return nil, err + } + + // Save these settings along with the other metadata + defaults = []string{} + for key, val := range settings { + defaults = append(defaults, key+"="+val) + } + defaultsBytes = []byte(strings.Join(defaults, "\n") + "\n") + if err := ioutils.AtomicWriteFile(defaultsFile, defaultsBytes, 0600); err != nil { + return nil, err + } + + return devices, nil +} diff --git a/vendor/src/github.com/containers/storage/drivers/devmapper/devmapper_doc.go b/vendor/src/github.com/containers/storage/drivers/devmapper/devmapper_doc.go new file mode 100644 index 00000000..9ab3e4f8 --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/devmapper/devmapper_doc.go @@ -0,0 
+1,106 @@ +package devmapper + +// Definition of struct dm_task and sub structures (from lvm2) +// +// struct dm_ioctl { +// /* +// * The version number is made up of three parts: +// * major - no backward or forward compatibility, +// * minor - only backwards compatible, +// * patch - both backwards and forwards compatible. +// * +// * All clients of the ioctl interface should fill in the +// * version number of the interface that they were +// * compiled with. +// * +// * All recognized ioctl commands (ie. those that don't +// * return -ENOTTY) fill out this field, even if the +// * command failed. +// */ +// uint32_t version[3]; /* in/out */ +// uint32_t data_size; /* total size of data passed in +// * including this struct */ + +// uint32_t data_start; /* offset to start of data +// * relative to start of this struct */ + +// uint32_t target_count; /* in/out */ +// int32_t open_count; /* out */ +// uint32_t flags; /* in/out */ + +// /* +// * event_nr holds either the event number (input and output) or the +// * udev cookie value (input only). +// * The DM_DEV_WAIT ioctl takes an event number as input. +// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls +// * use the field as a cookie to return in the DM_COOKIE +// * variable with the uevents they issue. +// * For output, the ioctls return the event number, not the cookie. +// */ +// uint32_t event_nr; /* in/out */ +// uint32_t padding; + +// uint64_t dev; /* in/out */ + +// char name[DM_NAME_LEN]; /* device name */ +// char uuid[DM_UUID_LEN]; /* unique identifier for +// * the block device */ +// char data[7]; /* padding or data */ +// }; + +// struct target { +// uint64_t start; +// uint64_t length; +// char *type; +// char *params; + +// struct target *next; +// }; + +// typedef enum { +// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ +// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ +// } dm_add_node_t; + +// struct dm_task { +// int type; +// char *dev_name; +// char *mangled_dev_name; + +// struct target *head, *tail; + +// int read_only; +// uint32_t event_nr; +// int major; +// int minor; +// int allow_default_major_fallback; +// uid_t uid; +// gid_t gid; +// mode_t mode; +// uint32_t read_ahead; +// uint32_t read_ahead_flags; +// union { +// struct dm_ioctl *v4; +// } dmi; +// char *newname; +// char *message; +// char *geometry; +// uint64_t sector; +// int no_flush; +// int no_open_count; +// int skip_lockfs; +// int query_inactive_table; +// int suppress_identical_reload; +// dm_add_node_t add_node; +// uint64_t existing_table_size; +// int cookie_set; +// int new_uuid; +// int secure_data; +// int retry_remove; +// int enable_checks; +// int expected_errno; + +// char *uuid; +// char *mangled_uuid; +// }; +// diff --git a/vendor/src/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/src/github.com/containers/storage/drivers/devmapper/driver.go new file mode 100644 index 00000000..1fc3a7b3 --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/devmapper/driver.go @@ -0,0 +1,226 @@ +// +build linux + +package devmapper + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + + "github.com/Sirupsen/logrus" + + "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/devicemapper" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/mount" + "github.com/docker/go-units" +) + +func init() { + graphdriver.Register("devicemapper", Init) +} + +// Driver contains the device set mounted and 
the home directory +type Driver struct { + *DeviceSet + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter +} + +// Init creates a driver with the given home and the set of options. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + deviceSet, err := NewDeviceSet(home, true, options, uidMaps, gidMaps) + if err != nil { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + d := &Driver{ + DeviceSet: deviceSet, + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + } + + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +func (d *Driver) String() string { + return "devicemapper" +} + +// Status returns the status about the driver in a printable format. +// Information returned contains Pool Name, Data File, Metadata file, disk usage by +// the data and metadata, etc. +func (d *Driver) Status() [][2]string { + s := d.DeviceSet.Status() + + status := [][2]string{ + {"Pool Name", s.PoolName}, + {"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))}, + {"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))}, + {"Backing Filesystem", s.BaseDeviceFS}, + {"Data file", s.DataFile}, + {"Metadata file", s.MetadataFile}, + {"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))}, + {"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))}, + {"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))}, + {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))}, + {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))}, + {"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))}, + {"Thin Pool Minimum Free Space", fmt.Sprintf("%s", units.HumanSize(float64(s.MinFreeSpace)))}, + {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, + {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, + {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, + {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)}, + } + if len(s.DataLoopback) > 0 { + status = append(status, [2]string{"Data loop file", s.DataLoopback}) + } + if len(s.MetadataLoopback) > 0 { + status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) + } + if vStr, err := devicemapper.GetLibraryVersion(); err == nil { + status = append(status, [2]string{"Library Version", vStr}) + } + return status +} + +// GetMetadata returns a map of information about the device. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + m, err := d.DeviceSet.exportDeviceMetadata(id) + + if err != nil { + return nil, err + } + + metadata := make(map[string]string) + metadata["DeviceId"] = strconv.Itoa(m.deviceID) + metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10) + metadata["DeviceName"] = m.deviceName + return metadata, nil +} + +// Cleanup unmounts a device. +func (d *Driver) Cleanup() error { + err := d.DeviceSet.Shutdown(d.home) + + if err2 := mount.Unmount(d.home); err == nil { + err = err2 + } + + return err +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. 
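+// Devicemapper makes no distinction between read-write and read-only
+// layers (each is an independent thin snapshot), so this simply delegates
+// to Create.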
+func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + return d.Create(id, parent, mountLabel, storageOpt) +} + +// Create adds a device with a given id and the parent. +func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { + if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil { + return err + } + + return nil +} + +// Remove removes a device with a given id, unmounts the filesystem. +func (d *Driver) Remove(id string) error { + if !d.DeviceSet.HasDevice(id) { + // Consider removing a non-existing device a no-op + // This is useful to be able to progress on container removal + // if the underlying device has gone away due to earlier errors + return nil + } + + // This assumes the device has been properly Get/Put:ed and thus is unmounted + if err := d.DeviceSet.DeleteDevice(id, false); err != nil { + return err + } + + mp := path.Join(d.home, "mnt", id) + if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { + return err + } + + return nil +} + +// Get mounts a device with given id into the root filesystem +func (d *Driver) Get(id, mountLabel string) (string, error) { + mp := path.Join(d.home, "mnt", id) + rootFs := path.Join(mp, "rootfs") + if count := d.ctr.Increment(mp); count > 1 { + return rootFs, nil + } + + uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + d.ctr.Decrement(mp) + return "", err + } + + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + return "", err + } + if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + return "", err + } + + // Mount the device + if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { + d.ctr.Decrement(mp) + return "", err + } + + if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + + idFile := path.Join(mp, "id") + if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { + // Create an "id" file with the container/image id in it to help reconstruct this in case + // of later problems + if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + } + + return rootFs, nil +} + +// Put unmounts a device and removes it. +func (d *Driver) Put(id string) error { + mp := path.Join(d.home, "mnt", id) + if count := d.ctr.Decrement(mp); count > 0 { + return nil + } + err := d.DeviceSet.UnmountDevice(id, mp) + if err != nil { + logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err) + } + return err +} + +// Exists checks to see if the device exists. +func (d *Driver) Exists(id string) bool { + return d.DeviceSet.HasDevice(id) +} diff --git a/vendor/src/github.com/containers/storage/drivers/devmapper/mount.go b/vendor/src/github.com/containers/storage/drivers/devmapper/mount.go new file mode 100644 index 00000000..cca1fe1b --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/devmapper/mount.go @@ -0,0 +1,89 @@ +// +build linux + +package devmapper + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "syscall" +) + +// FIXME: this is copy-pasted from the aufs driver. +// It should be moved into the core. + +// Mounted returns true if a mount point exists. 
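+// It relies on a mountpoint and its parent directory living on different
+// devices, so their stat Dev fields differ; a sketch with a hypothetical
+// path:
+//
+//	isMounted, err := Mounted("/var/lib/storage/devicemapper/mnt")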
+func Mounted(mountpoint string) (bool, error) { + mntpoint, err := os.Stat(mountpoint) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + parent, err := os.Stat(filepath.Join(mountpoint, "..")) + if err != nil { + return false, err + } + mntpointSt := mntpoint.Sys().(*syscall.Stat_t) + parentSt := parent.Sys().(*syscall.Stat_t) + return mntpointSt.Dev != parentSt.Dev, nil +} + +type probeData struct { + fsName string + magic string + offset uint64 +} + +// ProbeFsType returns the filesystem name for the given device id. +func ProbeFsType(device string) (string, error) { + probes := []probeData{ + {"btrfs", "_BHRfS_M", 0x10040}, + {"ext4", "\123\357", 0x438}, + {"xfs", "XFSB", 0}, + } + + maxLen := uint64(0) + for _, p := range probes { + l := p.offset + uint64(len(p.magic)) + if l > maxLen { + maxLen = l + } + } + + file, err := os.Open(device) + if err != nil { + return "", err + } + defer file.Close() + + buffer := make([]byte, maxLen) + l, err := file.Read(buffer) + if err != nil { + return "", err + } + + if uint64(l) != maxLen { + return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device) + } + + for _, p := range probes { + if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { + return p.fsName, nil + } + } + + return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device) +} + +func joinMountOptions(a, b string) string { + if a == "" { + return b + } + if b == "" { + return a + } + return a + "," + b +} diff --git a/vendor/src/github.com/containers/storage/drivers/driver.go b/vendor/src/github.com/containers/storage/drivers/driver.go new file mode 100644 index 00000000..e267f618 --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/driver.go @@ -0,0 +1,243 @@ +package graphdriver + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" +) + +// FsMagic unsigned id of the filesystem in use. +type FsMagic uint32 + +const ( + // FsMagicUnsupported is a predefined constant value other than a valid filesystem id. + FsMagicUnsupported = FsMagic(0x00000000) +) + +var ( + // All registered drivers + drivers map[string]InitFunc + + // ErrNotSupported returned when driver is not supported. + ErrNotSupported = errors.New("driver not supported") + // ErrPrerequisites retuned when driver does not meet prerequisites. + ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") + // ErrIncompatibleFS returned when file system is not supported. + ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") +) + +// InitFunc initializes the storage driver. +type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) + +// ProtoDriver defines the basic capabilities of a driver. +// This interface exists solely to be a minimum set of methods +// for client code which choose not to implement the entire Driver +// interface and use the NaiveDiffDriver wrapper constructor. +// +// Use of ProtoDriver directly by client code is not recommended. +type ProtoDriver interface { + // String returns a string representation of this driver. + String() string + // CreateReadWrite creates a new, empty filesystem layer that is ready + // to be used as the storage for a container. 
+ CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error + // Create creates a new, empty, filesystem layer with the + // specified id and parent and mountLabel. Parent and mountLabel may be "". + Create(id, parent, mountLabel string, storageOpt map[string]string) error + // Remove attempts to remove the filesystem layer with this id. + Remove(id string) error + // Get returns the mountpoint for the layered filesystem referred + // to by this id. You can optionally specify a mountLabel or "". + // Returns the absolute path to the mounted layered filesystem. + Get(id, mountLabel string) (dir string, err error) + // Put releases the system resources for the specified id, + // e.g, unmounting layered filesystem. + Put(id string) error + // Exists returns whether a filesystem layer with the specified + // ID exists on this driver. + Exists(id string) bool + // Status returns a set of key-value pairs which give low + // level diagnostic status about this driver. + Status() [][2]string + // Returns a set of key-value pairs which give low level information + // about the image/container driver is managing. + GetMetadata(id string) (map[string]string, error) + // Cleanup performs necessary tasks to release resources + // held by the driver, e.g., unmounting all layered filesystems + // known to this driver. + Cleanup() error +} + +// Driver is the interface for layered/snapshot file system drivers. +type Driver interface { + ProtoDriver + // Diff produces an archive of the changes between the specified + // layer and its parent layer which may be "". + Diff(id, parent string) (archive.Archive, error) + // Changes produces a list of changes between the specified layer + // and its parent layer. If parent is "", then all changes will be ADD changes. + Changes(id, parent string) ([]archive.Change, error) + // ApplyDiff extracts the changeset from the given diff into the + // layer with the specified id and parent, returning the size of the + // new layer in bytes. + // The archive.Reader must be an uncompressed stream. + ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) + // DiffSize calculates the changes between the specified id + // and its parent and returns the size in bytes of the changes + // relative to its base filesystem directory. + DiffSize(id, parent string) (size int64, err error) +} + +// DiffGetterDriver is the interface for layered file system drivers that +// provide a specialized function for getting file contents for tar-split. +type DiffGetterDriver interface { + Driver + // DiffGetter returns an interface to efficiently retrieve the contents + // of files in a layer. + DiffGetter(id string) (FileGetCloser, error) +} + +// FileGetCloser extends the storage.FileGetter interface with a Close method +// for cleaning up. +type FileGetCloser interface { + storage.FileGetter + // Close cleans up any resources associated with the FileGetCloser. + Close() error +} + +// Checker makes checks on specified filesystems. +type Checker interface { + // IsMounted returns true if the provided path is mounted for the specific checker + IsMounted(path string) bool +} + +func init() { + drivers = make(map[string]InitFunc) +} + +// Register registers an InitFunc for the driver. 
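+// A typical caller is a driver package registering itself from init, as
+// the devicemapper driver does:
+//
+//	func init() {
+//		graphdriver.Register("devicemapper", Init)
+//	}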
+func Register(name string, initFunc InitFunc) error { + if _, exists := drivers[name]; exists { + return fmt.Errorf("Name already registered %s", name) + } + drivers[name] = initFunc + + return nil +} + +// GetDriver initializes and returns the registered driver +func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) + } + if pluginDriver, err := lookupPlugin(name, home, options); err == nil { + return pluginDriver, nil + } + logrus.Errorf("Failed to GetDriver graph %s %s", name, home) + return nil, ErrNotSupported +} + +// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins +func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) + } + logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) + return nil, ErrNotSupported +} + +// New creates the driver and initializes it at the specified root. +func New(root string, name string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if name != "" { + logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver + return GetDriver(name, root, options, uidMaps, gidMaps) + } + + // Guess for prior driver + driversMap := scanPriorDrivers(root) + for _, name := range priority { + if name == "vfs" { + // don't use vfs even if there is state present. + continue + } + if _, prior := driversMap[name]; prior { + // of the state found from prior drivers, check in order of our priority + // which we would prefer + driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps) + if err != nil { + // unlike below, we will return error here, because there is prior + // state, and now it is no longer supported/prereq/compatible, so + // something changed and needs attention. Otherwise the daemon's + // images would just "disappear". + logrus.Errorf("[graphdriver] prior storage driver %q failed: %s", name, err) + return nil, err + } + + // abort starting when there are other prior configured drivers + // to ensure the user explicitly selects the driver to load + if len(driversMap)-1 > 0 { + var driversSlice []string + for name := range driversMap { + driversSlice = append(driversSlice, name) + } + + return nil, fmt.Errorf("%q contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s )", root, strings.Join(driversSlice, ", ")) + } + + logrus.Infof("[graphdriver] using prior storage driver %q", name) + return driver, nil + } + } + + // Check for priority drivers first + for _, name := range priority { + driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps) + if err != nil { + if isDriverNotSupported(err) { + continue + } + return nil, err + } + return driver, nil + } + + // Check all registered drivers if no priority driver is found + for name, initFunc := range drivers { + driver, err := initFunc(filepath.Join(root, name), options, uidMaps, gidMaps) + if err != nil { + if isDriverNotSupported(err) { + continue + } + return nil, err + } + return driver, nil + } + return nil, fmt.Errorf("No supported storage backend found") +} + +// isDriverNotSupported returns true if the error initializing +// the graph driver is a non-supported error. 
+func isDriverNotSupported(err error) bool { + return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS +} + +// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers +func scanPriorDrivers(root string) map[string]bool { + driversMap := make(map[string]bool) + + for driver := range drivers { + p := filepath.Join(root, driver) + if _, err := os.Stat(p); err == nil && driver != "vfs" { + driversMap[driver] = true + } + } + return driversMap +} diff --git a/vendor/src/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/src/github.com/containers/storage/drivers/driver_freebsd.go new file mode 100644 index 00000000..2891a84f --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/driver_freebsd.go @@ -0,0 +1,19 @@ +package graphdriver + +import "syscall" + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "zfs", + } +) + +// Mounted checks if the given path is mounted as the fs type +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(mountPath, &buf); err != nil { + return false, err + } + return FsMagic(buf.Type) == fsType, nil +} diff --git a/vendor/src/github.com/containers/storage/drivers/driver_linux.go b/vendor/src/github.com/containers/storage/drivers/driver_linux.go new file mode 100644 index 00000000..2b421e32 --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/driver_linux.go @@ -0,0 +1,133 @@ +// +build linux + +package graphdriver + +import ( + "path/filepath" + "syscall" + + "github.com/containers/storage/pkg/mount" +) + +const ( + // FsMagicAufs filesystem id for Aufs + FsMagicAufs = FsMagic(0x61756673) + // FsMagicBtrfs filesystem id for Btrfs + FsMagicBtrfs = FsMagic(0x9123683E) + // FsMagicCramfs filesystem id for Cramfs + FsMagicCramfs = FsMagic(0x28cd3d45) + // FsMagicEcryptfs filesystem id for eCryptfs + FsMagicEcryptfs = FsMagic(0xf15f) + // FsMagicExtfs filesystem id for Extfs + FsMagicExtfs = FsMagic(0x0000EF53) + // FsMagicF2fs filesystem id for F2fs + FsMagicF2fs = FsMagic(0xF2F52010) + // FsMagicGPFS filesystem id for GPFS + FsMagicGPFS = FsMagic(0x47504653) + // FsMagicJffs2Fs filesystem if for Jffs2Fs + FsMagicJffs2Fs = FsMagic(0x000072b6) + // FsMagicJfs filesystem id for Jfs + FsMagicJfs = FsMagic(0x3153464a) + // FsMagicNfsFs filesystem id for NfsFs + FsMagicNfsFs = FsMagic(0x00006969) + // FsMagicRAMFs filesystem id for RamFs + FsMagicRAMFs = FsMagic(0x858458f6) + // FsMagicReiserFs filesystem id for ReiserFs + FsMagicReiserFs = FsMagic(0x52654973) + // FsMagicSmbFs filesystem id for SmbFs + FsMagicSmbFs = FsMagic(0x0000517B) + // FsMagicSquashFs filesystem id for SquashFs + FsMagicSquashFs = FsMagic(0x73717368) + // FsMagicTmpFs filesystem id for TmpFs + FsMagicTmpFs = FsMagic(0x01021994) + // FsMagicVxFS filesystem id for VxFs + FsMagicVxFS = FsMagic(0xa501fcf5) + // FsMagicXfs filesystem id for Xfs + FsMagicXfs = FsMagic(0x58465342) + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) + // FsMagicOverlay filesystem id for overlay + FsMagicOverlay = FsMagic(0x794C7630) +) + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "aufs", + "btrfs", + "zfs", + "devicemapper", + "overlay", + "vfs", + } + + // FsNames maps filesystem id to name of the filesystem. 
+	FsNames = map[FsMagic]string{
+		FsMagicAufs:        "aufs",
+		FsMagicBtrfs:       "btrfs",
+		FsMagicCramfs:      "cramfs",
+		FsMagicExtfs:       "extfs",
+		FsMagicF2fs:        "f2fs",
+		FsMagicGPFS:        "gpfs",
+		FsMagicJffs2Fs:     "jffs2",
+		FsMagicJfs:         "jfs",
+		FsMagicNfsFs:       "nfs",
+		FsMagicRAMFs:       "ramfs",
+		FsMagicReiserFs:    "reiserfs",
+		FsMagicSmbFs:       "smb",
+		FsMagicSquashFs:    "squashfs",
+		FsMagicTmpFs:       "tmpfs",
+		FsMagicUnsupported: "unsupported",
+		FsMagicVxFS:        "vxfs",
+		FsMagicXfs:         "xfs",
+		FsMagicZfs:         "zfs",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil {
+		return 0, err
+	}
+	return FsMagic(buf.Type), nil
+}
+
+// NewFsChecker returns a checker configured for the provided FsMagic
+func NewFsChecker(t FsMagic) Checker {
+	return &fsChecker{
+		t: t,
+	}
+}
+
+type fsChecker struct {
+	t FsMagic
+}
+
+func (c *fsChecker) IsMounted(path string) bool {
+	m, _ := Mounted(c.t, path)
+	return m
+}
+
+// NewDefaultChecker returns a checker that parses /proc/mountinfo to check
+// if the specified path is mounted.
+func NewDefaultChecker() Checker {
+	return &defaultChecker{}
+}
+
+type defaultChecker struct {
+}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+	m, _ := mount.Mounted(path)
+	return m
+}
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(mountPath, &buf); err != nil {
+		return false, err
+	}
+	return FsMagic(buf.Type) == fsType, nil
+}
diff --git a/vendor/src/github.com/containers/storage/drivers/driver_solaris.go b/vendor/src/github.com/containers/storage/drivers/driver_solaris.go
new file mode 100644
index 00000000..29719ffa
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/drivers/driver_solaris.go
@@ -0,0 +1,65 @@
+// +build solaris,cgo
+
+package graphdriver
+
+/*
+#include <sys/statvfs.h>
+#include <stdlib.h>
+
+static inline struct statvfs *getstatfs(char *s) {
+	struct statvfs *buf;
+	int err;
+	buf = (struct statvfs *)malloc(sizeof(struct statvfs));
+	err = statvfs(s, buf);
+	return buf;
+}
+*/
+import "C"
+import (
+	"path/filepath"
+	"unsafe"
+
+	log "github.com/Sirupsen/logrus"
+)
+
+const (
+	// FsMagicZfs filesystem id for Zfs
+	FsMagicZfs = FsMagic(0x2fc12fc1)
+)
+
+var (
+	// Slice of drivers that should be used in an order
+	priority = []string{
+		"zfs",
+	}
+
+	// FsNames maps filesystem id to name of the filesystem.
+	FsNames = map[FsMagic]string{
+		FsMagicZfs: "zfs",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	return 0, nil
+}
+
+// Mounted checks if the given path is mounted as the fs type
+// Solaris supports only ZFS for now
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+
+	cs := C.CString(filepath.Dir(mountPath))
+	buf := C.getstatfs(cs)
+
+	// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
+	if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
+		(buf.f_basetype[3] != 0) {
+		log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
+		C.free(unsafe.Pointer(buf))
+		return false, ErrPrerequisites
+	}
+
+	C.free(unsafe.Pointer(buf))
+	C.free(unsafe.Pointer(cs))
+	return true, nil
+}
diff --git a/vendor/src/github.com/containers/storage/drivers/driver_unsupported.go b/vendor/src/github.com/containers/storage/drivers/driver_unsupported.go
new file mode 100644
index 00000000..4a875608
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/drivers/driver_unsupported.go
@@ -0,0 +1,15 @@
+// +build !linux,!windows,!freebsd,!solaris
+
+package graphdriver
+
+var (
+	// Slice of drivers that should be used in an order
+	priority = []string{
+		"unsupported",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	return FsMagicUnsupported, nil
+}
diff --git a/vendor/src/github.com/containers/storage/drivers/driver_windows.go b/vendor/src/github.com/containers/storage/drivers/driver_windows.go
new file mode 100644
index 00000000..ffd30c29
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/drivers/driver_windows.go
@@ -0,0 +1,14 @@
+package graphdriver
+
+var (
+	// Slice of drivers that should be used in order
+	priority = []string{
+		"windowsfilter",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	// Note it is OK to return FsMagicUnsupported on Windows.
+	return FsMagicUnsupported, nil
+}
diff --git a/vendor/src/github.com/containers/storage/drivers/fsdiff.go b/vendor/src/github.com/containers/storage/drivers/fsdiff.go
new file mode 100644
index 00000000..0f5b4fe4
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/drivers/fsdiff.go
@@ -0,0 +1,157 @@
+package graphdriver
+
+import (
+	"time"
+
+	"github.com/Sirupsen/logrus"
+
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/chrootarchive"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/ioutils"
+)
+
+var (
+	// ApplyUncompressedLayer defines the unpack method used by the graph
+	// driver.
+	ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer
+)
+
+// NaiveDiffDriver takes a ProtoDriver and adds the
+// capability of the Diffing methods which it may or may not
+// support on its own. See the comment on the exported
+// NewNaiveDiffDriver function below.
+// Notably, the AUFS driver doesn't need to be wrapped like this.
+type NaiveDiffDriver struct {
+	ProtoDriver
+	uidMaps []idtools.IDMap
+	gidMaps []idtools.IDMap
+}
+
+// NewNaiveDiffDriver returns a fully functional driver that wraps the
+// given ProtoDriver and adds the capability of the following methods which
+// it may or may not support on its own:
+//     Diff(id, parent string) (archive.Archive, error)
+//     Changes(id, parent string) ([]archive.Change, error)
+//     ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
+//     DiffSize(id, parent string) (size int64, err error)
+func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver {
+	gdw := &NaiveDiffDriver{
+		ProtoDriver: driver,
+		uidMaps:     uidMaps,
+		gidMaps:     gidMaps,
+	}
+	return gdw
+}
+
+// Diff produces an archive of the changes between the specified
+// layer and its parent layer which may be "".
+func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) { + layerFs, err := gdw.Get(id, "") + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + gdw.Put(id) + } + }() + + if parent == "" { + archive, err := archive.Tar(layerFs, archive.Uncompressed) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + gdw.Put(id) + return err + }), nil + } + + parentFs, err := gdw.Get(parent, "") + if err != nil { + return nil, err + } + defer gdw.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + gdw.Put(id) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { + layerFs, err := gdw.Get(id, "") + if err != nil { + return nil, err + } + defer gdw.Put(id) + + parentFs := "" + + if parent != "" { + parentFs, err = gdw.Get(parent, "") + if err != nil { + return nil, err + } + defer gdw.Put(parent) + } + + return archive.ChangesDirs(layerFs, parentFs) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { + // Mount the root filesystem so we can apply the diff/layer. + layerFs, err := gdw.Get(id, "") + if err != nil { + return + } + defer gdw.Put(id) + + options := &archive.TarOptions{UIDMaps: gdw.uidMaps, + GIDMaps: gdw.gidMaps} + start := time.Now().UTC() + logrus.Debug("Start untar layer") + if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { + return + } + logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + return +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. 
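
Taken together, these four methods let layers move between drivers. A minimal sketch, assuming the target layer was already created in the destination driver (the helper name copyLayer is made up): Diff produces the uncompressed stream that ApplyDiff consumes, so the two can be piped directly.

	package layercopy

	import (
		graphdriver "github.com/containers/storage/drivers"
	)

	// copyLayer replays the diff of (id, parent) from src into a layer with
	// the same id and parent already created in dst, returning the number of
	// bytes applied.
	func copyLayer(src, dst graphdriver.Driver, id, parent string) (int64, error) {
		arch, err := src.Diff(id, parent)
		if err != nil {
			return 0, err
		}
		defer arch.Close()
		// Diff yields an uncompressed tar stream, which is exactly what
		// ApplyDiff requires.
		return dst.ApplyDiff(id, parent, arch)
	}
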
+func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) {
+	changes, err := gdw.Changes(id, parent)
+	if err != nil {
+		return
+	}
+
+	layerFs, err := gdw.Get(id, "")
+	if err != nil {
+		return
+	}
+	defer gdw.Put(id)
+
+	return archive.ChangesSize(layerFs, changes), nil
+}
diff --git a/vendor/src/github.com/containers/storage/drivers/overlay/copy.go b/vendor/src/github.com/containers/storage/drivers/overlay/copy.go
new file mode 100644
index 00000000..703d51a8
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/drivers/overlay/copy.go
@@ -0,0 +1,169 @@
+// +build linux
+
+package overlay
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"syscall"
+	"time"
+
+	"github.com/containers/storage/pkg/pools"
+	"github.com/containers/storage/pkg/system"
+)
+
+type copyFlags int
+
+const (
+	copyHardlink copyFlags = 1 << iota
+)
+
+func copyRegular(srcPath, dstPath string, mode os.FileMode) error {
+	srcFile, err := os.Open(srcPath)
+	if err != nil {
+		return err
+	}
+	defer srcFile.Close()
+
+	dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode)
+	if err != nil {
+		return err
+	}
+	defer dstFile.Close()
+
+	_, err = pools.Copy(dstFile, srcFile)
+
+	return err
+}
+
+func copyXattr(srcPath, dstPath, attr string) error {
+	data, err := system.Lgetxattr(srcPath, attr)
+	if err != nil {
+		return err
+	}
+	if data != nil {
+		if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func copyDir(srcDir, dstDir string, flags copyFlags) error {
+	err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		relPath, err := filepath.Rel(srcDir, srcPath)
+		if err != nil {
+			return err
+		}
+
+		dstPath := filepath.Join(dstDir, relPath)
+		if err != nil {
+			return err
+		}
+
+		stat, ok := f.Sys().(*syscall.Stat_t)
+		if !ok {
+			return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
+		}
+
+		isHardlink := false
+
+		switch f.Mode() & os.ModeType {
+		case 0: // Regular file
+			if flags&copyHardlink != 0 {
+				isHardlink = true
+				if err := os.Link(srcPath, dstPath); err != nil {
+					return err
+				}
+			} else {
+				if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil {
+					return err
+				}
+			}
+
+		case os.ModeDir:
+			if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) {
+				return err
+			}
+
+		case os.ModeSymlink:
+			link, err := os.Readlink(srcPath)
+			if err != nil {
+				return err
+			}
+
+			if err := os.Symlink(link, dstPath); err != nil {
+				return err
+			}
+
+		case os.ModeNamedPipe:
+			fallthrough
+		case os.ModeSocket:
+			if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil {
+				return err
+			}
+
+		case os.ModeDevice:
+			if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
+				return err
+			}
+
+		default:
+			return fmt.Errorf("Unknown file type for %s\n", srcPath)
+		}
+
+		// Everything below is copying metadata from src to dst. All this metadata
+		// already shares an inode for hardlinks.
+		if isHardlink {
+			return nil
+		}
+
+		if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
+			return err
+		}
+
+		if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
+			return err
+		}
+
+		// We need to copy this attribute if it appears in an overlay upper layer, as
+		// this function is used to copy those. It is set by overlay if a directory
+		// is removed and then re-created and should not inherit anything from the
+		// same dir in the lower dir.
+ if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil { + return err + } + + isSymlink := f.Mode()&os.ModeSymlink != 0 + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if !isSymlink { + if err := os.Chmod(dstPath, f.Mode()); err != nil { + return err + } + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if !isSymlink { + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) + if err := system.Chtimes(dstPath, aTime, mTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{stat.Atim, stat.Mtim} + if err := system.LUtimesNano(dstPath, ts); err != nil { + return err + } + } + return nil + }) + return err +} diff --git a/vendor/src/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/src/github.com/containers/storage/drivers/overlay/overlay.go new file mode 100644 index 00000000..ec4aed46 --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/overlay/overlay.go @@ -0,0 +1,450 @@ +// +build linux + +package overlay + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "syscall" + + "github.com/Sirupsen/logrus" + + "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" + + "github.com/containers/storage/pkg/mount" + "github.com/opencontainers/runc/libcontainer/label" +) + +// This is a small wrapper over the NaiveDiffWriter that lets us have a custom +// implementation of ApplyDiff() + +var ( + // ErrApplyDiffFallback is returned to indicate that a normal ApplyDiff is applied as a fallback from Naive diff writer. + ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff") + backingFs = "" +) + +// ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with ApplyDiff method. +type ApplyDiffProtoDriver interface { + graphdriver.ProtoDriver + // ApplyDiff writes the diff to the archive for the given id and parent id. + // It returns the size in bytes written if successful, an error ErrApplyDiffFallback is returned otherwise. + ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) +} + +type naiveDiffDriverWithApply struct { + graphdriver.Driver + applyDiff ApplyDiffProtoDriver +} + +// NaiveDiffDriverWithApply returns a NaiveDiff driver with custom ApplyDiff. +func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver, uidMaps, gidMaps []idtools.IDMap) graphdriver.Driver { + return &naiveDiffDriverWithApply{ + Driver: graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), + applyDiff: driver, + } +} + +// ApplyDiff creates a diff layer with either the NaiveDiffDriver or with a fallback. +func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { + b, err := d.applyDiff.ApplyDiff(id, parent, diff) + if err == ErrApplyDiffFallback { + return d.Driver.ApplyDiff(id, parent, diff) + } + return b, err +} + +// This backend uses the overlay union filesystem for containers +// plus hard link file sharing for images. + +// Each container/image can have a "root" subdirectory which is a plain +// filesystem hierarchy, or they can use overlay. + +// If they use overlay there is a "upper" directory and a "lower-id" +// file, as well as "merged" and "work" directories. 
The "upper" +// directory has the upper layer of the overlay, and "lower-id" contains +// the id of the parent whose "root" directory shall be used as the lower +// layer in the overlay. The overlay itself is mounted in the "merged" +// directory, and the "work" dir is needed for overlay to work. + +// When an overlay layer is created there are two cases, either the +// parent has a "root" dir, then we start out with an empty "upper" +// directory overlaid on the parents root. This is typically the +// case with the init layer of a container which is based on an image. +// If there is no "root" in the parent, we inherit the lower-id from +// the parent and start by making a copy in the parent's "upper" dir. +// This is typically the case for a container layer which copies +// its parent -init upper layer. + +// Additionally we also have a custom implementation of ApplyLayer +// which makes a recursive copy of the parent "root" layer using +// hardlinks to share file data, and then applies the layer on top +// of that. This means all child images share file (but not directory) +// data with the parent. + +// Driver contains information about the home directory and the list of active mounts that are created using this driver. +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter +} + +func init() { + graphdriver.Register("overlay", Init) +} + +// Init returns the NaiveDiffDriver, a native diff driver for overlay filesystem. +// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. +// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + if err := supportsOverlay(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs, graphdriver.FsMagicEcryptfs: + logrus.Errorf("'overlay' is not supported over %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the driver home dir + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), + } + + return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil +} + +func supportsOverlay() error { + // We can try to modprobe overlay first before looking at + // proc/filesystems for when overlay is supported + exec.Command("modprobe", "overlay").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if s.Text() == "nodev\toverlay" { + return nil + } + } + logrus.Error("'overlay' not found as a supported filesystem on this host. 
Please ensure kernel is new enough and has overlay support loaded.")
+	return graphdriver.ErrNotSupported
+}
+
+func (d *Driver) String() string {
+	return "overlay"
+}
+
+// Status returns current driver information in a two dimensional string array.
+// Output contains "Backing Filesystem" used in this implementation.
+func (d *Driver) Status() [][2]string {
+	return [][2]string{
+		{"Backing Filesystem", backingFs},
+	}
+}
+
+// GetMetadata returns metadata about the overlay driver such as root,
+// LowerDir, UpperDir, WorkDir and MergeDir used to store data.
+func (d *Driver) GetMetadata(id string) (map[string]string, error) {
+	dir := d.dir(id)
+	if _, err := os.Stat(dir); err != nil {
+		return nil, err
+	}
+
+	metadata := make(map[string]string)
+
+	// If id has a root, it is an image
+	rootDir := path.Join(dir, "root")
+	if _, err := os.Stat(rootDir); err == nil {
+		metadata["RootDir"] = rootDir
+		return metadata, nil
+	}
+
+	lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
+	if err != nil {
+		return nil, err
+	}
+
+	metadata["LowerDir"] = path.Join(d.dir(string(lowerID)), "root")
+	metadata["UpperDir"] = path.Join(dir, "upper")
+	metadata["WorkDir"] = path.Join(dir, "work")
+	metadata["MergedDir"] = path.Join(dir, "merged")
+
+	return metadata, nil
+}
+
+// Cleanup any state created by overlay which should be cleaned when the daemon
+// is being shut down. For now, we just have to unmount the bind mount we had
+// created.
+func (d *Driver) Cleanup() error {
+	return mount.Unmount(d.home)
+}
+
+// CreateReadWrite creates a layer that is writable for use as a container
+// file system.
+func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
+	return d.Create(id, parent, mountLabel, storageOpt)
+}
+
+// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id.
+// The parent filesystem is used to configure these directories for the overlay.
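
For orientation before reading Create: on disk, an image layer and a container layer built on it end up roughly like this under the driver home (a sketch; the ids and the home path are placeholders):

	/var/lib/storage-example/overlay/<image-id>/root/      complete image filesystem (the lower layer)
	/var/lib/storage-example/overlay/<ctr-id>/lower-id     file containing <image-id>
	/var/lib/storage-example/overlay/<ctr-id>/upper/       writable upper directory
	/var/lib/storage-example/overlay/<ctr-id>/work/        overlay work directory
	/var/lib/storage-example/overlay/<ctr-id>/merged/      mountpoint assembled by Get

Create lays out everything except the mount itself, which Get performs on demand.
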
+func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) (retErr error) { + + if len(storageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for overlay") + } + + dir := d.dir(id) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + // Toplevel images are just a "root" dir + if parent == "" { + if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil { + return err + } + return nil + } + + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return err + } + + // If parent has a root, just do an overlay to it + parentRoot := path.Join(parentDir, "root") + + if s, err := os.Lstat(parentRoot); err == nil { + if err := idtools.MkdirAs(path.Join(dir, "upper"), s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil { + return err + } + return nil + } + + // Otherwise, copy the upper and the lower-id from the parent + + lowerID, err := ioutil.ReadFile(path.Join(parentDir, "lower-id")) + if err != nil { + return err + } + + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerID, 0666); err != nil { + return err + } + + parentUpperDir := path.Join(parentDir, "upper") + s, err := os.Lstat(parentUpperDir) + if err != nil { + return err + } + + upperDir := path.Join(dir, "upper") + if err := idtools.MkdirAs(upperDir, s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + + return copyDir(parentUpperDir, upperDir, 0) +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get creates and mounts the required file system for the given id and returns the mount path. 
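
One usage note before the Get and Put bodies: the two must be strictly balanced, because the driver reference-counts the "merged" mountpoint (d.ctr) and only the final Put actually unmounts it. A hedged sketch of the expected calling pattern:

	dir, err := d.Get(id, "")
	if err != nil {
		return err
	}
	defer d.Put(id) // every successful Get needs a matching Put
	// ... read or populate dir ...
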
+func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
+	dir := d.dir(id)
+	if _, err := os.Stat(dir); err != nil {
+		return "", err
+	}
+	// If id has a root, just return it
+	rootDir := path.Join(dir, "root")
+	if _, err := os.Stat(rootDir); err == nil {
+		return rootDir, nil
+	}
+	mergedDir := path.Join(dir, "merged")
+	if count := d.ctr.Increment(mergedDir); count > 1 {
+		return mergedDir, nil
+	}
+	defer func() {
+		if err != nil {
+			if c := d.ctr.Decrement(mergedDir); c <= 0 {
+				syscall.Unmount(mergedDir, 0)
+			}
+		}
+	}()
+	lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
+	if err != nil {
+		return "", err
+	}
+	var (
+		lowerDir = path.Join(d.dir(string(lowerID)), "root")
+		upperDir = path.Join(dir, "upper")
+		workDir  = path.Join(dir, "work")
+		opts     = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
+	)
+	if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
+		return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
+	}
+	// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
+	// user namespace requires this to move a directory from lower to upper.
+	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
+	if err != nil {
+		return "", err
+	}
+	if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
+		return "", err
+	}
+	return mergedDir, nil
+}
+
+// Put unmounts the mount path created for the given id.
+func (d *Driver) Put(id string) error {
+	mountpoint := path.Join(d.dir(id), "merged")
+	if count := d.ctr.Decrement(mountpoint); count > 0 {
+		return nil
+	}
+	err := syscall.Unmount(mountpoint, 0)
+	if err != nil {
+		rootDir := path.Join(d.dir(id), "root")
+		if _, err := os.Stat(rootDir); err == nil {
+			// We weren't mounting a "merged" directory anyway
+			return nil
+		}
+		logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
+	}
+	return err
+}
+
+// ApplyDiff applies the new layer on top of the root; if the parent does not exist this will return an ErrApplyDiffFallback error.
+func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) {
+	dir := d.dir(id)
+
+	if parent == "" {
+		return 0, ErrApplyDiffFallback
+	}
+
+	parentRootDir := path.Join(d.dir(parent), "root")
+	if _, err := os.Stat(parentRootDir); err != nil {
+		return 0, ErrApplyDiffFallback
+	}
+
+	// We now know there is a parent, and it has a "root" directory containing
+	// the full root filesystem. We can just hardlink it and apply the
+	// layer. This relies on two things:
+	// 1) ApplyDiff is only run once on a clean (no writes to upper layer) container
+	// 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks)
+	// These are all currently true and are not expected to break
+
+	tmpRootDir, err := ioutil.TempDir(dir, "tmproot")
+	if err != nil {
+		return 0, err
+	}
+	defer func() {
+		if err != nil {
+			os.RemoveAll(tmpRootDir)
+		} else {
+			os.RemoveAll(path.Join(dir, "upper"))
+			os.RemoveAll(path.Join(dir, "work"))
+			os.RemoveAll(path.Join(dir, "merged"))
+			os.RemoveAll(path.Join(dir, "lower-id"))
+		}
+	}()
+
+	if err = copyDir(parentRootDir, tmpRootDir, copyHardlink); err != nil {
+		return 0, err
+	}
+
+	options := &archive.TarOptions{UIDMaps: d.uidMaps, GIDMaps: d.gidMaps}
+	if size, err = graphdriver.ApplyUncompressedLayer(tmpRootDir, diff, options); err != nil {
+		return 0, err
+	}
+
+	rootDir := path.Join(dir, "root")
+	if err := os.Rename(tmpRootDir, rootDir); err != nil {
+		return 0, err
+	}
+
+	return
+}
+
+// Exists checks to see if the id is already mounted.
+func (d *Driver) Exists(id string) bool {
+	_, err := os.Stat(d.dir(id))
+	return err == nil
+}
diff --git a/vendor/src/github.com/containers/storage/drivers/overlay/overlay_unsupported.go b/vendor/src/github.com/containers/storage/drivers/overlay/overlay_unsupported.go
new file mode 100644
index 00000000..3dbb4de4
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/drivers/overlay/overlay_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package overlay
diff --git a/vendor/src/github.com/containers/storage/drivers/overlay2/mount.go b/vendor/src/github.com/containers/storage/drivers/overlay2/mount.go
new file mode 100644
index 00000000..73bf9264
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/drivers/overlay2/mount.go
@@ -0,0 +1,88 @@
+// +build linux
+
+package overlay2
+
+import (
+	"bytes"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"os"
+	"runtime"
+	"syscall"
+
+	"github.com/containers/storage/pkg/reexec"
+)
+
+func init() {
+	reexec.Register("docker-mountfrom", mountFromMain)
+}
+
+func fatal(err error) {
+	fmt.Fprint(os.Stderr, err)
+	os.Exit(1)
+}
+
+type mountOptions struct {
+	Device string
+	Target string
+	Type   string
+	Label  string
+	Flag   uint32
+}
+
+func mountFrom(dir, device, target, mType, label string) error {
+	options := &mountOptions{
+		Device: device,
+		Target: target,
+		Type:   mType,
+		Flag:   0,
+		Label:  label,
+	}
+
+	cmd := reexec.Command("docker-mountfrom", dir)
+	w, err := cmd.StdinPipe()
+	if err != nil {
+		return fmt.Errorf("mountfrom error on pipe creation: %v", err)
+	}
+
+	output := bytes.NewBuffer(nil)
+	cmd.Stdout = output
+	cmd.Stderr = output
+
+	if err := cmd.Start(); err != nil {
+		return fmt.Errorf("mountfrom error on re-exec cmd: %v", err)
+	}
+	// write the options to the pipe for the mount re-exec to read
+	if err := json.NewEncoder(w).Encode(options); err != nil {
+		return fmt.Errorf("mountfrom json encode to pipe failed: %v", err)
+	}
+	w.Close()
+
+	if err := cmd.Wait(); err != nil {
+		return fmt.Errorf("mountfrom re-exec error: %v: output: %s", err, output)
+	}
+	return nil
+}
+
+// mountFromMain is the entry-point for docker-mountfrom on re-exec.
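
The re-exec exists only to shorten paths: the child chdirs into the driver home before mounting, so lowerdir entries can be the short relative "l/<id>" symlinks rather than absolute paths, which keeps the option string within the single-page limit that the driver's Get checks before mounting. Condensed, the child ends up doing roughly this (a sketch; the home path, ids, and link names are placeholders):

	os.Chdir("/var/lib/storage-example/overlay2")
	syscall.Mount("overlay", "<id>/merged", "overlay", 0,
		"lowerdir=l/AAAA:l/BBBB,upperdir=<id>/diff,workdir=<id>/work")
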
+func mountFromMain() { + runtime.LockOSThread() + flag.Parse() + + var options *mountOptions + + if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil { + fatal(err) + } + + if err := os.Chdir(flag.Arg(0)); err != nil { + fatal(err) + } + + if err := syscall.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil { + fatal(err) + } + + os.Exit(0) +} diff --git a/vendor/src/github.com/containers/storage/drivers/overlay2/overlay.go b/vendor/src/github.com/containers/storage/drivers/overlay2/overlay.go new file mode 100644 index 00000000..b9a1fa85 --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/overlay2/overlay.go @@ -0,0 +1,514 @@ +// +build linux + +package overlay2 + +import ( + "bufio" + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "strconv" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + + "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/parsers" + "github.com/containers/storage/pkg/parsers/kernel" + + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + // untar defines the untar method + untar = chrootarchive.UntarUncompressed +) + +// This backend uses the overlay union filesystem for containers +// with diff directories for each layer. + +// This version of the overlay driver requires at least kernel +// 4.0.0 in order to support mounting multiple diff directories. + +// Each container/image has at least a "diff" directory and "link" file. +// If there is also a "lower" file when there are diff layers +// below as well as "merged" and "work" directories. The "diff" directory +// has the upper layer of the overlay and is used to capture any +// changes to the layer. The "lower" file contains all the lower layer +// mounts separated by ":" and ordered from uppermost to lowermost +// layers. The overlay itself is mounted in the "merged" directory, +// and the "work" dir is needed for overlay to work. + +// The "link" file for each layer contains a unique string for the layer. +// Under the "l" directory at the root there will be a symbolic link +// with that unique string pointing the "diff" directory for the layer. +// The symbolic links are used to reference lower layers in the "lower" +// file and on mount. The links are used to shorten the total length +// of a layer reference without requiring changes to the layer identifier +// or root directory. Mounts are always done relative to root and +// referencing the symbolic links in order to ensure the number of +// lower directories can fit in a single page for making the mount +// syscall. A hard upper limit of 128 lower layers is enforced to ensure +// that mounts do not fail due to length. + +const ( + driverName = "overlay2" + linkDir = "l" + lowerFile = "lower" + maxDepth = 128 + + // idLength represents the number of random characters + // which can be used to create the unique link identifer + // for every layer. If this value is too long then the + // page size limit for the mount command may be exceeded. + // The idLength should be selected such that following equation + // is true (512 is a buffer for label metadata). 
+ // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) + idLength = 26 +) + +// Driver contains information about the home directory and the list of active mounts that are created using this driver. +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter +} + +var backingFs = "" + +func init() { + graphdriver.Register(driverName, Init) +} + +// Init returns the a native diff driver for overlay filesystem. +// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. +// If a overlay filesystem is not supported over a existing filesystem then error graphdriver.ErrIncompatibleFS is returned. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + opts, err := parseOptions(options) + if err != nil { + return nil, err + } + + if err := supportsOverlay(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + // require kernel 4.0.0 to ensure multiple lower dirs are supported + v, err := kernel.GetKernelVersion() + if err != nil { + return nil, err + } + if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 { + if !opts.overrideKernelCheck { + return nil, graphdriver.ErrNotSupported + } + logrus.Warnf("Using pre-4.0.0 kernel for overlay2, mount failures may require kernel update") + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs + switch fsMagic { + case graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: + logrus.Errorf("'overlay2' is not supported over %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the driver home dir + if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), + } + + return d, nil +} + +type overlayOptions struct { + overrideKernelCheck bool +} + +func parseOptions(options []string) (*overlayOptions, error) { + o := &overlayOptions{} + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "overlay2.override_kernel_check": + o.overrideKernelCheck, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("overlay2: Unknown option %s\n", key) + } + } + return o, nil +} + +func supportsOverlay() error { + // We can try to modprobe overlay first before looking at + // proc/filesystems for when overlay is supported + exec.Command("modprobe", "overlay").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if s.Text() == "nodev\toverlay" { + return nil + } + } + logrus.Error("'overlay' not found as a supported filesystem on this host. 
Please ensure kernel is new enough and has overlay support loaded.")
+	return graphdriver.ErrNotSupported
+}
+
+func (d *Driver) String() string {
+	return driverName
+}
+
+// Status returns current driver information in a two dimensional string array.
+// Output contains "Backing Filesystem" used in this implementation.
+func (d *Driver) Status() [][2]string {
+	return [][2]string{
+		{"Backing Filesystem", backingFs},
+	}
+}
+
+// GetMetadata returns metadata about the overlay driver such as
+// LowerDir, UpperDir, WorkDir and MergeDir used to store data.
+func (d *Driver) GetMetadata(id string) (map[string]string, error) {
+	dir := d.dir(id)
+	if _, err := os.Stat(dir); err != nil {
+		return nil, err
+	}
+
+	metadata := map[string]string{
+		"WorkDir":   path.Join(dir, "work"),
+		"MergedDir": path.Join(dir, "merged"),
+		"UpperDir":  path.Join(dir, "diff"),
+	}
+
+	lowerDirs, err := d.getLowerDirs(id)
+	if err != nil {
+		return nil, err
+	}
+	if len(lowerDirs) > 0 {
+		metadata["LowerDir"] = strings.Join(lowerDirs, ":")
+	}
+
+	return metadata, nil
+}
+
+// Cleanup any state created by overlay which should be cleaned when the daemon
+// is being shut down. For now, we just have to unmount the bind mount we had
+// created.
+func (d *Driver) Cleanup() error {
+	return mount.Unmount(d.home)
+}
+
+// CreateReadWrite creates a layer that is writable for use as a container
+// file system.
+func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
+	return d.Create(id, parent, mountLabel, storageOpt)
+}
+
+// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id.
+// The parent filesystem is used to configure these directories for the overlay.
+func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) (retErr error) {
+
+	if len(storageOpt) != 0 {
+		return fmt.Errorf("--storage-opt is not supported for overlay")
+	}
+
+	dir := d.dir(id)
+
+	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
+	if err != nil {
+		return err
+	}
+	if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil {
+		return err
+	}
+	if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil {
+		return err
+	}
+
+	defer func() {
+		// Clean up on failure
+		if retErr != nil {
+			os.RemoveAll(dir)
+		}
+	}()
+
+	if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil {
+		return err
+	}
+
+	lid := generateID(idLength)
+	if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil {
+		return err
+	}
+
+	// Write link id to link file
+	if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil {
+		return err
+	}
+
+	// if no parent directory, done
+	if parent == "" {
+		return nil
+	}
+
+	if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil {
+		return err
+	}
+	if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil {
+		return err
+	}
+
+	lower, err := d.getLower(parent)
+	if err != nil {
+		return err
+	}
+	if lower != "" {
+		if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *Driver) getLower(parent string) (string, error) {
+	parentDir := d.dir(parent)
+
+	// Ensure parent exists
+	if _, err := os.Lstat(parentDir); err != nil {
+		return "", err
+	}
+
+	// Read Parent link file
+	parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link"))
+	if err != nil {
+		return "", err
+	}
+	lowers := []string{path.Join(linkDir, string(parentLink))}
+
+	parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile))
+	if err == nil {
+		parentLowers := strings.Split(string(parentLower), ":")
+		lowers = append(lowers, parentLowers...)
+	}
+	if len(lowers) > maxDepth {
+		return "", errors.New("max depth exceeded")
+	}
+	return strings.Join(lowers, ":"), nil
+}
+
+func (d *Driver) dir(id string) string {
+	return path.Join(d.home, id)
+}
+
+func (d *Driver) getLowerDirs(id string) ([]string, error) {
+	var lowersArray []string
+	lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile))
+	if err == nil {
+		for _, s := range strings.Split(string(lowers), ":") {
+			lp, err := os.Readlink(path.Join(d.home, s))
+			if err != nil {
+				return nil, err
+			}
+			lowersArray = append(lowersArray, path.Clean(path.Join(d.home, "link", lp)))
+		}
+	} else if !os.IsNotExist(err) {
+		return nil, err
+	}
+	return lowersArray, nil
+}
+
+// Remove cleans the directories that are created for this id.
+func (d *Driver) Remove(id string) error {
+	dir := d.dir(id)
+	lid, err := ioutil.ReadFile(path.Join(dir, "link"))
+	if err == nil {
+		if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil {
+			logrus.Debugf("Failed to remove link: %v", err)
+		}
+	}
+
+	if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	return nil
+}
+
+// Get creates and mounts the required file system for the given id and returns the mount path.
+func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
+	dir := d.dir(id)
+	if _, err := os.Stat(dir); err != nil {
+		return "", err
+	}
+
+	diffDir := path.Join(dir, "diff")
+	lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile))
+	if err != nil {
+		// If no lower, just return diff directory
+		if os.IsNotExist(err) {
+			return diffDir, nil
+		}
+		return "", err
+	}
+
+	mergedDir := path.Join(dir, "merged")
+	if count := d.ctr.Increment(mergedDir); count > 1 {
+		return mergedDir, nil
+	}
+	defer func() {
+		if err != nil {
+			if c := d.ctr.Decrement(mergedDir); c <= 0 {
+				syscall.Unmount(mergedDir, 0)
+			}
+		}
+	}()
+
+	workDir := path.Join(dir, "work")
+	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work"))
+	mountLabel = label.FormatMountLabel(opts, mountLabel)
+	if len(mountLabel) > syscall.Getpagesize() {
+		return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountLabel))
+	}
+
+	if err := mountFrom(d.home, "overlay", path.Join(id, "merged"), "overlay", mountLabel); err != nil {
+		return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
+	}
+
+	// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
+	// user namespace requires this to move a directory from lower to upper.
+	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
+	if err != nil {
+		return "", err
+	}
+
+	if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
+		return "", err
+	}
+
+	return mergedDir, nil
+}
+
+// Put unmounts the mount path created for the given id.
+func (d *Driver) Put(id string) error { + mountpoint := path.Join(d.dir(id), "merged") + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + err := syscall.Unmount(mountpoint, 0) + if err != nil { + if _, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)); err != nil { + // We didn't have a "lower" directory, so we weren't mounting a "merged" directory anyway + return nil + } + logrus.Debugf("Failed to unmount %s overlay: %v", id, err) + } + return err +} + +// Exists checks to see if the id is already mounted. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} + +// ApplyDiff applies the new layer into a root +func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) { + applyDir := d.getDiffPath(id) + + logrus.Debugf("Applying tar in %s", applyDir) + // Overlay doesn't need the parent id to apply the diff + if err := untar(diff, applyDir, &archive.TarOptions{ + UIDMaps: d.uidMaps, + GIDMaps: d.gidMaps, + WhiteoutFormat: archive.OverlayWhiteoutFormat, + }); err != nil { + return 0, err + } + + return d.DiffSize(id, parent) +} + +func (d *Driver) getDiffPath(id string) string { + dir := d.dir(id) + + return path.Join(dir, "diff") +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id, parent string) (size int64, err error) { + return directory.Size(d.getDiffPath(id)) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (d *Driver) Diff(id, parent string) (archive.Archive, error) { + diffPath := d.getDiffPath(id) + logrus.Debugf("Tar with options on %s", diffPath) + return archive.TarWithOptions(diffPath, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: d.uidMaps, + GIDMaps: d.gidMaps, + WhiteoutFormat: archive.OverlayWhiteoutFormat, + }) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + // Overlay doesn't have snapshots, so we need to get changes from all parent + // layers. + diffPath := d.getDiffPath(id) + layers, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + + return archive.OverlayChanges(layers, diffPath) +} diff --git a/vendor/src/github.com/containers/storage/drivers/overlay2/overlay_unsupported.go b/vendor/src/github.com/containers/storage/drivers/overlay2/overlay_unsupported.go new file mode 100644 index 00000000..e5ac4ca8 --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/overlay2/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay2 diff --git a/vendor/src/github.com/containers/storage/drivers/overlay2/randomid.go b/vendor/src/github.com/containers/storage/drivers/overlay2/randomid.go new file mode 100644 index 00000000..af5cb659 --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/overlay2/randomid.go @@ -0,0 +1,80 @@ +// +build linux + +package overlay2 + +import ( + "crypto/rand" + "encoding/base32" + "fmt" + "io" + "os" + "syscall" + "time" + + "github.com/Sirupsen/logrus" +) + +// generateID creates a new random string identifier with the given length +func generateID(l int) string { + const ( + // ensures we backoff for less than 450ms total. 
Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + size = (l*5 + 7) / 8 + u = make([]byte, size) + ) + // TODO: Include time component, counter component, random component + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + logrus.Errorf("error generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + s := base32.StdEncoding.EncodeToString(u) + + return s[:l] +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == syscall.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. + return true + } + } + + return false +} diff --git a/vendor/src/github.com/containers/storage/drivers/plugin.go b/vendor/src/github.com/containers/storage/drivers/plugin.go new file mode 100644 index 00000000..a76aae6e --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/plugin.go @@ -0,0 +1,32 @@ +// +build experimental + +package graphdriver + +import ( + "fmt" + "io" + + "github.com/containers/storage/pkg/plugins" +) + +type pluginClient interface { + // Call calls the specified method with the specified arguments for the plugin. 
+ Call(string, interface{}, interface{}) error + // Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream + Stream(string, interface{}) (io.ReadCloser, error) + // SendFile calls the specified method, and passes through the IO stream + SendFile(string, io.Reader, interface{}) error +} + +func lookupPlugin(name, home string, opts []string) (Driver, error) { + pl, err := plugins.Get(name, "GraphDriver") + if err != nil { + return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err) + } + return newPluginDriver(name, home, opts, pl.Client()) +} + +func newPluginDriver(name, home string, opts []string, c pluginClient) (Driver, error) { + proxy := &graphDriverProxy{name, c} + return proxy, proxy.Init(home, opts) +} diff --git a/vendor/src/github.com/containers/storage/drivers/plugin_unsupported.go b/vendor/src/github.com/containers/storage/drivers/plugin_unsupported.go new file mode 100644 index 00000000..daa7a170 --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/plugin_unsupported.go @@ -0,0 +1,7 @@ +// +build !experimental + +package graphdriver + +func lookupPlugin(name, home string, opts []string) (Driver, error) { + return nil, ErrNotSupported +} diff --git a/vendor/src/github.com/containers/storage/drivers/proxy.go b/vendor/src/github.com/containers/storage/drivers/proxy.go new file mode 100644 index 00000000..1bab2ef3 --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/proxy.go @@ -0,0 +1,225 @@ +// +build experimental + +package graphdriver + +import ( + "errors" + "fmt" + + "github.com/containers/storage/pkg/archive" +) + +type graphDriverProxy struct { + name string + client pluginClient +} + +type graphDriverRequest struct { + ID string `json:",omitempty"` + Parent string `json:",omitempty"` + MountLabel string `json:",omitempty"` +} + +type graphDriverResponse struct { + Err string `json:",omitempty"` + Dir string `json:",omitempty"` + Exists bool `json:",omitempty"` + Status [][2]string `json:",omitempty"` + Changes []archive.Change `json:",omitempty"` + Size int64 `json:",omitempty"` + Metadata map[string]string `json:",omitempty"` +} + +type graphDriverInitRequest struct { + Home string + Opts []string +} + +func (d *graphDriverProxy) Init(home string, opts []string) error { + args := &graphDriverInitRequest{ + Home: home, + Opts: opts, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Init", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) String() string { + return d.name +} + +func (d *graphDriverProxy) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.CreateReadWrite", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Create(id, parent, mountLabel string, storageOpt map[string]string) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Create", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Remove(id string) error { + args := &graphDriverRequest{ID: id} + var 
ret graphDriverResponse + if err := d.client.Call("GraphDriver.Remove", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) { + args := &graphDriverRequest{ + ID: id, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Get", args, &ret); err != nil { + return "", err + } + var err error + if ret.Err != "" { + err = errors.New(ret.Err) + } + return ret.Dir, err +} + +func (d *graphDriverProxy) Put(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Put", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Exists(id string) bool { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Exists", args, &ret); err != nil { + return false + } + return ret.Exists +} + +func (d *graphDriverProxy) Status() [][2]string { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Status", args, &ret); err != nil { + return nil + } + return ret.Status +} + +func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) { + args := &graphDriverRequest{ + ID: id, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.GetMetadata", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + return ret.Metadata, nil +} + +func (d *graphDriverProxy) Cleanup() error { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil { + return nil + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Diff(id, parent string) (archive.Archive, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + body, err := d.client.Stream("GraphDriver.Diff", args) + if err != nil { + return nil, err + } + return archive.Archive(body), nil +} + +func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Changes", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + + return ret.Changes, nil +} + +func (d *graphDriverProxy) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { + var ret graphDriverResponse + if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} + +func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.DiffSize", args, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} diff --git a/vendor/src/github.com/containers/storage/drivers/register/register_aufs.go b/vendor/src/github.com/containers/storage/drivers/register/register_aufs.go new file mode 100644 index 00000000..7743dced --- /dev/null +++ 
b/vendor/src/github.com/containers/storage/drivers/register/register_aufs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_aufs,linux + +package register + +import ( + // register the aufs graphdriver + _ "github.com/containers/storage/drivers/aufs" +) diff --git a/vendor/src/github.com/containers/storage/drivers/register/register_btrfs.go b/vendor/src/github.com/containers/storage/drivers/register/register_btrfs.go new file mode 100644 index 00000000..40ff1cdd --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/register/register_btrfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_btrfs,linux + +package register + +import ( + // register the btrfs graphdriver + _ "github.com/containers/storage/drivers/btrfs" +) diff --git a/vendor/src/github.com/containers/storage/drivers/register/register_devicemapper.go b/vendor/src/github.com/containers/storage/drivers/register/register_devicemapper.go new file mode 100644 index 00000000..08ac984b --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/register/register_devicemapper.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_devicemapper,linux + +package register + +import ( + // register the devmapper graphdriver + _ "github.com/containers/storage/drivers/devmapper" +) diff --git a/vendor/src/github.com/containers/storage/drivers/register/register_overlay.go b/vendor/src/github.com/containers/storage/drivers/register/register_overlay.go new file mode 100644 index 00000000..3b6c5f85 --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/register/register_overlay.go @@ -0,0 +1,9 @@ +// +build !exclude_graphdriver_overlay,linux + +package register + +import ( + // register the overlay graphdriver + _ "github.com/containers/storage/drivers/overlay" + _ "github.com/containers/storage/drivers/overlay2" +) diff --git a/vendor/src/github.com/containers/storage/drivers/register/register_vfs.go b/vendor/src/github.com/containers/storage/drivers/register/register_vfs.go new file mode 100644 index 00000000..691ce859 --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/register/register_vfs.go @@ -0,0 +1,6 @@ +package register + +import ( + // register vfs + _ "github.com/containers/storage/drivers/vfs" +) diff --git a/vendor/src/github.com/containers/storage/drivers/register/register_windows.go b/vendor/src/github.com/containers/storage/drivers/register/register_windows.go new file mode 100644 index 00000000..048b2709 --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/register/register_windows.go @@ -0,0 +1,6 @@ +package register + +import ( + // register the windows graph driver + _ "github.com/containers/storage/drivers/windows" +) diff --git a/vendor/src/github.com/containers/storage/drivers/register/register_zfs.go b/vendor/src/github.com/containers/storage/drivers/register/register_zfs.go new file mode 100644 index 00000000..c748468e --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/register/register_zfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris + +package register + +import ( + // register the zfs driver + _ "github.com/containers/storage/drivers/zfs" +) diff --git a/vendor/src/github.com/containers/storage/drivers/vfs/driver.go b/vendor/src/github.com/containers/storage/drivers/vfs/driver.go new file mode 100644 index 00000000..36a975e3 --- /dev/null +++ b/vendor/src/github.com/containers/storage/drivers/vfs/driver.go @@ -0,0 +1,145 @@ +package vfs + +import ( + "fmt" + "os" + 
"path/filepath" + + "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/chrootarchive" + "github.com/containers/storage/pkg/idtools" + + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + // CopyWithTar defines the copy method to use. + CopyWithTar = chrootarchive.CopyWithTar +) + +func init() { + graphdriver.Register("vfs", Init) +} + +// Init returns a new VFS driver. +// This sets the home directory for the driver and returns NaiveDiffDriver. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + return nil, err + } + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +// Driver holds information about the driver, home directory of the driver. +// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations. +// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. +// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +func (d *Driver) String() string { + return "vfs" +} + +// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information. +func (d *Driver) Status() [][2]string { + return nil +} + +// GetMetadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { + return d.Create(id, parent, mountLabel, storageOpt) +} + +// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. +func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { + if len(storageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for vfs") + } + + dir := d.dir(id) + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(filepath.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0755, rootUID, rootGID); err != nil { + return err + } + opts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(opts); err == nil { + label.SetFileLabel(dir, mountLabel) + } + if parent == "" { + return nil + } + parentDir, err := d.Get(parent, "") + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + if err := CopyWithTar(parentDir, dir); err != nil { + return err + } + return nil +} + +func (d *Driver) dir(id string) string { + return filepath.Join(d.home, "dir", filepath.Base(id)) +} + +// Remove deletes the content from the directory for a given id. 
+func (d *Driver) Remove(id string) error {
+	if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	return nil
+}
+
+// Get returns the directory for the given id.
+func (d *Driver) Get(id, mountLabel string) (string, error) {
+	dir := d.dir(id)
+	if st, err := os.Stat(dir); err != nil {
+		return "", err
+	} else if !st.IsDir() {
+		return "", fmt.Errorf("%s: not a directory", dir)
+	}
+	return dir, nil
+}
+
+// Put is a noop for vfs that returns nil for the error, since this driver has no runtime resources to clean up.
+func (d *Driver) Put(id string) error {
+	// The vfs driver has no runtime resources (e.g. mounts)
+	// to clean up, so we don't need anything here
+	return nil
+}
+
+// Exists checks to see if the directory exists for the given id.
+func (d *Driver) Exists(id string) bool {
+	_, err := os.Stat(d.dir(id))
+	return err == nil
+}
diff --git a/vendor/src/github.com/containers/storage/drivers/zfs/MAINTAINERS b/vendor/src/github.com/containers/storage/drivers/zfs/MAINTAINERS
new file mode 100644
index 00000000..9c270c54
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/drivers/zfs/MAINTAINERS
@@ -0,0 +1,2 @@
+Jörg Thalheim (@Mic92)
+Arthur Gautier (@baloose)
diff --git a/vendor/src/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/src/github.com/containers/storage/drivers/zfs/zfs.go
new file mode 100644
index 00000000..16426337
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/drivers/zfs/zfs.go
@@ -0,0 +1,405 @@
+// +build linux freebsd solaris
+
+package zfs
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/containers/storage/drivers"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/mount"
+	"github.com/containers/storage/pkg/parsers"
+	zfs "github.com/mistifyio/go-zfs"
+	"github.com/opencontainers/runc/libcontainer/label"
+)
+
+type zfsOptions struct {
+	fsName    string
+	mountPath string
+}
+
+func init() {
+	graphdriver.Register("zfs", Init)
+}
+
+// Logger returns a zfs logger implementation.
+type Logger struct{}
+
+// Log wraps log messages from the ZFS driver with a '[zfs]' prefix.
+func (*Logger) Log(cmd []string) {
+	logrus.Debugf("[zfs] %s", strings.Join(cmd, " "))
+}
+
+// Init returns a new ZFS driver.
+// It takes the base mount path and an array of options which are represented as key value pairs.
+// Each option is in the form key=value. 'zfs.fsname' is expected to be a valid key in the options.
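
For illustration, how a caller selects the dataset through the 'zfs.fsname' option described above (not part of the vendored file; the base path and dataset name are made up). Any other option key is rejected by parseOptions below with "Unknown option":

package main

import (
	"fmt"

	"github.com/containers/storage/drivers/zfs"
)

func main() {
	// Without zfs.fsname, Init falls back to checkRootdirFs and
	// lookupZfsDataset to discover the dataset backing the base path.
	d, err := zfs.Init(
		"/var/lib/containers/storage/zfs",       // base mount path
		[]string{"zfs.fsname=rpool/containers"}, // dataset to use
		nil, nil, // no uid/gid remapping
	)
	if err != nil {
		// e.g. graphdriver.ErrPrerequisites when the zfs tools
		// or /dev/zfs are unavailable
		panic(err)
	}
	fmt.Println(d.String()) // "zfs"
}
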
+func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+	var err error
+
+	if _, err := exec.LookPath("zfs"); err != nil {
+		logrus.Debugf("[zfs] zfs command is not available: %v", err)
+		return nil, graphdriver.ErrPrerequisites
+	}
+
+	file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600)
+	if err != nil {
+		logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err)
+		return nil, graphdriver.ErrPrerequisites
+	}
+	defer file.Close()
+
+	options, err := parseOptions(opt)
+	if err != nil {
+		return nil, err
+	}
+	options.mountPath = base
+
+	rootdir := path.Dir(base)
+
+	if options.fsName == "" {
+		err = checkRootdirFs(rootdir)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if options.fsName == "" {
+		options.fsName, err = lookupZfsDataset(rootdir)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	zfs.SetLogger(new(Logger))
+
+	filesystems, err := zfs.Filesystems(options.fsName)
+	if err != nil {
+		return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err)
+	}
+
+	filesystemsCache := make(map[string]bool, len(filesystems))
+	var rootDataset *zfs.Dataset
+	for _, fs := range filesystems {
+		if fs.Name == options.fsName {
+			rootDataset = fs
+		}
+		filesystemsCache[fs.Name] = true
+	}
+
+	if rootDataset == nil {
+		return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName)
+	}
+
+	if err := mount.MakePrivate(base); err != nil {
+		return nil, err
+	}
+	d := &Driver{
+		dataset:          rootDataset,
+		options:          options,
+		filesystemsCache: filesystemsCache,
+		uidMaps:          uidMaps,
+		gidMaps:          gidMaps,
+		ctr:              graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
+	}
+	return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
+}
+
+func parseOptions(opt []string) (zfsOptions, error) {
+	var options zfsOptions
+	options.fsName = ""
+	for _, option := range opt {
+		key, val, err := parsers.ParseKeyValueOpt(option)
+		if err != nil {
+			return options, err
+		}
+		key = strings.ToLower(key)
+		switch key {
+		case "zfs.fsname":
+			options.fsName = val
+		default:
+			return options, fmt.Errorf("Unknown option %s", key)
+		}
+	}
+	return options, nil
+}
+
+func lookupZfsDataset(rootdir string) (string, error) {
+	var stat syscall.Stat_t
+	if err := syscall.Stat(rootdir, &stat); err != nil {
+		return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err)
+	}
+	wantedDev := stat.Dev
+
+	mounts, err := mount.GetMounts()
+	if err != nil {
+		return "", err
+	}
+	for _, m := range mounts {
+		if err := syscall.Stat(m.Mountpoint, &stat); err != nil {
+			logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err)
+			continue // may fail on fuse file systems
+		}
+
+		if stat.Dev == wantedDev && m.Fstype == "zfs" {
+			return m.Source, nil
+		}
+	}
+
+	return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir)
+}
+
+// Driver holds information about the driver, such as zfs dataset, options and cache.
+type Driver struct {
+	dataset          *zfs.Dataset
+	options          zfsOptions
+	sync.Mutex       // protects filesystem cache against concurrent access
+	filesystemsCache map[string]bool
+	uidMaps          []idtools.IDMap
+	gidMaps          []idtools.IDMap
+	ctr              *graphdriver.RefCounter
+}
+
+func (d *Driver) String() string {
+	return "zfs"
+}
+
+// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver.
+func (d *Driver) Cleanup() error {
+	return nil
+}
+
+// Status returns information about the ZFS filesystem.
+// It returns a two-dimensional array of information such as pool name,
+// dataset name, disk usage, parent quota and compression used.
+// Currently it returns 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent',
+// 'Space Available', 'Parent Quota' and 'Compression'.
+func (d *Driver) Status() [][2]string {
+	parts := strings.Split(d.dataset.Name, "/")
+	pool, err := zfs.GetZpool(parts[0])
+
+	var poolName, poolHealth string
+	if err == nil {
+		poolName = pool.Name
+		poolHealth = pool.Health
+	} else {
+		poolName = fmt.Sprintf("error while getting pool information %v", err)
+		poolHealth = "not available"
+	}
+
+	quota := "no"
+	if d.dataset.Quota != 0 {
+		quota = strconv.FormatUint(d.dataset.Quota, 10)
+	}
+
+	return [][2]string{
+		{"Zpool", poolName},
+		{"Zpool Health", poolHealth},
+		{"Parent Dataset", d.dataset.Name},
+		{"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)},
+		{"Space Available", strconv.FormatUint(d.dataset.Avail, 10)},
+		{"Parent Quota", quota},
+		{"Compression", d.dataset.Compression},
+	}
+}
+
+// GetMetadata returns image/container metadata related to the graph driver.
+func (d *Driver) GetMetadata(id string) (map[string]string, error) {
+	return nil, nil
+}
+
+func (d *Driver) cloneFilesystem(name, parentName string) error {
+	snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond())
+	parentDataset := zfs.Dataset{Name: parentName}
+	snapshot, err := parentDataset.Snapshot(snapshotName, false /* recursive */)
+	if err != nil {
+		return err
+	}
+
+	_, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"})
+	if err == nil {
+		d.Lock()
+		d.filesystemsCache[name] = true
+		d.Unlock()
+	}
+
+	if err != nil {
+		snapshot.Destroy(zfs.DestroyDeferDeletion)
+		return err
+	}
+	return snapshot.Destroy(zfs.DestroyDeferDeletion)
+}
+
+func (d *Driver) zfsPath(id string) string {
+	return d.options.fsName + "/" + id
+}
+
+func (d *Driver) mountPath(id string) string {
+	return path.Join(d.options.mountPath, "graph", getMountpoint(id))
+}
+
+// CreateReadWrite creates a layer that is writable for use as a container
+// file system.
+func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
+	return d.Create(id, parent, mountLabel, storageOpt)
+}
+
+// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent.
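
Create's layering below maps directly onto ZFS primitives: a child layer is a clone of a snapshot of the parent dataset, and the snapshot is destroyed with deferred deletion once the clone exists (cloneFilesystem above). A sketch of that sequence using the same go-zfs calls (not part of the vendored file; the dataset names are made up):

package main

import (
	"fmt"

	zfs "github.com/mistifyio/go-zfs"
)

// cloneLayer mirrors cloneFilesystem above: snapshot the parent, clone the
// snapshot with mountpoint=legacy so the driver decides when and where to
// mount it, then destroy the snapshot with deferred deletion so it goes away
// as soon as the clone no longer depends on it.
func cloneLayer(parentName, name string) error {
	parent := zfs.Dataset{Name: parentName}
	snapshot, err := parent.Snapshot("example-snap", false /* recursive */)
	if err != nil {
		return err
	}
	if _, err := snapshot.Clone(name, map[string]string{"mountpoint": "legacy"}); err != nil {
		snapshot.Destroy(zfs.DestroyDeferDeletion)
		return err
	}
	return snapshot.Destroy(zfs.DestroyDeferDeletion)
}

func main() {
	// With the driver these names would be d.zfsPath(parent) and d.zfsPath(id).
	if err := cloneLayer("rpool/containers/base", "rpool/containers/child"); err != nil {
		fmt.Println("clone failed:", err)
	}
}
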
+func (d *Driver) Create(id string, parent string, mountLabel string, storageOpt map[string]string) error { + err := d.create(id, parent, storageOpt) + if err == nil { + return nil + } + if zfsError, ok := err.(*zfs.Error); ok { + if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") { + return err + } + // aborted build -> cleanup + } else { + return err + } + + dataset := zfs.Dataset{Name: d.zfsPath(id)} + if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil { + return err + } + + // retry + return d.create(id, parent, storageOpt) +} + +func (d *Driver) create(id, parent string, storageOpt map[string]string) error { + name := d.zfsPath(id) + quota, err := parseStorageOpt(storageOpt) + if err != nil { + return err + } + if parent == "" { + mountoptions := map[string]string{"mountpoint": "legacy"} + fs, err := zfs.CreateFilesystem(name, mountoptions) + if err == nil { + err = setQuota(name, quota) + if err == nil { + d.Lock() + d.filesystemsCache[fs.Name] = true + d.Unlock() + } + } + return err + } + err = d.cloneFilesystem(name, d.zfsPath(parent)) + if err == nil { + err = setQuota(name, quota) + } + return err +} + +func parseStorageOpt(storageOpt map[string]string) (string, error) { + // Read size to change the disk quota per container + for k, v := range storageOpt { + key := strings.ToLower(k) + switch key { + case "size": + return v, nil + default: + return "0", fmt.Errorf("Unknown option %s", key) + } + } + return "0", nil +} + +func setQuota(name string, quota string) error { + if quota == "0" { + return nil + } + fs, err := zfs.GetDataset(name) + if err != nil { + return err + } + return fs.SetProperty("quota", quota) +} + +// Remove deletes the dataset, filesystem and the cache for the given id. +func (d *Driver) Remove(id string) error { + name := d.zfsPath(id) + dataset := zfs.Dataset{Name: name} + err := dataset.Destroy(zfs.DestroyRecursive) + if err == nil { + d.Lock() + delete(d.filesystemsCache, name) + d.Unlock() + } + return err +} + +// Get returns the mountpoint for the given id after creating the target directories if necessary. +func (d *Driver) Get(id, mountLabel string) (string, error) { + mountpoint := d.mountPath(id) + if count := d.ctr.Increment(mountpoint); count > 1 { + return mountpoint, nil + } + + filesystem := d.zfsPath(id) + options := label.FormatMountLabel("", mountLabel) + logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, options) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + d.ctr.Decrement(mountpoint) + return "", err + } + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { + d.ctr.Decrement(mountpoint) + return "", err + } + + if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil { + d.ctr.Decrement(mountpoint) + return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) + } + + // this could be our first mount after creation of the filesystem, and the root dir may still have root + // permissions instead of the remapped root uid:gid (if user namespaces are enabled): + if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { + mount.Unmount(mountpoint) + d.ctr.Decrement(mountpoint) + return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err) + } + + return mountpoint, nil +} + +// Put removes the existing mountpoint for the given id if it exists. 
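
Get and Put below are reference counted through the RefCounter created in Init: only the first Get of an id actually mounts the filesystem, and only the Put that drops the count back to zero unmounts it. A sketch of the pairing a caller must maintain (not part of the vendored file; d and the id are placeholders):

package example

import (
	graphdriver "github.com/containers/storage/drivers"
)

// withLayer pairs every successful Get with a Put, the discipline the
// refcounting in Get/Put relies on.
func withLayer(d graphdriver.Driver, id string, fn func(dir string) error) error {
	dir, err := d.Get(id, "") // first reference mounts; later ones just count
	if err != nil {
		return err
	}
	defer d.Put(id) // unmounts only when the count reaches zero
	return fn(dir)
}
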
+func (d *Driver) Put(id string) error {
+	mountpoint := d.mountPath(id)
+	if count := d.ctr.Decrement(mountpoint); count > 0 {
+		return nil
+	}
+	mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint)
+	if err != nil || !mounted {
+		return err
+	}
+
+	logrus.Debugf(`[zfs] unmount("%s")`, mountpoint)
+
+	err = mount.Unmount(mountpoint)
+	if err != nil {
+		return fmt.Errorf("error unmounting %s: %v", mountpoint, err)
+	}
+	return err
+}
+
+// Exists checks to see if the cache entry exists for the given id.
+func (d *Driver) Exists(id string) bool {
+	d.Lock()
+	defer d.Unlock()
+	return d.filesystemsCache[d.zfsPath(id)]
+}
diff --git a/vendor/src/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/src/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
new file mode 100644
index 00000000..a25e2a45
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/drivers/zfs/zfs_freebsd.go
@@ -0,0 +1,38 @@
+package zfs
+
+import (
+	"fmt"
+	"strings"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/containers/storage/drivers"
+)
+
+func checkRootdirFs(rootdir string) error {
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(rootdir, &buf); err != nil {
+		return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
+	}
+
+	// on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ]
+	if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) {
+		logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
+		return graphdriver.ErrPrerequisites
+	}
+
+	return nil
+}
+
+func getMountpoint(id string) string {
+	maxlen := 12
+
+	// we need to preserve filesystem suffix
+	suffix := strings.SplitN(id, "-", 2)
+
+	if len(suffix) > 1 {
+		return id[:maxlen] + "-" + suffix[1]
+	}
+
+	return id[:maxlen]
+}
diff --git a/vendor/src/github.com/containers/storage/drivers/zfs/zfs_linux.go b/vendor/src/github.com/containers/storage/drivers/zfs/zfs_linux.go
new file mode 100644
index 00000000..6aa41d90
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/drivers/zfs/zfs_linux.go
@@ -0,0 +1,27 @@
+package zfs
+
+import (
+	"fmt"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/containers/storage/drivers"
+)
+
+func checkRootdirFs(rootdir string) error {
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(rootdir, &buf); err != nil {
+		return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
+	}
+
+	if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs {
+		logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
+		return graphdriver.ErrPrerequisites
+	}
+
+	return nil
+}
+
+func getMountpoint(id string) string {
+	return id
+}
diff --git a/vendor/src/github.com/containers/storage/drivers/zfs/zfs_solaris.go b/vendor/src/github.com/containers/storage/drivers/zfs/zfs_solaris.go
new file mode 100644
index 00000000..56d09cac
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/drivers/zfs/zfs_solaris.go
@@ -0,0 +1,59 @@
+// +build solaris,cgo
+
+package zfs
+
+/*
+#include <sys/statvfs.h>
+#include <stdlib.h>
+
+static inline struct statvfs *getstatfs(char *s) {
+	struct statvfs *buf;
+	int err;
+	buf = (struct statvfs *)malloc(sizeof(struct statvfs));
+	err = statvfs(s, buf);
+	return buf;
+}
+*/
+import "C"
+import (
+	"path/filepath"
+	"strings"
+	"unsafe"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/containers/storage/drivers"
+)
+
+func checkRootdirFs(rootdir string) error {
+
+	cs := C.CString(filepath.Dir(rootdir))
+	buf := C.getstatfs(cs)
+
+	// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
+	if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
+		(buf.f_basetype[3] != 0) {
+		log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
+		C.free(unsafe.Pointer(buf))
+		return graphdriver.ErrPrerequisites
+	}
+
+	C.free(unsafe.Pointer(buf))
+	C.free(unsafe.Pointer(cs))
+	return nil
+}
+
+/* rootfs is introduced to comply with the OCI spec
+which states that the root filesystem must be mounted at /rootfs/ instead of /
+*/
+func getMountpoint(id string) string {
+	maxlen := 12
+
+	// we need to preserve filesystem suffix
+	suffix := strings.SplitN(id, "-", 2)
+
+	if len(suffix) > 1 {
+		return filepath.Join(id[:maxlen]+"-"+suffix[1], "rootfs", "root")
+	}
+
+	return filepath.Join(id[:maxlen], "rootfs", "root")
+}
diff --git a/vendor/src/github.com/containers/storage/drivers/zfs/zfs_unsupported.go b/vendor/src/github.com/containers/storage/drivers/zfs/zfs_unsupported.go
new file mode 100644
index 00000000..ce8daada
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/drivers/zfs/zfs_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux,!freebsd,!solaris
+
+package zfs
+
+func checkRootdirFs(rootdir string) error {
+	return nil
+}
+
+func getMountpoint(id string) string {
+	return id
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/archive/archive.go b/vendor/src/github.com/containers/storage/pkg/archive/archive.go
new file mode 100644
index 00000000..98197a02
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/archive/archive.go
@@ -0,0 +1,1147 @@
+package archive
+
+import (
+	"archive/tar"
+	"bufio"
+	"bytes"
+	"compress/bzip2"
+	"compress/gzip"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/containers/storage/pkg/fileutils"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/containers/storage/pkg/pools"
+	"github.com/containers/storage/pkg/promise"
+	"github.com/containers/storage/pkg/system"
+)
+
+type (
+	// Archive is an io.ReadCloser, combining the Read and Close interfaces.
+	Archive io.ReadCloser
+	// Reader is a type of io.Reader.
+	Reader io.Reader
+	// Compression represents the compression algorithm of a stream (or Uncompressed).
+	Compression int
+	// WhiteoutFormat is the format of whiteouts unpacked.
+	WhiteoutFormat int
+	// TarChownOptions wraps the chown options UID and GID.
+	TarChownOptions struct {
+		UID, GID int
+	}
+	// TarOptions wraps the tar options.
+	TarOptions struct {
+		IncludeFiles     []string
+		ExcludePatterns  []string
+		Compression      Compression
+		NoLchown         bool
+		UIDMaps          []idtools.IDMap
+		GIDMaps          []idtools.IDMap
+		ChownOpts        *TarChownOptions
+		IncludeSourceDir bool
+		// WhiteoutFormat is the expected on disk format for whiteout files.
+		// This format will be converted to the standard format on pack
+		// and from the standard format on unpack.
+		WhiteoutFormat WhiteoutFormat
+		// When unpacking, specifies whether overwriting a directory with a
+		// non-directory is allowed and vice versa.
+		NoOverwriteDirNonDir bool
+		// For each include when creating an archive, the included name will be
+		// replaced with the matching name from this map.
+		RebaseNames map[string]string
+	}
+
+	// Archiver allows the reuse of most utility functions of this package
+	// with a pluggable Untar function.
+	// Also, to facilitate the passing of specific id mappings for untar,
+	// an archiver can be created with maps which will then be passed to
+	// Untar operations.
+	Archiver struct {
+		Untar   func(io.Reader, string, *TarOptions) error
+		UIDMaps []idtools.IDMap
+		GIDMaps []idtools.IDMap
+	}
+
+	// breakoutError is used to differentiate errors related to breaking out
+	// of the archive's root directory. When testing archive breakout in the
+	// unit tests, this error is expected in order for the test to pass.
+	breakoutError error
+)
+
+var (
+	// ErrNotImplemented is the error returned for functions not implemented.
+	ErrNotImplemented = errors.New("Function not implemented")
+	defaultArchiver   = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil}
+)
+
+const (
+	// HeaderSize is the size in bytes of a tar header
+	HeaderSize = 512
+)
+
+const (
+	// Uncompressed represents an uncompressed stream.
+	Uncompressed Compression = iota
+	// Bzip2 is the bzip2 compression algorithm.
+	Bzip2
+	// Gzip is the gzip compression algorithm.
+	Gzip
+	// Xz is the xz compression algorithm.
+	Xz
+)
+
+const (
+	// AUFSWhiteoutFormat is the default format for whiteouts
+	AUFSWhiteoutFormat WhiteoutFormat = iota
+	// OverlayWhiteoutFormat formats whiteout according to the overlay
+	// standard.
+	OverlayWhiteoutFormat
+)
+
+// IsArchive checks for the magic bytes of a tar or any supported compression
+// algorithm.
+func IsArchive(header []byte) bool {
+	compression := DetectCompression(header)
+	if compression != Uncompressed {
+		return true
+	}
+	r := tar.NewReader(bytes.NewBuffer(header))
+	_, err := r.Next()
+	return err == nil
+}
+
+// IsArchivePath checks if the (possibly compressed) file at the given path
+// starts with a tar file header.
+func IsArchivePath(path string) bool {
+	file, err := os.Open(path)
+	if err != nil {
+		return false
+	}
+	defer file.Close()
+	rdr, err := DecompressStream(file)
+	if err != nil {
+		return false
+	}
+	r := tar.NewReader(rdr)
+	_, err = r.Next()
+	return err == nil
+}
+
+// DetectCompression detects the compression algorithm of the source.
+func DetectCompression(source []byte) Compression {
+	for compression, m := range map[Compression][]byte{
+		Bzip2: {0x42, 0x5A, 0x68},
+		Gzip:  {0x1F, 0x8B, 0x08},
+		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
+	} {
+		if len(source) < len(m) {
+			logrus.Debug("Len too short")
+			continue
+		}
+		if bytes.Equal(m, source[:len(m)]) {
+			return compression
+		}
+	}
+	return Uncompressed
+}
+
+func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
+	args := []string{"xz", "-d", "-c", "-q"}
+
+	return cmdStream(exec.Command(args[0], args[1:]...), archive)
+}
+
+// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
+	p := pools.BufioReader32KPool
+	buf := p.Get(archive)
+	bs, err := buf.Peek(10)
+	if err != nil && err != io.EOF {
+		// Note: we'll ignore any io.EOF error because there are some odd
+		// cases where the layer.tar file will be empty (zero bytes) and
+		// that results in an io.EOF from the Peek() call. So, in those
+		// cases we'll just treat it as a non-compressed stream and
+		// that means just create an empty layer.
+		// See Issue 18170
+		return nil, err
+	}
+
+	compression := DetectCompression(bs)
+	switch compression {
+	case Uncompressed:
+		readBufWrapper := p.NewReadCloserWrapper(buf, buf)
+		return readBufWrapper, nil
+	case Gzip:
+		gzReader, err := gzip.NewReader(buf)
+		if err != nil {
+			return nil, err
+		}
+		readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
+		return readBufWrapper, nil
+	case Bzip2:
+		bz2Reader := bzip2.NewReader(buf)
+		readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
+		return readBufWrapper, nil
+	case Xz:
+		xzReader, chdone, err := xzDecompress(buf)
+		if err != nil {
+			return nil, err
+		}
+		readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
+		return ioutils.NewReadCloserWrapper(readBufWrapper, func() error {
+			<-chdone
+			return readBufWrapper.Close()
+		}), nil
+	default:
+		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+	}
+}
+
+// CompressStream compresses dest with the specified compression algorithm.
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
+	p := pools.BufioWriter32KPool
+	buf := p.Get(dest)
+	switch compression {
+	case Uncompressed:
+		writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
+		return writeBufWrapper, nil
+	case Gzip:
+		gzWriter := gzip.NewWriter(dest)
+		writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
+		return writeBufWrapper, nil
+	case Bzip2, Xz:
+		// archive/bzip2 does not support writing, and there is no xz support at all
+		// However, this is not a problem as docker only currently generates gzipped tars
+		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+	default:
+		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+	}
+}
+
+// Extension returns the extension of a file that uses the specified compression algorithm.
+func (compression *Compression) Extension() string {
+	switch *compression {
+	case Uncompressed:
+		return "tar"
+	case Bzip2:
+		return "tar.bz2"
+	case Gzip:
+		return "tar.gz"
+	case Xz:
+		return "tar.xz"
+	}
+	return ""
+}
+
+type tarWhiteoutConverter interface {
+	ConvertWrite(*tar.Header, string, os.FileInfo) error
+	ConvertRead(*tar.Header, string) (bool, error)
+}
+
+type tarAppender struct {
+	TarWriter *tar.Writer
+	Buffer    *bufio.Writer
+
+	// for hardlink mapping
+	SeenFiles map[uint64]string
+	UIDMaps   []idtools.IDMap
+	GIDMaps   []idtools.IDMap
+
+	// For packing and unpacking whiteout files in the
+	// non-standard format. The whiteout files defined
+	// by the AUFS standard are used as the tar whiteout
+	// standard.
+	WhiteoutConverter tarWhiteoutConverter
+}
+
+// canonicalTarName provides a platform-independent and consistent posix-style
+// path for files and directories to be archived regardless of the platform.
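
For illustration, feeding a possibly-compressed layer file through DecompressStream above and walking its entries (not part of the vendored file; the input path is made up). DetectCompression sniffs the gzip, bzip2 or xz magic bytes, and xz input is piped through the external xz binary:

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	f, err := os.Open("layer.tar.gz") // any of .tar, .tar.gz, .tar.bz2, .tar.xz
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// DecompressStream returns a reader for the uncompressed tar stream.
	rdr, err := archive.DecompressStream(f)
	if err != nil {
		panic(err)
	}
	defer rdr.Close()

	tr := tar.NewReader(rdr)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(hdr.Name)
	}
}
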
+func canonicalTarName(name string, isDir bool) (string, error) {
+	name, err := CanonicalTarNameForPath(name)
+	if err != nil {
+		return "", err
+	}
+
+	// suffix with '/' for directories
+	if isDir && !strings.HasSuffix(name, "/") {
+		name += "/"
+	}
+	return name, nil
+}
+
+// addTarFile adds to the tar archive a file from `path` as `name`
+func (ta *tarAppender) addTarFile(path, name string) error {
+	fi, err := os.Lstat(path)
+	if err != nil {
+		return err
+	}
+
+	link := ""
+	if fi.Mode()&os.ModeSymlink != 0 {
+		if link, err = os.Readlink(path); err != nil {
+			return err
+		}
+	}
+
+	hdr, err := tar.FileInfoHeader(fi, link)
+	if err != nil {
+		return err
+	}
+	hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+	name, err = canonicalTarName(name, fi.IsDir())
+	if err != nil {
+		return fmt.Errorf("tar: cannot canonicalize path: %v", err)
+	}
+	hdr.Name = name
+
+	inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
+	if err != nil {
+		return err
+	}
+
+	// if it's not a directory and has more than 1 link,
+	// it's hardlinked, so set the type flag accordingly
+	if !fi.IsDir() && hasHardlinks(fi) {
+		// a link should have a name that it links to
+		// and that linked name should be first in the tar archive
+		if oldpath, ok := ta.SeenFiles[inode]; ok {
+			hdr.Typeflag = tar.TypeLink
+			hdr.Linkname = oldpath
+			hdr.Size = 0 // This must be here for the writer math to add up!
+		} else {
+			ta.SeenFiles[inode] = name
+		}
+	}
+
+	capability, _ := system.Lgetxattr(path, "security.capability")
+	if capability != nil {
+		hdr.Xattrs = make(map[string]string)
+		hdr.Xattrs["security.capability"] = string(capability)
+	}
+
+	// handle re-mapping container ID mappings back to host ID mappings before
+	// writing tar headers/files. We skip whiteout files because they were written
+	// by the kernel and already have proper ownership relative to the host
+	if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) {
+		uid, gid, err := getFileUIDGID(fi.Sys())
+		if err != nil {
+			return err
+		}
+		xUID, err := idtools.ToContainer(uid, ta.UIDMaps)
+		if err != nil {
+			return err
+		}
+		xGID, err := idtools.ToContainer(gid, ta.GIDMaps)
+		if err != nil {
+			return err
+		}
+		hdr.Uid = xUID
+		hdr.Gid = xGID
+	}
+
+	if ta.WhiteoutConverter != nil {
+		if err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi); err != nil {
+			return err
+		}
+	}
+
+	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+		return err
+	}
+
+	if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+		file, err := os.Open(path)
+		if err != nil {
+			return err
+		}
+
+		ta.Buffer.Reset(ta.TarWriter)
+		defer ta.Buffer.Reset(nil)
+		_, err = io.Copy(ta.Buffer, file)
+		file.Close()
+		if err != nil {
+			return err
+		}
+		err = ta.Buffer.Flush()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error {
+	// hdr.Mode is in linux format, which we can use for syscalls,
+	// but for os.Foo() calls we need the mode converted to os.FileMode,
+	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
+	hdrInfo := hdr.FileInfo()
+
+	switch hdr.Typeflag {
+	case tar.TypeDir:
+		// Create directory unless it exists as a directory already.
+ // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file + file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debug("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) + } + + // Lchown is not supported on Windows. + if Lchown && runtime.GOOS != "windows" { + if chownOpts == nil { + chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid} + } + if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { + return err + } + } + + var errors []string + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + if err == syscall.ENOTSUP { + // We ignore errors here because not all graphdrivers support + // xattrs *cough* old versions of AUFS *cough*. However only + // ENOTSUP should be emitted in that case, otherwise we still + // bail. + errors = append(errors, err.Error()) + continue + } + return err + } + + } + + if len(errors) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errors, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. 
+ aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) + + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(compressWriter), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + UIDMaps: options.UIDMaps, + GIDMaps: options.GIDMaps, + WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat), + } + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Errorf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Errorf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Errorf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. 
+ if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) + if err != nil { + logrus.Errorf("Error matching %s: %v", relFilePath, err) + return err + } + } + + if skip { + // If we want to skip this file and its a directory + // then we should first check to see if there's an + // excludes pattern (eg !dir/file) that starts with this + // dir. If so then we can't skip this dir. + + // Its not a dir then so we can just return/skip. + if !f.IsDir() { + return nil + } + + // No exceptions (!...) in patterns so just skip dir + if !exceptions { + return filepath.SkipDir + } + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range patterns { + if pat[0] != '!' { + continue + } + pat = pat[1:] + string(filepath.Separator) + if strings.HasPrefix(pat, dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + // if pipe is broken, stop writing tar stream to it + if err == io.ErrClosedPipe { + return err + } + } + return nil + }) + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. 
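
For illustration, driving TarWithOptions above with includes and excludes and writing the stream to a file (not part of the vendored file; the paths and patterns are made up). A leading '!' marks an exception pattern, as in .dockerignore:

package main

import (
	"io"
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	// Stream /some/dir as a gzipped tar, skipping everything under .git
	// except .git/HEAD.
	rdr, err := archive.TarWithOptions("/some/dir", &archive.TarOptions{
		Compression:     archive.Gzip,
		ExcludePatterns: []string{".git", "!.git/HEAD"},
	})
	if err != nil {
		panic(err)
	}
	defer rdr.Close()

	out, err := os.Create("/tmp/dir.tar.gz")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, rdr); err != nil {
		panic(err)
	}
}
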
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return err + } + whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID) + if err != nil { + return err + } + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing directory with a non-directory from the archive. + return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) + } + + if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing non-directory with a directory from the archive. + return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) + } + + if fi.IsDir() && hdr.Name == "." { + continue + } + + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + // if the options contain a uid & gid maps, convert header uid/gid + // entries using the maps such that lchown sets the proper mapped + // uid/gid after writing the file. We only perform this mapping if + // the file isn't already owned by the remapped root UID or GID, as + // that specific uid/gid has no mapping from container -> host, and + // those files already have the proper ownership for inside the + // container. 
+ if hdr.Uid != remappedRootUID { + xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + } + if hdr.Gid != remappedRootGID { + xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps) + if err != nil { + return err + } + hdr.Gid = xGID + } + + if whiteoutConverter != nil { + writeFile, err := whiteoutConverter.ConvertRead(hdr, path) + if err != nil { + return err + } + if !writeFile { + continue + } + } + + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + if err != nil { + return err + } + defer archive.Close() + + var options *TarOptions + if archiver.UIDMaps != nil || archiver.GIDMaps != nil { + options = &TarOptions{ + UIDMaps: archiver.UIDMaps, + GIDMaps: archiver.GIDMaps, + } + } + return archiver.Untar(archive, dst, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return defaultArchiver.TarUntar(src, dst) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. 
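
For illustration, unpacking a tarball with Untar above, which auto-detects compression and refuses path breakouts (not part of the vendored file; the paths are made up). Note that Unpack matches ExcludePatterns as plain name prefixes, unlike the pattern matching used when packing:

package main

import (
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	f, err := os.Open("/tmp/layer.tar.xz")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// NoLchown is useful when extracting without the privileges
	// needed to chown the created files.
	err = archive.Untar(f, "/tmp/extracted", &archive.TarOptions{
		NoLchown:        true,
		ExcludePatterns: []string{"dev/"},
	})
	if err != nil {
		panic(err)
	}
}
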
+func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + var options *TarOptions + if archiver.UIDMaps != nil || archiver.GIDMaps != nil { + options = &TarOptions{ + UIDMaps: archiver.UIDMaps, + GIDMaps: archiver.GIDMaps, + } + } + return archiver.Untar(archive, dst, options) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return defaultArchiver.UntarPath(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootUID, rootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) + if err != nil { + return err + } + // Create dst, copy src's content into it + logrus.Debugf("Creating dest directory: %s", dst) + if err := idtools.MkdirAllNewAs(dst, 0755, rootUID, rootGID); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return defaultArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. 
+	if dst[len(dst)-1] == os.PathSeparator {
+		dst = filepath.Join(dst, filepath.Base(src))
+	}
+	// Create the holding directory if necessary
+	if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil {
+		return err
+	}
+
+	r, w := io.Pipe()
+	errC := promise.Go(func() error {
+		defer w.Close()
+
+		srcF, err := os.Open(src)
+		if err != nil {
+			return err
+		}
+		defer srcF.Close()
+
+		hdr, err := tar.FileInfoHeader(srcSt, "")
+		if err != nil {
+			return err
+		}
+		hdr.Name = filepath.Base(dst)
+		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+		remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
+		if err != nil {
+			return err
+		}
+
+		// only perform mapping if the file being copied isn't already owned by the
+		// uid or gid of the remapped root in the container
+		if remappedRootUID != hdr.Uid {
+			xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps)
+			if err != nil {
+				return err
+			}
+			hdr.Uid = xUID
+		}
+		if remappedRootGID != hdr.Gid {
+			xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps)
+			if err != nil {
+				return err
+			}
+			hdr.Gid = xGID
+		}
+
+		tw := tar.NewWriter(w)
+		defer tw.Close()
+		if err := tw.WriteHeader(hdr); err != nil {
+			return err
+		}
+		if _, err := io.Copy(tw, srcF); err != nil {
+			return err
+		}
+		return nil
+	})
+	defer func() {
+		if er := <-errC; err == nil && er != nil {
+			err = er
+		}
+	}()
+
+	err = archiver.Untar(r, filepath.Dir(dst), nil)
+	if err != nil {
+		r.CloseWithError(err)
+	}
+	return err
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+//
+// Destination handling is done in an operating-system-specific manner
+// depending on where the daemon is running. If `dst` ends with a trailing
+// slash the final destination path will be `dst/base(src)` (Linux) or
+// `dst\base(src)` (Windows).
+func CopyFileWithTar(src, dst string) (err error) {
+	return defaultArchiver.CopyFileWithTar(src, dst)
+}
+
+// cmdStream executes a command, and returns its stdout as a stream.
+// If the command fails to run or doesn't complete successfully, an error
+// will be returned, including anything written on stderr.
+func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) {
+	chdone := make(chan struct{})
+	cmd.Stdin = input
+	pipeR, pipeW := io.Pipe()
+	cmd.Stdout = pipeW
+	var errBuf bytes.Buffer
+	cmd.Stderr = &errBuf
+
+	// Run the command and return the pipe
+	if err := cmd.Start(); err != nil {
+		return nil, nil, err
+	}
+
+	// Copy stdout to the returned pipe
+	go func() {
+		if err := cmd.Wait(); err != nil {
+			pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
+		} else {
+			pipeW.Close()
+		}
+		close(chdone)
+	}()
+
+	return pipeR, chdone, nil
+}
+
+// NewTempArchive reads the content of src into a temporary file, and returns the contents
+// of that file as an archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
+	f, err := ioutil.TempFile(dir, "")
+	if err != nil {
+		return nil, err
+	}
+	if _, err := io.Copy(f, src); err != nil {
+		return nil, err
+	}
+	if _, err := f.Seek(0, 0); err != nil {
+		return nil, err
+	}
+	st, err := f.Stat()
+	if err != nil {
+		return nil, err
+	}
+	size := st.Size()
+	return &TempArchive{File: f, Size: size}, nil
+}
+
+// TempArchive is a temporary archive.
The archive can only be read once - as soon as reading completes, +// the file will be deleted. +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/vendor/src/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/src/github.com/containers/storage/pkg/archive/archive_linux.go new file mode 100644 index 00000000..e944ca2a --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/archive/archive_linux.go @@ -0,0 +1,91 @@ +package archive + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containers/storage/pkg/system" +) + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + return overlayWhiteoutConverter{} + } + return nil +} + +type overlayWhiteoutConverter struct{} + +func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) error { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0600 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the prefix + opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + return err + } + if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' { + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + *hdr = tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + } + } + + return nil +} + +func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + if err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil { + return false, err + } + if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil 
+} diff --git a/vendor/src/github.com/containers/storage/pkg/archive/archive_other.go b/vendor/src/github.com/containers/storage/pkg/archive/archive_other.go new file mode 100644 index 00000000..54acbf28 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/archive/archive_other.go @@ -0,0 +1,7 @@ +// +build !linux + +package archive + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + return nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/src/github.com/containers/storage/pkg/archive/archive_unix.go new file mode 100644 index 00000000..19d731fd --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/archive/archive_unix.go @@ -0,0 +1,112 @@ +// +build !windows + +package archive + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/containers/storage/pkg/system" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. 
+ +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + err = errors.New("cannot convert stat value to syscall.Stat_t") + return + } + + inode = uint64(s.Ino) + + // Currently go does not fill in the major/minors + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) + hdr.Devminor = int64(minor(uint64(s.Rdev))) + } + + return +} + +func getFileUIDGID(stat interface{}) (int, int, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t") + } + return int(s.Uid), int(s.Gid), nil +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= syscall.S_IFBLK + case tar.TypeChar: + mode |= syscall.S_IFCHR + case tar.TypeFifo: + mode |= syscall.S_IFIFO + } + + if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { + return err + } + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/src/github.com/containers/storage/pkg/archive/archive_windows.go new file mode 100644 index 00000000..828d3b9d --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/archive/archive_windows.go @@ -0,0 +1,70 @@ +// +build windows + +package archive + +import ( + "archive/tar" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. 
Since windows does not allow '/' or '\'
+	// in file names, it is mostly safe to replace; however, we must
+	// check just in case.
+	if strings.Contains(p, "/") {
+		return "", fmt.Errorf("Windows path contains forward slash: %s", p)
+	}
+	return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
+
+}
+
+// chmodTarEntry is used to adjust the file permissions used in tar header based
+// on the platform the archival is done.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+	perm &= 0755
+	// Add the x bit: make everything +x from windows
+	perm |= 0111
+
+	return perm
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
+	// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
+	return
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+	return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+	return nil
+}
+
+func getFileUIDGID(stat interface{}) (int, int, error) {
+	// no notion of file ownership mapping yet on Windows
+	return 0, 0, nil
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/archive/changes.go b/vendor/src/github.com/containers/storage/pkg/archive/changes.go
new file mode 100644
index 00000000..234b0a68
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/archive/changes.go
@@ -0,0 +1,446 @@
+package archive
+
+import (
+	"archive/tar"
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/pools"
+	"github.com/containers/storage/pkg/system"
+)
+
+// ChangeType represents the change type.
+type ChangeType int
+
+const (
+	// ChangeModify represents the modify operation.
+	ChangeModify = iota
+	// ChangeAdd represents the add operation.
+	ChangeAdd
+	// ChangeDelete represents the delete operation.
+	ChangeDelete
+)
+
+func (c ChangeType) String() string {
+	switch c {
+	case ChangeModify:
+		return "C"
+	case ChangeAdd:
+		return "A"
+	case ChangeDelete:
+		return "D"
+	}
+	return ""
+}
+
+// Change represents a change; it wraps the change type and path.
+// It describes changes to the files in the path with respect to the
+// parent layers. The change could be modify, add, delete.
+// This is used for layer diff.
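+//
+// Illustrative example (editorial, not part of the vendored source):
+//
+//	c := Change{Path: "/etc/hosts", Kind: ChangeModify}
+//	fmt.Println(c.String()) // prints "C /etc/hosts"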
+type Change struct {
+	Path string
+	Kind ChangeType
+}
+
+func (change *Change) String() string {
+	return fmt.Sprintf("%s %s", change.Kind, change.Path)
+}
+
+// for sort.Sort
+type changesByPath []Change
+
+func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
+func (c changesByPath) Len() int           { return len(c) }
+func (c changesByPath) Swap(i, j int)      { c[j], c[i] = c[i], c[j] }
+
+// GNU tar and the Go tar writer don't have sub-second mtime
+// precision, which is problematic when we apply changes via tar
+// files. We handle this by comparing for exact times, *or* for the
+// same second count when either a or b has exactly 0 nanoseconds.
+func sameFsTime(a, b time.Time) bool {
+	return a == b ||
+		(a.Unix() == b.Unix() &&
+			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
+
+func sameFsTimeSpec(a, b syscall.Timespec) bool {
+	return a.Sec == b.Sec &&
+		(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
+}
+
+// Changes walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func Changes(layers []string, rw string) ([]Change, error) {
+	return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
+}
+
+func aufsMetadataSkip(path string) (skip bool, err error) {
+	skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
+	if err != nil {
+		skip = true
+	}
+	return
+}
+
+func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+	f := filepath.Base(path)
+
+	// If there is a whiteout, then the file was removed
+	if strings.HasPrefix(f, WhiteoutPrefix) {
+		originalFile := f[len(WhiteoutPrefix):]
+		return filepath.Join(filepath.Dir(path), originalFile), nil
+	}
+
+	return "", nil
+}
+
+type skipChange func(string) (bool, error)
+type deleteChange func(string, string, os.FileInfo) (string, error)
+
+func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
+	var (
+		changes     []Change
+		changedDirs = make(map[string]struct{})
+	)
+
+	err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		path, err = filepath.Rel(rw, path)
+		if err != nil {
+			return err
+		}
+
+		// As this runs on the daemon side, file paths are OS specific.
+		path = filepath.Join(string(os.PathSeparator), path)
+
+		// Skip root
+		if path == string(os.PathSeparator) {
+			return nil
+		}
+
+		if sc != nil {
+			if skip, err := sc(path); skip {
+				return err
+			}
+		}
+
+		change := Change{
+			Path: path,
+		}
+
+		deletedFile, err := dc(rw, path, f)
+		if err != nil {
+			return err
+		}
+
+		// Find out what kind of modification happened
+		if deletedFile != "" {
+			change.Path = deletedFile
+			change.Kind = ChangeDelete
+		} else {
+			// Otherwise, the file was added
+			change.Kind = ChangeAdd
+
+			// ...Unless it already existed in a top layer, in which case, it's a modification
+			for _, layer := range layers {
+				stat, err := os.Stat(filepath.Join(layer, path))
+				if err != nil && !os.IsNotExist(err) {
+					return err
+				}
+				if err == nil {
+					// The file existed in the top layer, so that's a modification
+
+					// However, if it's a directory, maybe it wasn't actually modified.
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + changes = append(changes, Change{Path: parent, Kind: ChangeModify}) + changedDirs[parent] = struct{}{} + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. + return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild, _ := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. 
The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, newStat) || + bytes.Compare(oldChild.capability, newChild.capability) != 0 { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := ioutil.TempDir("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. 
+func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) {
+	reader, writer := io.Pipe()
+	go func() {
+		ta := &tarAppender{
+			TarWriter: tar.NewWriter(writer),
+			Buffer:    pools.BufioWriter32KPool.Get(nil),
+			SeenFiles: make(map[uint64]string),
+			UIDMaps:   uidMaps,
+			GIDMaps:   gidMaps,
+		}
+		// this buffer is needed for the duration of this piped stream
+		defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+		sort.Sort(changesByPath(changes))
+
+		// In general we log errors here but ignore them because
+		// during e.g. a diff operation the container can continue
+		// mutating the filesystem and we can see transient errors
+		// from this
+		for _, change := range changes {
+			if change.Kind == ChangeDelete {
+				whiteOutDir := filepath.Dir(change.Path)
+				whiteOutBase := filepath.Base(change.Path)
+				whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
+				timestamp := time.Now()
+				hdr := &tar.Header{
+					Name:       whiteOut[1:],
+					Size:       0,
+					ModTime:    timestamp,
+					AccessTime: timestamp,
+					ChangeTime: timestamp,
+				}
+				if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+					logrus.Debugf("Can't write whiteout header: %s", err)
+				}
+			} else {
+				path := filepath.Join(dir, change.Path)
+				if err := ta.addTarFile(path, change.Path[1:]); err != nil {
+					logrus.Debugf("Can't add file %s to tar: %s", path, err)
+				}
+			}
+		}
+
+		// Make sure to check the error on Close.
+		if err := ta.TarWriter.Close(); err != nil {
+			logrus.Debugf("Can't close layer: %s", err)
+		}
+		if err := writer.Close(); err != nil {
+			logrus.Debugf("failed to close Changes writer: %s", err)
+		}
+	}()
+	return reader, nil
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/src/github.com/containers/storage/pkg/archive/changes_linux.go
new file mode 100644
index 00000000..798d7bfc
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/archive/changes_linux.go
@@ -0,0 +1,312 @@
+package archive
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"syscall"
+	"unsafe"
+
+	"github.com/containers/storage/pkg/system"
+)
+
+// walker is used to implement collectFileInfoForChanges on linux. Where this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save seconds on large
+// images.
+type walker struct {
+	dir1  string
+	dir2  string
+	root1 *FileInfo
+	root2 *FileInfo
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// for generating a list of changes between the two directories, as it does not
+// reflect the full contents.
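+//
+// For example (editorial note): when dir1 and dir2 are snapshots that share
+// unmodified files, an entry whose inode and device numbers match under both
+// roots is skipped entirely, without a single lstat(2) call for its subtree.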
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { + w := &walker{ + dir1: dir1, + dir2: dir2, + root1: newRootFileInfo(), + root2: newRootFileInfo(), + } + + i1, err := os.Lstat(w.dir1) + if err != nil { + return nil, nil, err + } + i2, err := os.Lstat(w.dir2) + if err != nil { + return nil, nil, err + } + + if err := w.walk("/", i1, i2); err != nil { + return nil, nil, err + } + + return w.root1, w.root2, nil +} + +// Given a FileInfo, its path info, and a reference to the root of the tree +// being constructed, register this file with the tree. +func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. 
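+	// Editorial walk-through: with names1 = [a(ino 1), c(ino 3)] and
+	// names2 = [b(ino 2), c(ino 3)] on the same device, the merge below
+	// emits [a b]: "c" is pruned because its inode matches in both trees.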
+ var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of syscall.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." 
{ // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} + +// OverlayChanges walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func OverlayChanges(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, overlayDeletedFile, nil) +} + +func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { + if fi.Mode()&os.ModeCharDevice != 0 { + s := fi.Sys().(*syscall.Stat_t) + if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 { + return path, nil + } + } + if fi.Mode()&os.ModeDir != 0 { + opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque") + if err != nil { + return "", err + } + if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' { + return path, nil + } + } + + return "", nil + +} diff --git a/vendor/src/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/src/github.com/containers/storage/pkg/archive/changes_other.go new file mode 100644 index 00000000..e1d1e7a9 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/archive/changes_other.go @@ -0,0 +1,97 @@ +// +build !linux + +package archive + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/containers/storage/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. 
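+		// e.g. a relPath of `\\foo\bar` is trimmed to `\foo\bar` (editorial note).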
+		if runtime.GOOS == "windows" {
+			if strings.HasPrefix(relPath, `\\`) {
+				relPath = relPath[1:]
+			}
+		}
+
+		if relPath == string(os.PathSeparator) {
+			return nil
+		}
+
+		parent := root.LookUp(filepath.Dir(relPath))
+		if parent == nil {
+			return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
+		}
+
+		info := &FileInfo{
+			name:     filepath.Base(relPath),
+			children: make(map[string]*FileInfo),
+			parent:   parent,
+		}
+
+		s, err := system.Lstat(path)
+		if err != nil {
+			return err
+		}
+		info.stat = s
+
+		info.capability, _ = system.Lgetxattr(path, "security.capability")
+
+		parent.children[info.name] = info
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return root, nil
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/archive/changes_unix.go b/vendor/src/github.com/containers/storage/pkg/archive/changes_unix.go
new file mode 100644
index 00000000..43dd94e2
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/archive/changes_unix.go
@@ -0,0 +1,36 @@
+// +build !windows
+
+package archive
+
+import (
+	"os"
+	"syscall"
+
+	"github.com/containers/storage/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+	// Don't look at size for dirs, it's not a good measure of change
+	if oldStat.Mode() != newStat.Mode() ||
+		oldStat.UID() != newStat.UID() ||
+		oldStat.GID() != newStat.GID() ||
+		oldStat.Rdev() != newStat.Rdev() ||
+		// Don't look at size for dirs, it's not a good measure of change
+		(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
+			(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+		return true
+	}
+	return false
+}
+
+func (info *FileInfo) isDir() bool {
+	return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
+}
+
+func getIno(fi os.FileInfo) uint64 {
+	return uint64(fi.Sys().(*syscall.Stat_t).Ino)
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+	return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/archive/changes_windows.go b/vendor/src/github.com/containers/storage/pkg/archive/changes_windows.go
new file mode 100644
index 00000000..06eadd66
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/archive/changes_windows.go
@@ -0,0 +1,30 @@
+package archive
+
+import (
+	"os"
+
+	"github.com/containers/storage/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+
+	// Don't look at size for dirs, it's not a good measure of change
+	if oldStat.ModTime() != newStat.ModTime() ||
+		oldStat.Mode() != newStat.Mode() ||
+		oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
+		return true
+	}
+	return false
+}
+
+func (info *FileInfo) isDir() bool {
+	return info.parent == nil || info.stat.IsDir()
+}
+
+func getIno(fi os.FileInfo) (inode uint64) {
+	return
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+	return false
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/archive/copy.go b/vendor/src/github.com/containers/storage/pkg/archive/copy.go
new file mode 100644
index 00000000..05c243d3
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/archive/copy.go
@@ -0,0 +1,458 @@
+package archive
+
+import (
+	"archive/tar"
+	"errors"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/containers/storage/pkg/system"
+)
+
+// Errors used or returned by this file.
+var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in a path separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { + // Ensure paths are in platform semantics + cleanedPath = normalizePath(cleanedPath) + originalPath = normalizePath(originalPath) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(filepath.Separator) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { + cleanedPath += string(filepath.Separator) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string) bool { + return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string) bool { + return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(normalizePath(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(filepath.Separator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content Archive, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". 
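+//
+// Illustrative example (editorial; the paths are hypothetical): archiving
+// "/data/app" with a rebaseName of "app-copy" emits entries such as
+// "app-copy/main.go" in place of "app/main.go".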
+func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) {
+	sourcePath = normalizePath(sourcePath)
+	if _, err = os.Lstat(sourcePath); err != nil {
+		// Catches the case where the source does not exist or is not a
+		// directory if asserted to be a directory, as this also causes an
+		// error.
+		return
+	}
+
+	// Separate the source path between its directory and
+	// the entry in that directory which we are archiving.
+	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+
+	filter := []string{sourceBase}
+
+	logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
+
+	return TarWithOptions(sourceDir, &TarOptions{
+		Compression:      Uncompressed,
+		IncludeFiles:     filter,
+		IncludeSourceDir: true,
+		RebaseNames: map[string]string{
+			sourceBase: rebaseName,
+		},
+	})
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+	Path       string
+	Exists     bool
+	IsDir      bool
+	RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
+	// normalize the file path and then evaluate the symbolic link;
+	// we will use the target file instead of the symbolic link if
+	// followLink is set
+	path = normalizePath(path)
+
+	resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
+	if err != nil {
+		return CopyInfo{}, err
+	}
+
+	stat, err := os.Lstat(resolvedPath)
+	if err != nil {
+		return CopyInfo{}, err
+	}
+
+	return CopyInfo{
+		Path:       resolvedPath,
+		Exists:     true,
+		IsDir:      stat.IsDir(),
+		RebaseName: rebaseName,
+	}, nil
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation. The given path should be an absolute local path.
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
+	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
+	path = normalizePath(path)
+	originalPath := path
+
+	stat, err := os.Lstat(path)
+
+	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
+		// The path exists and is not a symlink.
+		return CopyInfo{
+			Path:   path,
+			Exists: true,
+			IsDir:  stat.IsDir(),
+		}, nil
+	}
+
+	// While the path is a symlink.
+	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
+		if n > maxSymlinkIter {
+			// Don't follow symlinks more than this arbitrary number of times.
+			return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
+		}
+
+		// The path is a symbolic link. We need to evaluate it so that the
+		// destination of the copy operation is the link target and not the
+		// link itself. This is notably different than CopyInfoSourcePath which
+		// only evaluates symlinks before the last appearing path separator.
+		// Also note that it is okay if the last path element is a broken
+		// symlink as the copy operation should create the target.
+		var linkTarget string
+
+		linkTarget, err = os.Readlink(path)
+		if err != nil {
+			return CopyInfo{}, err
+		}
+
+		if !system.IsAbs(linkTarget) {
+			// Join with the parent directory.
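+			// e.g. a relative link target "data" under parent "/srv/www"
+			// becomes "/srv/www/data" (editorial note; paths hypothetical).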
+ dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Lstat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, ioutil.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case assertsDirectory(dstInfo.Path): + // The destination does not exist and is asserted to be created as a + // directory, but the source content is not a directory. This is an + // error condition since you cannot create a directory from a file + // source. + return "", nil, ErrDirNotExists + default: + // The last remaining case is when the destination does not exist, is + // not asserted to be a directory, and the source content is not an + // archive of a directory. 
In this case, the destination file will need
+		// to be created when the archive is extracted and the source content
+		// entry will have to be renamed to have a basename which matches the
+		// destination path's basename.
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	}
+
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
+	if oldBase == string(os.PathSeparator) {
+		// If oldBase specifies the root directory, use an empty string as
+		// oldBase instead so that newBase doesn't replace the path separator
+		// that all paths will start with.
+		oldBase = ""
+	}
+
+	rebased, w := io.Pipe()
+
+	go func() {
+		srcTar := tar.NewReader(srcContent)
+		rebasedTar := tar.NewWriter(w)
+
+		for {
+			hdr, err := srcTar.Next()
+			if err == io.EOF {
+				// Signals end of archive.
+				rebasedTar.Close()
+				w.Close()
+				return
+			}
+			if err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+
+			if err = rebasedTar.WriteHeader(hdr); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+		}
+	}()
+
+	return rebased
+}
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+	var (
+		srcInfo CopyInfo
+		err     error
+	)
+
+	// Ensure in platform semantics
+	srcPath = normalizePath(srcPath)
+	dstPath = normalizePath(dstPath)
+
+	// Clean the source and destination paths.
+	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
+	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
+
+	if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+		return err
+	}
+
+	content, err := TarResource(srcInfo)
+	if err != nil {
+		return err
+	}
+	defer content.Close()
+
+	return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error {
+	// The destination path need not exist, but CopyInfoDestinationPath will
+	// ensure that at least the parent directory exists.
+	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+	if err != nil {
+		return err
+	}
+
+	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+	if err != nil {
+		return err
+	}
+	defer copyArchive.Close()
+
+	options := &TarOptions{
+		NoLchown:             true,
+		NoOverwriteDirNonDir: true,
+	}
+
+	return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides the real path to be copied, based on whether
+// symbolic links should be followed. If followLink is true, resolvedPath is
+// the link target of any symbolic link file; otherwise only symlinks in the
+// parent directory are resolved, and a symbolic link file itself is returned
+// without resolving.
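+//
+// Illustrative usage (editorial sketch; the path is hypothetical):
+//
+//	resolved, rebase, err := ResolveHostSourcePath("/data/current/", false)
+//	// Symlinks in "/data" are evaluated, but "current" itself is not;
+//	// rebase carries the requested base name only when resolution
+//	// changes the final path element.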
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+	if followLink {
+		resolvedPath, err = filepath.EvalSymlinks(path)
+		if err != nil {
+			return
+		}
+
+		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+	} else {
+		dirPath, basePath := filepath.Split(path)
+
+		// if not following symlinks, only resolve symlinks in the parent dir
+		var resolvedDirPath string
+		resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+		if err != nil {
+			return
+		}
+		// resolvedDirPath will have been cleaned (no trailing path separators) so
+		// we can manually join it with the base path element.
+		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+		if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
+			rebaseName = filepath.Base(path)
+		}
+	}
+	return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath, and returns
+// the completed resolved path and the rebased file name.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+	// resolvedPath will have been cleaned (no trailing path separator or dot)
+	// so we can manually re-append them.
+	var rebaseName string
+	if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
+		resolvedPath += string(filepath.Separator) + "."
+	}
+
+	if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
+		resolvedPath += string(filepath.Separator)
+	}
+
+	if filepath.Base(path) != filepath.Base(resolvedPath) {
+		// In the case where the path had a trailing separator and a symlink
+		// evaluation has changed the last path component, we will need to
+		// rebase the name in the archive that is being copied to match the
+		// originally requested name.
+		rebaseName = filepath.Base(path)
+	}
+	return resolvedPath, rebaseName
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/archive/copy_unix.go b/vendor/src/github.com/containers/storage/pkg/archive/copy_unix.go
new file mode 100644
index 00000000..e305b5e4
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/archive/copy_unix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package archive
+
+import (
+	"path/filepath"
+)
+
+func normalizePath(path string) string {
+	return filepath.ToSlash(path)
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/archive/copy_windows.go b/vendor/src/github.com/containers/storage/pkg/archive/copy_windows.go
new file mode 100644
index 00000000..2b775b45
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/archive/copy_windows.go
@@ -0,0 +1,9 @@
+package archive
+
+import (
+	"path/filepath"
+)
+
+func normalizePath(path string) string {
+	return filepath.FromSlash(path)
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/archive/diff.go b/vendor/src/github.com/containers/storage/pkg/archive/diff.go
new file mode 100644
index 00000000..eba71738
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/archive/diff.go
@@ -0,0 +1,279 @@
+package archive
+
+import (
+	"archive/tar"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/pools"
+	"github.com/containers/storage/pkg/system"
+)
+
+// UnpackLayer unpacks `layer` to `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
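+//
+// Illustrative usage (editorial sketch; the destination path is hypothetical):
+//
+//	size, err := UnpackLayer("/var/lib/store/layers/abc", layerStream, nil)
+//	// size is accumulated from the Size field of each tar header read.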
+func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return 0, err + } + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + if options == nil { + options = &TarOptions{} + } + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. + if runtime.GOOS == "windows" { + if strings.Contains(hdr.Name, ":") { + logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) + continue + } + } + + // Note as these operations are platform specific, so must the slash be. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists. + // This happened in some tests where an image had a tarfile without any + // parent directories. + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = system.MkdirAll(parentPath, 0600) + if err != nil { + return 0, err + } + } + } + + // Skip AUFS metadata dirs + if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { + // Regular files inside /.wh..wh.plnk can be used as hardlink targets + // We don't want this directory, but we need the files in them so that + // such hardlinks can be resolved. + if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { + basename := filepath.Base(hdr.Name) + aufsHardlinks[basename] = hdr + if aufsTempdir == "" { + if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { + return 0, err + } + defer os.RemoveAll(aufsTempdir) + } + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil { + return 0, err + } + } + + if hdr.Name != WhiteoutOpaqueDir { + continue + } + } + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return 0, err + } + + // Note as these operations are platform specific, so must the slash be. 
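+		// Editorial note: this check rejects path traversal. An entry named
+		// "../../etc/passwd" produces a rel beginning with ".."+separator
+		// and is refused as a breakout attempt.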
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+		base := filepath.Base(path)
+
+		if strings.HasPrefix(base, WhiteoutPrefix) {
+			dir := filepath.Dir(path)
+			if base == WhiteoutOpaqueDir {
+				_, err := os.Lstat(dir)
+				if err != nil {
+					return 0, err
+				}
+				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+					if err != nil {
+						if os.IsNotExist(err) {
+							err = nil // parent was deleted
+						}
+						return err
+					}
+					if path == dir {
+						return nil
+					}
+					if _, exists := unpackedPaths[path]; !exists {
+						err := os.RemoveAll(path)
+						return err
+					}
+					return nil
+				})
+				if err != nil {
+					return 0, err
+				}
+			} else {
+				originalBase := base[len(WhiteoutPrefix):]
+				originalPath := filepath.Join(dir, originalBase)
+				if err := os.RemoveAll(originalPath); err != nil {
+					return 0, err
+				}
+			}
+		} else {
+			// If path exists we almost always just want to remove and replace it.
+			// The only exception is when it is a directory *and* the file from
+			// the layer is also a directory. Then we want to merge them (i.e.
+			// just apply the metadata from the layer).
+			if fi, err := os.Lstat(path); err == nil {
+				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+					if err := os.RemoveAll(path); err != nil {
+						return 0, err
+					}
+				}
+			}
+
+			trBuf.Reset(tr)
+			srcData := io.Reader(trBuf)
+			srcHdr := hdr
+
+			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
+			// we manually retarget these into the temporary files we extracted them into
+			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
+				linkBasename := filepath.Base(hdr.Linkname)
+				srcHdr = aufsHardlinks[linkBasename]
+				if srcHdr == nil {
+					return 0, fmt.Errorf("Invalid aufs hardlink")
+				}
+				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
+				if err != nil {
+					return 0, err
+				}
+				defer tmpFile.Close()
+				srcData = tmpFile
+			}
+
+			// If the options contain uid & gid maps, convert the header uid/gid
+			// entries using the maps such that lchown sets the proper mapped
+			// uid/gid after writing the file. We only perform this mapping if
+			// the file isn't already owned by the remapped root UID or GID, as
+			// that specific uid/gid has no mapping from container -> host, and
+			// those files already have the proper ownership for inside the
+			// container.
+			if srcHdr.Uid != remappedRootUID {
+				xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
+				if err != nil {
+					return 0, err
+				}
+				srcHdr.Uid = xUID
+			}
+			if srcHdr.Gid != remappedRootGID {
+				xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
+				if err != nil {
+					return 0, err
+				}
+				srcHdr.Gid = xGID
+			}
+			if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
+				return 0, err
+			}
+
+			// Directory mtimes must be handled at the end to avoid further
+			// file creation in them from modifying the directory mtime
+			if hdr.Typeflag == tar.TypeDir {
+				dirs = append(dirs, hdr)
+			}
+			unpackedPaths[path] = struct{}{}
+		}
+	}
+
+	for _, hdr := range dirs {
+		path := filepath.Join(dest, hdr.Name)
+		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+			return 0, err
+		}
+	}
+
+	return size, nil
+}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
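A minimal usage sketch of the entry point defined below, assuming a hypothetical layer blob and destination; errors are simply fatal:

package main

import (
	"log"
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	// Hypothetical layer blob; gzip, bzip2, xz, or plain tar all work here,
	// since ApplyLayer sends the stream through DecompressStream first.
	layer, err := os.Open("/tmp/layer.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()

	size, err := archive.ApplyLayer("/var/lib/demo/rootfs", layer)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("applied %d bytes of layer content", size)
}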
+func ApplyLayer(dest string, layer Reader) (int64, error) {
+	return applyLayerHandler(dest, layer, &TarOptions{}, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) {
+	return applyLayerHandler(dest, layer, options, false)
+}
+
+// applyLayerHandler does the bulk of the work for ApplyLayer, but allows
+// skipping DecompressStream when the layer is known to be uncompressed
+func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) {
+	dest = filepath.Clean(dest)
+
+	// We need to be able to set any perms
+	oldmask, err := system.Umask(0)
+	if err != nil {
+		return 0, err
+	}
+	defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
+
+	if decompress {
+		layer, err = DecompressStream(layer)
+		if err != nil {
+			return 0, err
+		}
+	}
+	return UnpackLayer(dest, layer, options)
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/archive/example_changes.go b/vendor/src/github.com/containers/storage/pkg/archive/example_changes.go
new file mode 100644
index 00000000..ef339446
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/archive/example_changes.go
@@ -0,0 +1,97 @@
+// +build ignore
+
+// Simple tool to create an archive stream from an old and new directory
+//
+// By default it will stream the comparison of two temporary directories with junk files
+package main
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/containers/storage/pkg/archive"
+)
+
+var (
+	flDebug  = flag.Bool("D", false, "debugging output")
+	flNewDir = flag.String("newdir", "", "")
+	flOldDir = flag.String("olddir", "", "")
+	log      = logrus.New()
+)
+
+func main() {
+	flag.Usage = func() {
+		fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
+		fmt.Printf("%s [OPTIONS]\n", os.Args[0])
+		flag.PrintDefaults()
+	}
+	flag.Parse()
+	log.Out = os.Stderr
+	if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
+		logrus.SetLevel(logrus.DebugLevel)
+	}
+	var newDir, oldDir string
+
+	if len(*flNewDir) == 0 {
+		var err error
+		newDir, err = ioutil.TempDir("", "docker-test-newDir")
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer os.RemoveAll(newDir)
+		if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
+			log.Fatal(err)
+		}
+	} else {
+		newDir = *flNewDir
+	}
+
+	if len(*flOldDir) == 0 {
+		// assign with "=", not ":=", so the outer oldDir is not shadowed
+		var err error
+		oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer os.RemoveAll(oldDir)
+	} else {
+		oldDir = *flOldDir
+	}
+
+	changes, err := archive.ChangesDirs(newDir, oldDir)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	a, err := archive.ExportChanges(newDir, changes)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer a.Close()
+
+	i, err := io.Copy(os.Stdout, a)
+	if err != nil && err != io.EOF {
+		log.Fatal(err)
+	}
+	fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+	fileData := []byte("fooo")
+	for n := 0; n < numberOfFiles; n++ {
+		fileName := fmt.Sprintf("file-%d", n)
+		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+			return 0, err
+		}
+		if makeLinks {
+			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+				return 0, err
+			}
+		}
+	}
+	totalSize := numberOfFiles * len(fileData)
+	return totalSize, nil
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/archive/time_linux.go b/vendor/src/github.com/containers/storage/pkg/archive/time_linux.go
new file mode 100644
index 00000000..3448569b
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/archive/time_linux.go
@@ -0,0 +1,16 @@
+package archive
+
+import (
+	"syscall"
+	"time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+	if time.IsZero() {
+		// Return UTIME_OMIT special value
+		ts.Sec = 0
+		ts.Nsec = ((1 << 30) - 2)
+		return
+	}
+	return syscall.NsecToTimespec(time.UnixNano())
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/archive/time_unsupported.go b/vendor/src/github.com/containers/storage/pkg/archive/time_unsupported.go
new file mode 100644
index 00000000..e85aac05
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/archive/time_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux
+
+package archive
+
+import (
+	"syscall"
+	"time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+	nsec := int64(0)
+	if !time.IsZero() {
+		nsec = time.UnixNano()
+	}
+	return syscall.NsecToTimespec(nsec)
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/archive/whiteouts.go b/vendor/src/github.com/containers/storage/pkg/archive/whiteouts.go
new file mode 100644
index 00000000..d20478a1
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/archive/whiteouts.go
@@ -0,0 +1,23 @@
+package archive
+
+// Whiteouts are files with a special meaning for the layered filesystem.
+// Docker uses AUFS whiteout files inside exported archives. In other
+// filesystems these files are generated/handled on tar creation/extraction.
+
+// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
+// filename this means that file has been removed from the base layer.
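To make the naming scheme encoded by the constants below concrete: deleting etc/passwd in an upper layer yields a tar entry named etc/.wh.passwd. A minimal sketch of detecting such entries follows; the helper name is invented here, and a real consumer (UnpackLayer, above) additionally checks the meta prefix first:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

const whiteoutPrefix = ".wh." // mirrors archive.WhiteoutPrefix

// whiteoutTarget reports whether name is a whiteout entry and, if so,
// which path it deletes from the lower layers. Note that meta entries
// (".wh..wh.*") also match this prefix and must be filtered separately.
func whiteoutTarget(name string) (string, bool) {
	base := filepath.Base(name)
	if !strings.HasPrefix(base, whiteoutPrefix) {
		return "", false
	}
	return filepath.Join(filepath.Dir(name), strings.TrimPrefix(base, whiteoutPrefix)), true
}

func main() {
	if target, ok := whiteoutTarget("etc/.wh.passwd"); ok {
		fmt.Println("delete:", target) // delete: etc/passwd
	}
}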
+const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. +const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/src/github.com/containers/storage/pkg/archive/wrap.go b/vendor/src/github.com/containers/storage/pkg/archive/wrap.go new file mode 100644 index 00000000..dfb335c0 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive + +import ( + "archive/tar" + "bytes" + "io/ioutil" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. +// If the last pair is incomplete, the file is created with an +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// * ./foo.txt with content "hello world" +// * ./empty with empty content +// +// FIXME: stream content instead of buffering +// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (Archive, error) { + files := parseStringPairs(input...) + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return ioutil.NopCloser(buf), nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff --git a/vendor/src/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/src/github.com/containers/storage/pkg/chrootarchive/archive.go new file mode 100644 index 00000000..649575c0 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/chrootarchive/archive.go @@ -0,0 +1,97 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" +) + +var chrootArchiver = &archive.Archiver{Untar: Untar} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. 
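A sketch of calling the Untar defined below from a daemon-style binary, with hypothetical paths; the reexec.Init call is needed so that the "docker-untar" entry point (registered in init_unix.go further down) can take over when the binary re-executes itself:

package main

import (
	"log"
	"os"

	"github.com/containers/storage/pkg/chrootarchive"
	"github.com/containers/storage/pkg/reexec"
)

func main() {
	// In the re-exec'd child this dispatches to the registered
	// "docker-untar" entry point and never reaches our own logic.
	if reexec.Init() {
		return
	}

	f, err := os.Open("/tmp/app.tar.gz") // hypothetical archive
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// nil options are defaulted inside untarHandler.
	if err := chrootarchive.Untar(f, "/var/lib/demo/unpack", nil); err != nil {
		log.Fatal(err)
	}
}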
+func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { + + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + if options == nil { + options = &archive.TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return err + } + + dest = filepath.Clean(dest) + if _, err := os.Stat(dest); os.IsNotExist(err) { + if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil { + return err + } + } + + r := ioutil.NopCloser(tarArchive) + if decompress { + decompressedArchive, err := archive.DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return invokeUnpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return chrootArchiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return chrootArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// If `dst` ends with a trailing slash '/' ('\' on Windows), the final +// destination path will be `dst/base(src)` or `dst\base(src)` +func CopyFileWithTar(src, dst string) (err error) { + return chrootArchiver.CopyFileWithTar(src, dst) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return chrootArchiver.UntarPath(src, dst) +} diff --git a/vendor/src/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/src/github.com/containers/storage/pkg/chrootarchive/archive_unix.go new file mode 100644 index 00000000..5b26a520 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/chrootarchive/archive_unix.go @@ -0,0 +1,86 @@ +// +build !windows + +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/reexec" +) + +// untar is the entry-point for docker-untar on re-exec. This is not used on +// Windows as it does not support chroot, hence no point sandboxing through +// chroot and rexec. 
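The re-exec mechanism the function below relies on is easier to see in isolation. A sketch with an invented entry-point name ("demo-sandboxed" is ours, not part of this patch):

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/reexec"
)

func init() {
	// Register a named entry point; when this binary is started with the
	// name as os.Args[0], sandboxedWork runs instead of main.
	reexec.Register("demo-sandboxed", sandboxedWork)
}

func sandboxedWork() {
	// In the real code this is where chroot(flag.Arg(0)) happens before
	// touching any caller-supplied paths.
	fmt.Fprintln(os.Stderr, "running re-exec'd as", os.Args[0])
	os.Exit(0)
}

func main() {
	if reexec.Init() { // true when we were re-exec'd under a registered name
		return
	}
	cmd := reexec.Command("demo-sandboxed", "/some/root")
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, "re-exec failed:", err)
	}
}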
+func untar() {
+	runtime.LockOSThread()
+	flag.Parse()
+
+	var options *archive.TarOptions
+
+	// read the options from the pipe "ExtraFiles"
+	if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
+		fatal(err)
+	}
+
+	if err := chroot(flag.Arg(0)); err != nil {
+		fatal(err)
+	}
+
+	if err := archive.Unpack(os.Stdin, "/", options); err != nil {
+		fatal(err)
+	}
+	// fully consume stdin in case it is zero padded
+	if _, err := flush(os.Stdin); err != nil {
+		fatal(err)
+	}
+
+	os.Exit(0)
+}
+
+func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error {
+
+	// We can't pass a potentially large exclude list directly via cmd line
+	// because we easily overrun the kernel's max argument/environment size
+	// when the full image list is passed (e.g. when this is used by
+	// `docker load`). We will marshal the options via a pipe to the
+	// child
+	r, w, err := os.Pipe()
+	if err != nil {
+		return fmt.Errorf("Untar pipe failure: %v", err)
+	}
+
+	cmd := reexec.Command("docker-untar", dest)
+	cmd.Stdin = decompressedArchive
+
+	cmd.ExtraFiles = append(cmd.ExtraFiles, r)
+	output := bytes.NewBuffer(nil)
+	cmd.Stdout = output
+	cmd.Stderr = output
+
+	if err := cmd.Start(); err != nil {
+		return fmt.Errorf("Untar error on re-exec cmd: %v", err)
+	}
+	// write the options to the pipe for the untar exec to read
+	if err := json.NewEncoder(w).Encode(options); err != nil {
+		return fmt.Errorf("Untar json encode to pipe failed: %v", err)
+	}
+	w.Close()
+
+	if err := cmd.Wait(); err != nil {
+		// when `xz -d -c -q | docker-untar ...` failed on docker-untar side,
+		// we need to exhaust `xz`'s output, otherwise the `xz` side will be
+		// pending on write pipe forever
+		io.Copy(ioutil.Discard, decompressedArchive)
+
+		return fmt.Errorf("Error processing tar file(%v): %s", err, output)
+	}
+	return nil
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/chrootarchive/archive_windows.go b/vendor/src/github.com/containers/storage/pkg/chrootarchive/archive_windows.go
new file mode 100644
index 00000000..93fde422
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/chrootarchive/archive_windows.go
@@ -0,0 +1,22 @@
+package chrootarchive
+
+import (
+	"io"
+
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/longpath"
+)
+
+// chroot is not supported by Windows
+func chroot(path string) error {
+	return nil
+}
+
+func invokeUnpack(decompressedArchive io.ReadCloser,
+	dest string,
+	options *archive.TarOptions) error {
+	// Windows is different to Linux here because Windows does not support
+	// chroot. Hence there is no point sandboxing a chrooted process to
+	// do the unpack. We call inline instead within the daemon process.
+	return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/src/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
new file mode 100644
index 00000000..54b5ff48
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
@@ -0,0 +1,103 @@
+package chrootarchive
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"syscall"
+
+	"github.com/containers/storage/pkg/mount"
+)
+
+// chroot on linux uses pivot_root instead of chroot.
+// pivot_root takes a new root and an old root.
+// Old root must be a sub-dir of new root; it is where the current rootfs will reside after the call to pivot_root.
+// New root is where the new rootfs is set to. +// Old root is removed after the call to pivot_root so it is no longer available under the new root. +// This is similar to how libcontainer sets up a container's rootfs +func chroot(path string) (err error) { + if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil { + return fmt.Errorf("Error creating mount namespace before pivot: %v", err) + } + + if err := mount.MakeRPrivate(path); err != nil { + return err + } + + // setup oldRoot for pivot_root + pivotDir, err := ioutil.TempDir(path, ".pivot_root") + if err != nil { + return fmt.Errorf("Error setting up pivot dir: %v", err) + } + + var mounted bool + defer func() { + if mounted { + // make sure pivotDir is not mounted before we try to remove it + if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil { + if err == nil { + err = errCleanup + } + return + } + } + + errCleanup := os.Remove(pivotDir) + // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful + // because we already cleaned it up on failed pivot_root + if errCleanup != nil && !os.IsNotExist(errCleanup) { + errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) + if err == nil { + err = errCleanup + } + } + + if errCleanup := syscall.Unmount("/", syscall.MNT_DETACH); errCleanup != nil { + if err == nil { + err = fmt.Errorf("error unmounting root: %v", errCleanup) + } + return + } + }() + + if err := syscall.PivotRoot(path, pivotDir); err != nil { + // If pivot fails, fall back to the normal chroot after cleaning up temp dir + if err := os.Remove(pivotDir); err != nil { + return fmt.Errorf("Error cleaning up after failed pivot: %v", err) + } + return realChroot(path) + } + mounted = true + + // This is the new path for where the old root (prior to the pivot) has been moved to + // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction + pivotDir = filepath.Join("/", filepath.Base(pivotDir)) + + if err := syscall.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root: %v", err) + } + + // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host + if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil { + return fmt.Errorf("Error making old root private after pivot: %v", err) + } + + // Now unmount the old root so it's no longer visible from the new root + if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { + return fmt.Errorf("Error while unmounting old root after pivot: %v", err) + } + mounted = false + + return nil +} + +func realChroot(path string) error { + if err := syscall.Chroot(path); err != nil { + return fmt.Errorf("Error after fallback to chroot: %v", err) + } + if err := syscall.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root after chroot: %v", err) + } + return nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/src/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go new file mode 100644 index 00000000..16354bf6 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go @@ -0,0 +1,12 @@ +// +build !windows,!linux + +package chrootarchive + +import "syscall" + +func chroot(path string) error { + if err := syscall.Chroot(path); err != nil { + return err + } + return syscall.Chdir("/") +} diff --git 
a/vendor/src/github.com/containers/storage/pkg/chrootarchive/diff.go b/vendor/src/github.com/containers/storage/pkg/chrootarchive/diff.go
new file mode 100644
index 00000000..377aeb9f
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/chrootarchive/diff.go
@@ -0,0 +1,19 @@
+package chrootarchive
+
+import "github.com/containers/storage/pkg/archive"
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) {
+	return applyLayerHandler(dest, layer, &archive.TarOptions{}, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer archive.Reader, options *archive.TarOptions) (int64, error) {
+	return applyLayerHandler(dest, layer, options, false)
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/src/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
new file mode 100644
index 00000000..23daa721
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
@@ -0,0 +1,120 @@
+//+build !windows
+
+package chrootarchive
+
+import (
+	"bytes"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+
+	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/reexec"
+	"github.com/containers/storage/pkg/system"
+)
+
+type applyLayerResponse struct {
+	LayerSize int64 `json:"layerSize"`
+}
+
+// applyLayer is the entry-point for docker-applylayer on re-exec. This is not
+// used on Windows as it does not support chroot, hence no point sandboxing
+// through chroot and rexec.
+func applyLayer() {
+
+	var (
+		tmpDir  = ""
+		err     error
+		options *archive.TarOptions
+	)
+	runtime.LockOSThread()
+	flag.Parse()
+
+	if err := chroot(flag.Arg(0)); err != nil {
+		fatal(err)
+	}
+
+	// We need to be able to set any perms
+	oldmask, err := system.Umask(0)
+	defer system.Umask(oldmask)
+	if err != nil {
+		fatal(err)
+	}
+
+	if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
+		fatal(err)
+	}
+
+	if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil {
+		fatal(err)
+	}
+
+	os.Setenv("TMPDIR", tmpDir)
+	size, err := archive.UnpackLayer("/", os.Stdin, options)
+	os.RemoveAll(tmpDir)
+	if err != nil {
+		fatal(err)
+	}
+
+	encoder := json.NewEncoder(os.Stdout)
+	if err := encoder.Encode(applyLayerResponse{size}); err != nil {
+		fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err))
+	}
+
+	if _, err := flush(os.Stdin); err != nil {
+		fatal(err)
+	}
+
+	os.Exit(0)
+}
+
+// applyLayerHandler parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`. Returns the size in bytes of the
+// contents of the layer.
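A sketch of the parent side of this round trip, with hypothetical paths: the chrootarchive ApplyLayer re-execs the current binary as "docker-applyLayer", passes the serialized TarOptions through the OPT environment variable, streams the tar over stdin, and reads the size back as JSON on stdout:

package main

import (
	"log"
	"os"

	"github.com/containers/storage/pkg/chrootarchive"
	"github.com/containers/storage/pkg/reexec"
)

func main() {
	// Lets the "docker-applyLayer" entry point take over in the child.
	if reexec.Init() {
		return
	}

	layer, err := os.Open("/tmp/layer.tar") // hypothetical diff; may also be compressed
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()

	// ApplyLayer chroots the child into the destination before unpacking.
	size, err := chrootarchive.ApplyLayer("/var/lib/demo/rootfs", layer)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("layer content size: %d bytes", size)
}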
+func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + if options == nil { + options = &archive.TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + data, err := json.Marshal(options) + if err != nil { + return 0, fmt.Errorf("ApplyLayer json encode: %v", err) + } + + cmd := reexec.Command("docker-applyLayer", dest) + cmd.Stdin = layer + cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + + outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) + cmd.Stdout, cmd.Stderr = outBuf, errBuf + + if err = cmd.Run(); err != nil { + return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) + } + + // Stdout should be a valid JSON struct representing an applyLayerResponse. + response := applyLayerResponse{} + decoder := json.NewDecoder(outBuf) + if err = decoder.Decode(&response); err != nil { + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + } + + return response.LayerSize, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/chrootarchive/diff_windows.go b/vendor/src/github.com/containers/storage/pkg/chrootarchive/diff_windows.go new file mode 100644 index 00000000..cfada9b1 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/chrootarchive/diff_windows.go @@ -0,0 +1,44 @@ +package chrootarchive + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/longpath" +) + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + + // Ensure it is a Windows-style volume path + dest = longpath.AddPrefix(dest) + + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + + tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") + if err != nil { + return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. 
%s", dest, err) + } + + s, err := archive.UnpackLayer(dest, layer, nil) + os.RemoveAll(tmpDir) + if err != nil { + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest) + } + + return s, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/chrootarchive/init_unix.go b/vendor/src/github.com/containers/storage/pkg/chrootarchive/init_unix.go new file mode 100644 index 00000000..7b8a46f3 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/chrootarchive/init_unix.go @@ -0,0 +1,28 @@ +// +build !windows + +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/containers/storage/pkg/reexec" +) + +func init() { + reexec.Register("docker-applyLayer", applyLayer) + reexec.Register("docker-untar", untar) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) (bytes int64, err error) { + return io.Copy(ioutil.Discard, r) +} diff --git a/vendor/src/github.com/containers/storage/pkg/chrootarchive/init_windows.go b/vendor/src/github.com/containers/storage/pkg/chrootarchive/init_windows.go new file mode 100644 index 00000000..fa17c9bf --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/chrootarchive/init_windows.go @@ -0,0 +1,4 @@ +package chrootarchive + +func init() { +} diff --git a/vendor/src/github.com/containers/storage/pkg/devicemapper/devmapper.go b/vendor/src/github.com/containers/storage/pkg/devicemapper/devmapper.go new file mode 100644 index 00000000..838b06af --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/devicemapper/devmapper.go @@ -0,0 +1,807 @@ +// +build linux + +package devicemapper + +import ( + "errors" + "fmt" + "os" + "runtime" + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" +) + +// DevmapperLogger defines methods for logging with devicemapper. +type DevmapperLogger interface { + DMLog(level int, file string, line int, dmError int, message string) +} + +const ( + deviceCreate TaskType = iota + deviceReload + deviceRemove + deviceRemoveAll + deviceSuspend + deviceResume + deviceInfo + deviceDeps + deviceRename + deviceVersion + deviceStatus + deviceTable + deviceWaitevent + deviceList + deviceClear + deviceMknodes + deviceListVersions + deviceTargetMsg + deviceSetGeometry +) + +const ( + addNodeOnResume AddNodeType = iota + addNodeOnCreate +) + +// List of errors returned when using devicemapper. 
+var (
+	ErrTaskRun              = errors.New("dm_task_run failed")
+	ErrTaskSetName          = errors.New("dm_task_set_name failed")
+	ErrTaskSetMessage       = errors.New("dm_task_set_message failed")
+	ErrTaskSetAddNode       = errors.New("dm_task_set_add_node failed")
+	ErrTaskSetRo            = errors.New("dm_task_set_ro failed")
+	ErrTaskAddTarget        = errors.New("dm_task_add_target failed")
+	ErrTaskSetSector        = errors.New("dm_task_set_sector failed")
+	ErrTaskGetDeps          = errors.New("dm_task_get_deps failed")
+	ErrTaskGetInfo          = errors.New("dm_task_get_info failed")
+	ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed")
+	ErrTaskDeferredRemove   = errors.New("dm_task_deferred_remove failed")
+	ErrTaskSetCookie        = errors.New("dm_task_set_cookie failed")
+	ErrNilCookie            = errors.New("cookie ptr can't be nil")
+	ErrGetBlockSize         = errors.New("Can't get block size")
+	ErrUdevWait             = errors.New("wait on udev cookie failed")
+	ErrSetDevDir            = errors.New("dm_set_dev_dir failed")
+	ErrGetLibraryVersion    = errors.New("dm_get_library_version failed")
+	ErrCreateRemoveTask     = errors.New("Can't create task of type deviceRemove")
+	ErrRunRemoveDevice      = errors.New("running RemoveDevice failed")
+	ErrInvalidAddNode       = errors.New("Invalid AddNode type")
+	ErrBusy                 = errors.New("Device is Busy")
+	ErrDeviceIDExists       = errors.New("Device Id Exists")
+	ErrEnxio                = errors.New("No such device or address")
+)
+
+var (
+	dmSawBusy  bool
+	dmSawExist bool
+	dmSawEnxio bool // No Such Device or Address
+)
+
+type (
+	// Task represents a devicemapper task (like lvcreate, etc.); a task is
+	// needed for each ioctl command to execute.
+	Task struct {
+		unmanaged *cdmTask
+	}
+	// Deps represents dependents (layer) of a device.
+	Deps struct {
+		Count  uint32
+		Filler uint32
+		Device []uint64
+	}
+	// Info represents information about a device.
+	Info struct {
+		Exists         int
+		Suspended      int
+		LiveTable      int
+		InactiveTable  int
+		OpenCount      int32
+		EventNr        uint32
+		Major          uint32
+		Minor          uint32
+		ReadOnly       int
+		TargetCount    int32
+		DeferredRemove int
+	}
+	// TaskType represents a type of task
+	TaskType int
+	// AddNodeType represents a type of node to be added
+	AddNodeType int
+)
+
+// DeviceIDExists returns whether the error indicates that a device ID already
+// exists. This will be true if a device-create or snapshot-create operation
+// fails because the device or snapshot already exists in the pool.
+// The current implementation is a little crude, as it scans the error string
+// for an exact pattern match; replacing it with a more robust implementation
+// is desirable.
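A sketch of the retry loop this predicate enables, Linux-only like the package itself and using a hypothetical pool name; callers probe device IDs until the pool stops reporting collisions:

package main

import (
	"fmt"
	"log"

	"github.com/containers/storage/pkg/devicemapper"
)

func main() {
	// Hypothetical pool name; in the devmapper graph driver this is the
	// thin pool the driver manages.
	pool := "/dev/mapper/demo-pool"

	for id := 1; id < 100; id++ {
		err := devicemapper.CreateDevice(pool, id)
		if err == nil {
			fmt.Println("created thin device with ID", id)
			return
		}
		if !devicemapper.DeviceIDExists(err) {
			log.Fatal(err) // a real failure, not an ID collision
		}
		// ID already taken in the pool's metadata; try the next one.
	}
	log.Fatal("no free device ID found")
}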
+func DeviceIDExists(err error) bool { + return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists) +} + +func (t *Task) destroy() { + if t != nil { + DmTaskDestroy(t.unmanaged) + runtime.SetFinalizer(t, nil) + } +} + +// TaskCreateNamed is a convenience function for TaskCreate when a name +// will be set on the task as well +func TaskCreateNamed(t TaskType, name string) (*Task, error) { + task := TaskCreate(t) + if task == nil { + return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t)) + } + if err := task.setName(name); err != nil { + return nil, fmt.Errorf("devicemapper: Can't set task name %s", name) + } + return task, nil +} + +// TaskCreate initializes a devicemapper task of tasktype +func TaskCreate(tasktype TaskType) *Task { + Ctask := DmTaskCreate(int(tasktype)) + if Ctask == nil { + return nil + } + task := &Task{unmanaged: Ctask} + runtime.SetFinalizer(task, (*Task).destroy) + return task +} + +func (t *Task) run() error { + if res := DmTaskRun(t.unmanaged); res != 1 { + return ErrTaskRun + } + return nil +} + +func (t *Task) setName(name string) error { + if res := DmTaskSetName(t.unmanaged, name); res != 1 { + return ErrTaskSetName + } + return nil +} + +func (t *Task) setMessage(message string) error { + if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { + return ErrTaskSetMessage + } + return nil +} + +func (t *Task) setSector(sector uint64) error { + if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { + return ErrTaskSetSector + } + return nil +} + +func (t *Task) setCookie(cookie *uint, flags uint16) error { + if cookie == nil { + return ErrNilCookie + } + if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { + return ErrTaskSetCookie + } + return nil +} + +func (t *Task) setAddNode(addNode AddNodeType) error { + if addNode != addNodeOnResume && addNode != addNodeOnCreate { + return ErrInvalidAddNode + } + if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { + return ErrTaskSetAddNode + } + return nil +} + +func (t *Task) setRo() error { + if res := DmTaskSetRo(t.unmanaged); res != 1 { + return ErrTaskSetRo + } + return nil +} + +func (t *Task) addTarget(start, size uint64, ttype, params string) error { + if res := DmTaskAddTarget(t.unmanaged, start, size, + ttype, params); res != 1 { + return ErrTaskAddTarget + } + return nil +} + +func (t *Task) getDeps() (*Deps, error) { + var deps *Deps + if deps = DmTaskGetDeps(t.unmanaged); deps == nil { + return nil, ErrTaskGetDeps + } + return deps, nil +} + +func (t *Task) getInfo() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getInfoWithDeferred() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getDriverVersion() (string, error) { + res := DmTaskGetDriverVersion(t.unmanaged) + if res == "" { + return "", ErrTaskGetDriverVersion + } + return res, nil +} + +func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64, + length uint64, targetType string, params string) { + + return DmGetNextTarget(t.unmanaged, next, &start, &length, + &targetType, ¶ms), + start, length, targetType, params +} + +// UdevWait waits for any processes that are waiting for udev to complete the specified cookie. 
+func UdevWait(cookie *uint) error { + if res := DmUdevWait(*cookie); res != 1 { + logrus.Debugf("devicemapper: Failed to wait on udev cookie %d", *cookie) + return ErrUdevWait + } + return nil +} + +// LogInitVerbose is an interface to initialize the verbose logger for the device mapper library. +func LogInitVerbose(level int) { + DmLogInitVerbose(level) +} + +var dmLogger DevmapperLogger + +// LogInit initializes the logger for the device mapper library. +func LogInit(logger DevmapperLogger) { + dmLogger = logger + LogWithErrnoInit() +} + +// SetDevDir sets the dev folder for the device mapper library (usually /dev). +func SetDevDir(dir string) error { + if res := DmSetDevDir(dir); res != 1 { + logrus.Debug("devicemapper: Error dm_set_dev_dir") + return ErrSetDevDir + } + return nil +} + +// GetLibraryVersion returns the device mapper library version. +func GetLibraryVersion() (string, error) { + var version string + if res := DmGetLibraryVersion(&version); res != 1 { + return "", ErrGetLibraryVersion + } + return version, nil +} + +// UdevSyncSupported returns whether device-mapper is able to sync with udev +// +// This is essential otherwise race conditions can arise where both udev and +// device-mapper attempt to create and destroy devices. +func UdevSyncSupported() bool { + return DmUdevGetSyncSupport() != 0 +} + +// UdevSetSyncSupport allows setting whether the udev sync should be enabled. +// The return bool indicates the state of whether the sync is enabled. +func UdevSetSyncSupport(enable bool) bool { + if enable { + DmUdevSetSyncSupport(1) + } else { + DmUdevSetSyncSupport(0) + } + + return UdevSyncSupported() +} + +// CookieSupported returns whether the version of device-mapper supports the +// use of cookie's in the tasks. +// This is largely a lower level call that other functions use. +func CookieSupported() bool { + return DmCookieSupported() != 0 +} + +// RemoveDevice is a useful helper for cleaning up a device. +func RemoveDevice(name string) error { + task, err := TaskCreateNamed(deviceRemove, name) + if task == nil { + return err + } + + var cookie uint + if err := task.setCookie(&cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can not set cookie: %s", err) + } + defer UdevWait(&cookie) + + dmSawBusy = false // reset before the task is run + if err = task.run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err) + } + + return nil +} + +// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred. +func RemoveDeviceDeferred(name string) error { + logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name) + defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name) + task, err := TaskCreateNamed(deviceRemove, name) + if task == nil { + return err + } + + if err := DmTaskDeferredRemove(task.unmanaged); err != 1 { + return ErrTaskDeferredRemove + } + + if err = task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err) + } + + return nil +} + +// CancelDeferredRemove cancels a deferred remove for a device. 
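A sketch of how the remove/deferred-remove pair is typically combined, with a hypothetical device name; the deferred path assumes the library was built with deferred-removal support:

package main

import (
	"log"

	"github.com/containers/storage/pkg/devicemapper"
)

func main() {
	name := "demo-thin-device" // hypothetical device name

	// Try a synchronous remove first; if the device is still open
	// somewhere, fall back to a deferred remove, which the kernel
	// completes once the last user closes it.
	err := devicemapper.RemoveDevice(name)
	if err == devicemapper.ErrBusy {
		log.Print("device busy, scheduling deferred remove")
		err = devicemapper.RemoveDeviceDeferred(name)
	}
	if err != nil {
		log.Fatal(err)
	}
}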
+func CancelDeferredRemove(deviceName string) error {
+	task, err := TaskCreateNamed(deviceTargetMsg, deviceName)
+	if task == nil {
+		return err
+	}
+
+	if err := task.setSector(0); err != nil {
+		return fmt.Errorf("devicemapper: Can't set sector %s", err)
+	}
+
+	if err := task.setMessage("@cancel_deferred_remove"); err != nil {
+		return fmt.Errorf("devicemapper: Can't set message %s", err)
+	}
+
+	dmSawBusy = false
+	dmSawEnxio = false
+	if err := task.run(); err != nil {
+		// A device might be being deleted already
+		if dmSawBusy {
+			return ErrBusy
+		} else if dmSawEnxio {
+			return ErrEnxio
+		}
+		return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err)
+
+	}
+	return nil
+}
+
+// GetBlockDeviceSize returns the size of a block device identified by the specified file.
+func GetBlockDeviceSize(file *os.File) (uint64, error) {
+	size, err := ioctlBlkGetSize64(file.Fd())
+	if err != nil {
+		logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err)
+		return 0, ErrGetBlockSize
+	}
+	return uint64(size), nil
+}
+
+// BlockDeviceDiscard runs discard for the given path.
+// This is used as a workaround for the kernel not discarding blocks on
+// the thin pool when we remove a thin-provisioned device, so we do it
+// manually.
+func BlockDeviceDiscard(path string) error {
+	file, err := os.OpenFile(path, os.O_RDWR, 0)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	size, err := GetBlockDeviceSize(file)
+	if err != nil {
+		return err
+	}
+
+	if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil {
+		return err
+	}
+
+	// Without this sometimes the remove of the device that happens after
+	// discard fails with EBUSY.
+	syscall.Sync()
+
+	return nil
+}
+
+// CreatePool is the programmatic example of "dmsetup create".
+// It creates a device with the specified poolName, data and metadata file and block size.
+func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
+	task, err := TaskCreateNamed(deviceCreate, poolName)
+	if task == nil {
+		return err
+	}
+
+	size, err := GetBlockDeviceSize(dataFile)
+	if err != nil {
+		return fmt.Errorf("devicemapper: Can't get data size %s", err)
+	}
+
+	params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
+	if err := task.addTarget(0, size/512, "thin-pool", params); err != nil {
+		return fmt.Errorf("devicemapper: Can't add target %s", err)
+	}
+
+	var cookie uint
+	var flags uint16
+	flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag
+	if err := task.setCookie(&cookie, flags); err != nil {
+		return fmt.Errorf("devicemapper: Can't set cookie %s", err)
+	}
+	defer UdevWait(&cookie)
+
+	if err := task.run(); err != nil {
+		return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err)
+	}
+
+	return nil
+}
+
+// ReloadPool is the programmatic example of "dmsetup reload".
+// It reloads the table with the specified poolName, data and metadata file and block size.
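A sketch of pool creation with hypothetical loop devices backing data and metadata; the 128-sector (64KiB) block size is a plausible choice, not a value mandated by this code:

package main

import (
	"log"
	"os"

	"github.com/containers/storage/pkg/devicemapper"
)

func main() {
	// Hypothetical loop devices; in the devmapper driver these back the
	// data and metadata loopback files under the storage root.
	data, err := os.OpenFile("/dev/loop0", os.O_RDWR, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer data.Close()

	meta, err := os.OpenFile("/dev/loop1", os.O_RDWR, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer meta.Close()

	// CreatePool sizes the thin-pool target from the data device itself.
	if err := devicemapper.CreatePool("demo-pool", data, meta, 128); err != nil {
		log.Fatal(err)
	}
}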
+func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := TaskCreateNamed(deviceReload, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("devicemapper: Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate %s", err) + } + + return nil +} + +// GetDeps is the programmatic example of "dmsetup deps". +// It outputs a list of devices referenced by the live table for the specified device. +func GetDeps(name string) (*Deps, error) { + task, err := TaskCreateNamed(deviceDeps, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getDeps() +} + +// GetInfo is the programmatic example of "dmsetup info". +// It outputs some brief information about the device. +func GetInfo(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfo() +} + +// GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred. +// It outputs some brief information about the device. +func GetInfoWithDeferred(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfoWithDeferred() +} + +// GetDriverVersion is the programmatic example of "dmsetup version". +// It outputs version information of the driver. +func GetDriverVersion() (string, error) { + task := TaskCreate(deviceVersion) + if task == nil { + return "", fmt.Errorf("devicemapper: Can't create deviceVersion task") + } + if err := task.run(); err != nil { + return "", err + } + return task.getDriverVersion() +} + +// GetStatus is the programmatic example of "dmsetup status". +// It outputs status information for the specified device name. +func GetStatus(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceStatus, name) + if task == nil { + logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// GetTable is the programmatic example for "dmsetup table". +// It outputs the current table for the specified device name. 
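A sketch of consuming one of these query wrappers, with a hypothetical device name; for a thin-pool target the params string carries the usage counters that "dmsetup status" prints:

package main

import (
	"fmt"
	"log"

	"github.com/containers/storage/pkg/devicemapper"
)

func main() {
	// Roughly equivalent to running "dmsetup status demo-pool".
	start, length, targetType, params, err := devicemapper.GetStatus("demo-pool")
	if err != nil {
		log.Fatal(err)
	}
	// For "thin-pool", params looks like:
	// "<transaction id> <used meta>/<total meta> <used data>/<total data> ...".
	fmt.Printf("%d %d %s %s\n", start, length, targetType, params)
}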
+func GetTable(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceTable, name) + if task == nil { + logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetTable() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetTable() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// SetTransactionID sets a transaction id for the specified device name. +func SetTransactionID(poolName string, oldID uint64, newID uint64) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err) + } + return nil +} + +// SuspendDevice is the programmatic example of "dmsetup suspend". +// It suspends the specified device. +func SuspendDevice(name string) error { + task, err := TaskCreateNamed(deviceSuspend, name) + if task == nil { + return err + } + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err) + } + return nil +} + +// ResumeDevice is the programmatic example of "dmsetup resume". +// It un-suspends the specified device. +func ResumeDevice(name string) error { + task, err := TaskCreateNamed(deviceResume, name) + if task == nil { + return err + } + + var cookie uint + if err := task.setCookie(&cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + defer UdevWait(&cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceResume %s", err) + } + + return nil +} + +// CreateDevice creates a device with the specified poolName with the specified device id. +func CreateDevice(poolName string, deviceID int) error { + logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID) + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. + if dmSawExist { + return ErrDeviceIDExists + } + + return fmt.Errorf("devicemapper: Error running CreateDevice %s", err) + + } + return nil +} + +// DeleteDevice deletes a device with the specified poolName with the specified device id. 
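Putting the message-based calls together, a sketch of a thin device's lifecycle against a hypothetical pool; the size is given in bytes and converted to sectors internally by ActivateDevice:

package main

import (
	"log"

	"github.com/containers/storage/pkg/devicemapper"
)

func main() {
	// Hypothetical pool, device name, and device ID.
	pool, name, id := "/dev/mapper/demo-pool", "demo-thin", 1
	size := uint64(10 * 1024 * 1024 * 1024) // 10 GiB

	if err := devicemapper.CreateDevice(pool, id); err != nil {
		log.Fatal(err)
	}
	if err := devicemapper.ActivateDevice(pool, name, id, size); err != nil {
		log.Fatal(err)
	}
	// ... use /dev/mapper/demo-thin ...
	if err := devicemapper.RemoveDevice(name); err != nil {
		log.Fatal(err)
	}
	if err := devicemapper.DeleteDevice(pool, id); err != nil {
		log.Fatal(err)
	}
}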
+func DeleteDevice(poolName string, deviceID int) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawBusy = false + if err := task.run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err) + } + return nil +} + +// ActivateDevice activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDevice(poolName string, name string, deviceID int, size uint64) error { + return activateDevice(poolName, name, deviceID, size, "") +} + +// ActivateDeviceWithExternal activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error { + return activateDevice(poolName, name, deviceID, size, external) +} + +func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error { + task, err := TaskCreateNamed(deviceCreate, name) + if task == nil { + return err + } + + var params string + if len(external) > 0 { + params = fmt.Sprintf("%s %d %s", poolName, deviceID, external) + } else { + params = fmt.Sprintf("%s %d", poolName, deviceID) + } + if err := task.addTarget(0, size/512, "thin", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + if err := task.setAddNode(addNodeOnCreate); err != nil { + return fmt.Errorf("devicemapper: Can't add node %s", err) + } + + var cookie uint + if err := task.setCookie(&cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + + defer UdevWait(&cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err) + } + + return nil +} + +// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceId, +func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error { + devinfo, _ := GetInfo(baseName) + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err := SuspendDevice(baseName); err != nil { + return err + } + } + + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + if doSuspend { + ResumeDevice(baseName) + } + return err + } + + if err := task.setSector(0); err != nil { + if doSuspend { + ResumeDevice(baseName) + } + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil { + if doSuspend { + ResumeDevice(baseName) + } + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + if doSuspend { + ResumeDevice(baseName) + } + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. 
+ if dmSawExist { + return ErrDeviceIDExists + } + + return fmt.Errorf("devicemapper: Error running deviceCreate (createSnapDevice) %s", err) + + } + + if doSuspend { + if err := ResumeDevice(baseName); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/devicemapper/devmapper_log.go b/vendor/src/github.com/containers/storage/pkg/devicemapper/devmapper_log.go new file mode 100644 index 00000000..8477e36f --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/devicemapper/devmapper_log.go @@ -0,0 +1,35 @@ +// +build linux + +package devicemapper + +import "C" + +import ( + "strings" +) + +// Due to the way cgo works this has to be in a separate file, as devmapper.go has +// definitions in the cgo block, which is incompatible with using "//export" + +// DevmapperLogCallback exports the devmapper log callback for cgo. +//export DevmapperLogCallback +func DevmapperLogCallback(level C.int, file *C.char, line C.int, dmErrnoOrClass C.int, message *C.char) { + msg := C.GoString(message) + if level < 7 { + if strings.Contains(msg, "busy") { + dmSawBusy = true + } + + if strings.Contains(msg, "File exists") { + dmSawExist = true + } + + if strings.Contains(msg, "No such device or address") { + dmSawEnxio = true + } + } + + if dmLogger != nil { + dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg) + } +} diff --git a/vendor/src/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go b/vendor/src/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go new file mode 100644 index 00000000..91fbc85b --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go @@ -0,0 +1,251 @@ +// +build linux + +package devicemapper + +/* +#cgo LDFLAGS: -L. -ldevmapper +#include +#include // FIXME: present only for BLKGETSIZE64, maybe we can remove it? + +// FIXME: Can't we find a way to do the logging in pure Go? +extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); + +static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) +{ + char buffer[256]; + va_list ap; + + va_start(ap, f); + vsnprintf(buffer, 256, f, ap); + va_end(ap); + + DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); +} + +static void log_with_errno_init() +{ + dm_log_with_errno_init(log_cb); +} +*/ +import "C" + +import ( + "reflect" + "unsafe" +) + +type ( + cdmTask C.struct_dm_task +) + +// IOCTL consts +const ( + BlkGetSize64 = C.BLKGETSIZE64 + BlkDiscard = C.BLKDISCARD +) + +// Devicemapper cookie flags. +const ( + DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG + DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG + DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG + DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK +) + +// DeviceMapper mapped functions. 
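Before the function table below, a sketch of plugging a custom logger into the callback machinery above; the adapter type here is ours, not part of the patch:

package main

import (
	"log"

	"github.com/containers/storage/pkg/devicemapper"
)

// stderrLogger adapts the standard library logger to the DevmapperLogger
// interface so libdevmapper messages surface in our own log stream.
type stderrLogger struct{}

func (stderrLogger) DMLog(level int, file string, line int, dmError int, message string) {
	log.Printf("libdm(level=%d) %s:%d errno=%d: %s", level, file, line, dmError, message)
}

func main() {
	// LogInit wires the C-side callback into libdevmapper; afterwards
	// DevmapperLogCallback forwards each message to our DMLog method.
	devicemapper.LogInit(stderrLogger{})
}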
+var ( + DmGetLibraryVersion = dmGetLibraryVersionFct + DmGetNextTarget = dmGetNextTargetFct + DmLogInitVerbose = dmLogInitVerboseFct + DmSetDevDir = dmSetDevDirFct + DmTaskAddTarget = dmTaskAddTargetFct + DmTaskCreate = dmTaskCreateFct + DmTaskDestroy = dmTaskDestroyFct + DmTaskGetDeps = dmTaskGetDepsFct + DmTaskGetInfo = dmTaskGetInfoFct + DmTaskGetDriverVersion = dmTaskGetDriverVersionFct + DmTaskRun = dmTaskRunFct + DmTaskSetAddNode = dmTaskSetAddNodeFct + DmTaskSetCookie = dmTaskSetCookieFct + DmTaskSetMessage = dmTaskSetMessageFct + DmTaskSetName = dmTaskSetNameFct + DmTaskSetRo = dmTaskSetRoFct + DmTaskSetSector = dmTaskSetSectorFct + DmUdevWait = dmUdevWaitFct + DmUdevSetSyncSupport = dmUdevSetSyncSupportFct + DmUdevGetSyncSupport = dmUdevGetSyncSupportFct + DmCookieSupported = dmCookieSupportedFct + LogWithErrnoInit = logWithErrnoInitFct + DmTaskDeferredRemove = dmTaskDeferredRemoveFct + DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct +) + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func dmTaskDestroyFct(task *cdmTask) { + C.dm_task_destroy((*C.struct_dm_task)(task)) +} + +func dmTaskCreateFct(taskType int) *cdmTask { + return (*cdmTask)(C.dm_task_create(C.int(taskType))) +} + +func dmTaskRunFct(task *cdmTask) int { + ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) + return int(ret) +} + +func dmTaskSetNameFct(task *cdmTask, name string) int { + Cname := C.CString(name) + defer free(Cname) + + return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) +} + +func dmTaskSetMessageFct(task *cdmTask, message string) int { + Cmessage := C.CString(message) + defer free(Cmessage) + + return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) +} + +func dmTaskSetSectorFct(task *cdmTask, sector uint64) int { + return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) +} + +func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int { + cCookie := C.uint32_t(*cookie) + defer func() { + *cookie = uint(cCookie) + }() + return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) +} + +func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int { + return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) +} + +func dmTaskSetRoFct(task *cdmTask) int { + return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) +} + +func dmTaskAddTargetFct(task *cdmTask, + start, size uint64, ttype, params string) int { + + Cttype := C.CString(ttype) + defer free(Cttype) + + Cparams := C.CString(params) + defer free(Cparams) + + return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) +} + +func dmTaskGetDepsFct(task *cdmTask) *Deps { + Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) + if Cdeps == nil { + return nil + } + + // golang issue: https://github.com/golang/go/issues/11925 + hdr := reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))), + Len: int(Cdeps.count), + Cap: int(Cdeps.count), + } + devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr)) + + deps := &Deps{ + Count: uint32(Cdeps.count), + Filler: uint32(Cdeps.filler), + } + for _, device := range devices { + deps.Device = append(deps.Device, uint64(device)) + } + return deps +} + +func dmTaskGetInfoFct(task *cdmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + 
info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} + +func dmTaskGetDriverVersionFct(task *cdmTask) string { + buffer := C.malloc(128) + defer C.free(buffer) + res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) + if res == 0 { + return "" + } + return C.GoString((*C.char)(buffer)) +} + +func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer { + var ( + Cstart, Clength C.uint64_t + CtargetType, Cparams *C.char + ) + defer func() { + *start = uint64(Cstart) + *length = uint64(Clength) + *target = C.GoString(CtargetType) + *params = C.GoString(Cparams) + }() + + nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams) + return nextp +} + +func dmUdevSetSyncSupportFct(syncWithUdev int) { + (C.dm_udev_set_sync_support(C.int(syncWithUdev))) +} + +func dmUdevGetSyncSupportFct() int { + return int(C.dm_udev_get_sync_support()) +} + +func dmUdevWaitFct(cookie uint) int { + return int(C.dm_udev_wait(C.uint32_t(cookie))) +} + +func dmCookieSupportedFct() int { + return int(C.dm_cookie_supported()) +} + +func dmLogInitVerboseFct(level int) { + C.dm_log_init_verbose(C.int(level)) +} + +func logWithErrnoInitFct() { + C.log_with_errno_init() +} + +func dmSetDevDirFct(dir string) int { + Cdir := C.CString(dir) + defer free(Cdir) + + return int(C.dm_set_dev_dir(Cdir)) +} + +func dmGetLibraryVersionFct(version *string) int { + buffer := C.CString(string(make([]byte, 128))) + defer free(buffer) + defer func() { + *version = C.GoString(buffer) + }() + return int(C.dm_get_library_version(buffer, 128)) +} diff --git a/vendor/src/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/vendor/src/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go new file mode 100644 index 00000000..dc361eab --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go @@ -0,0 +1,34 @@ +// +build linux,!libdm_no_deferred_remove + +package devicemapper + +/* +#cgo LDFLAGS: -L. -ldevmapper +#include +*/ +import "C" + +// LibraryDeferredRemovalSupport is supported when statically linked. 
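+//
+// Which of the two devmapper_wrapper_*deferred_remove.go files is compiled
+// is selected by the libdm_no_deferred_remove build tag. Editor's sketch of
+// opting out of deferred removal at build time:
+//
+//	go build -tags libdm_no_deferred_remove ./...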
+const LibraryDeferredRemovalSupport = true + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task))) +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + info.DeferredRemove = int(Cinfo.deferred_remove) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} diff --git a/vendor/src/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/src/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go new file mode 100644 index 00000000..4a6665de --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go @@ -0,0 +1,15 @@ +// +build linux,libdm_no_deferred_remove + +package devicemapper + +// LibraryDeferredRemovalsupport is not supported when statically linked. +const LibraryDeferredRemovalSupport = false + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + // Error. Nobody should be calling it. + return -1 +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + return -1 +} diff --git a/vendor/src/github.com/containers/storage/pkg/devicemapper/ioctl.go b/vendor/src/github.com/containers/storage/pkg/devicemapper/ioctl.go new file mode 100644 index 00000000..581b57eb --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/devicemapper/ioctl.go @@ -0,0 +1,27 @@ +// +build linux + +package devicemapper + +import ( + "syscall" + "unsafe" +) + +func ioctlBlkGetSize64(fd uintptr) (int64, error) { + var size int64 + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { + return 0, err + } + return size, nil +} + +func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { + var r [2]uint64 + r[0] = offset + r[1] = length + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + return err + } + return nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/devicemapper/log.go b/vendor/src/github.com/containers/storage/pkg/devicemapper/log.go new file mode 100644 index 00000000..cee5e545 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/devicemapper/log.go @@ -0,0 +1,11 @@ +package devicemapper + +// definitions from lvm2 lib/log/log.h +const ( + LogLevelFatal = 2 + iota // _LOG_FATAL + LogLevelErr // _LOG_ERR + LogLevelWarn // _LOG_WARN + LogLevelNotice // _LOG_NOTICE + LogLevelInfo // _LOG_INFO + LogLevelDebug // _LOG_DEBUG +) diff --git a/vendor/src/github.com/containers/storage/pkg/directory/directory.go b/vendor/src/github.com/containers/storage/pkg/directory/directory.go new file mode 100644 index 00000000..1715ef45 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/directory/directory.go @@ -0,0 +1,26 @@ +package directory + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path +func MoveToSubdir(oldpath, subdir string) error { + + infos, 
err := ioutil.ReadDir(oldpath) + if err != nil { + return err + } + for _, info := range infos { + if info.Name() != subdir { + oldName := filepath.Join(oldpath, info.Name()) + newName := filepath.Join(oldpath, subdir, info.Name()) + if err := os.Rename(oldName, newName); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/src/github.com/containers/storage/pkg/directory/directory_unix.go new file mode 100644 index 00000000..397251bd --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/directory/directory_unix.go @@ -0,0 +1,48 @@ +// +build linux freebsd solaris + +package directory + +import ( + "os" + "path/filepath" + "syscall" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(dir string) (size int64, err error) { + data := make(map[uint64]struct{}) + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + if err != nil { + // if dir does not exist, Size() returns the error. + // if dir/x disappeared while walking, Size() ignores dir/x. + if os.IsNotExist(err) && d != dir { + return nil + } + return err + } + + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + // Check inode to handle hard links correctly + inode := fileInfo.Sys().(*syscall.Stat_t).Ino + // inode is not a uint64 on all platforms. Cast it to avoid issues. + if _, exists := data[uint64(inode)]; exists { + return nil + } + // inode is not a uint64 on all platforms. Cast it to avoid issues. + data[uint64(inode)] = struct{}{} + + size += s + + return nil + }) + return +} diff --git a/vendor/src/github.com/containers/storage/pkg/directory/directory_windows.go b/vendor/src/github.com/containers/storage/pkg/directory/directory_windows.go new file mode 100644 index 00000000..6fb0917c --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/directory/directory_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package directory + +import ( + "os" + "path/filepath" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(dir string) (size int64, err error) { + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + if err != nil { + // if dir does not exist, Size() returns the error. + // if dir/x disappeared while walking, Size() ignores dir/x. + if os.IsNotExist(err) && d != dir { + return nil + } + return err + } + + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + size += s + + return nil + }) + return +} diff --git a/vendor/src/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/src/github.com/containers/storage/pkg/fileutils/fileutils.go new file mode 100644 index 00000000..763d8d27 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/fileutils/fileutils.go @@ -0,0 +1,283 @@ +package fileutils + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strings" + "text/scanner" + + "github.com/Sirupsen/logrus" +) + +// exclusion returns true if the specified pattern is an exclusion +func exclusion(pattern string) bool { + return pattern[0] == '!' 
+}
+
+// empty returns true if the specified pattern is empty
+func empty(pattern string) bool {
+	return pattern == ""
+}
+
+// CleanPatterns takes a slice of patterns and returns a new
+// slice of patterns cleaned with filepath.Clean, stripped
+// of any empty patterns, and lets the caller know whether the
+// slice contains any exception patterns (prefixed with !).
+func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
+	// Loop over exclusion patterns and:
+	// 1. Clean them up.
+	// 2. Indicate whether we are dealing with any exception rules.
+	// 3. Error if we see a single exclusion marker on its own (!).
+	cleanedPatterns := []string{}
+	patternDirs := [][]string{}
+	exceptions := false
+	for _, pattern := range patterns {
+		// Eliminate leading and trailing whitespace.
+		pattern = strings.TrimSpace(pattern)
+		if empty(pattern) {
+			continue
+		}
+		if exclusion(pattern) {
+			if len(pattern) == 1 {
+				return nil, nil, false, errors.New("Illegal exclusion pattern: !")
+			}
+			exceptions = true
+		}
+		pattern = filepath.Clean(pattern)
+		cleanedPatterns = append(cleanedPatterns, pattern)
+		if exclusion(pattern) {
+			pattern = pattern[1:]
+		}
+		patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator)))
+	}
+
+	return cleanedPatterns, patternDirs, exceptions, nil
+}
+
+// Matches returns true if file matches any of the patterns
+// and isn't excluded by any of the subsequent patterns.
+func Matches(file string, patterns []string) (bool, error) {
+	file = filepath.Clean(file)
+
+	if file == "." {
+		// Don't let them exclude everything, kind of silly.
+		return false, nil
+	}
+
+	patterns, patDirs, _, err := CleanPatterns(patterns)
+	if err != nil {
+		return false, err
+	}
+
+	return OptimizedMatches(file, patterns, patDirs)
+}
+
+// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
+// It will assume that the inputs have been preprocessed and therefore the function
+// doesn't need to do as much error checking and clean-up. This was done to avoid
+// repeating these steps on each file being checked during the archive process.
+// The more generic fileutils.Matches() can't make these assumptions.
+func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
+	matched := false
+	file = filepath.FromSlash(file)
+	parentPath := filepath.Dir(file)
+	parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
+
+	for i, pattern := range patterns {
+		negative := false
+
+		if exclusion(pattern) {
+			negative = true
+			pattern = pattern[1:]
+		}
+
+		match, err := regexpMatch(pattern, file)
+		if err != nil {
+			return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err)
+		}
+
+		if !match && parentPath != "." {
+			// Check to see if the pattern matches one of our parent dirs.
+			if len(patDirs[i]) <= len(parentPathDirs) {
+				match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)),
+					strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator)))
+			}
+		}
+
+		if match {
+			matched = !negative
+		}
+	}
+
+	if matched {
+		logrus.Debugf("Skipping excluded path: %s", file)
+	}
+
+	return matched, nil
+}
+
+// regexpMatch tries to match the logic of filepath.Match but
+// does so using regexp logic. We do this so that we can expand the
+// wildcard set to include other things, like "**" to mean any number
+// of directories. This means that we should be backwards compatible
+// with filepath.Match(). We'll end up supporting more stuff, due to
+// the fact that we're using regexp, but that's ok - it does no harm.
+//
+// As per the comment in golang's filepath.Match, on Windows, escaping
+// is disabled. Instead, '\\' is treated as path separator.
+func regexpMatch(pattern, path string) (bool, error) {
+	regStr := "^"
+
+	// Do some syntax checking on the pattern.
+	// filepath's Match() has some really weird rules that are inconsistent
+	// so instead of trying to dup their logic, just call Match() for its
+	// error state and if there is an error in the pattern return it.
+	// If this becomes an issue we can remove this since it's really only
+	// needed in the error (syntax) case - which isn't really critical.
+	if _, err := filepath.Match(pattern, path); err != nil {
+		return false, err
+	}
+
+	// Go through the pattern and convert it to a regexp.
+	// We use a scanner so we can support utf-8 chars.
+	var scan scanner.Scanner
+	scan.Init(strings.NewReader(pattern))
+
+	sl := string(os.PathSeparator)
+	escSL := sl
+	if sl == `\` {
+		escSL += `\`
+	}
+
+	for scan.Peek() != scanner.EOF {
+		ch := scan.Next()
+
+		if ch == '*' {
+			if scan.Peek() == '*' {
+				// is some flavor of "**"
+				scan.Next()
+
+				if scan.Peek() == scanner.EOF {
+					// is "**EOF" - to align with .gitignore just accept all
+					regStr += ".*"
+				} else {
+					// is "**"
+					regStr += "((.*" + escSL + ")|([^" + escSL + "]*))"
+				}
+
+				// Treat **/ as ** so eat the "/"
+				if string(scan.Peek()) == sl {
+					scan.Next()
+				}
+			} else {
+				// is "*" so map it to anything but "/"
+				regStr += "[^" + escSL + "]*"
+			}
+		} else if ch == '?' {
+			// "?" is any char except "/"
+			regStr += "[^" + escSL + "]"
+		} else if strings.Index(".$", string(ch)) != -1 {
+			// Escape some regexp special chars that have no meaning
+			// in golang's filepath.Match
+			regStr += `\` + string(ch)
+		} else if ch == '\\' {
+			// escape next char. Note that a trailing \ in the pattern
+			// will be left alone (but need to escape it)
+			if sl == `\` {
+				// On windows map "\" to "\\", meaning an escaped backslash,
+				// and then just continue because filepath.Match on
+				// Windows doesn't allow escaping at all
+				regStr += escSL
+				continue
+			}
+			if scan.Peek() != scanner.EOF {
+				regStr += `\` + string(scan.Next())
+			} else {
+				regStr += `\`
+			}
+		} else {
+			regStr += string(ch)
+		}
+	}
+
+	regStr += "$"
+
+	res, err := regexp.MatchString(regStr, path)
+
+	// Map regexp's error to filepath's so no one knows we're not using filepath
+	if err != nil {
+		err = filepath.ErrBadPattern
+	}
+
+	return res, err
+}
+
+// CopyFile copies from src to dst until either EOF is reached
+// on src or an error occurs. It verifies src exists and removes
+// the dst if it exists.
+func CopyFile(src, dst string) (int64, error) {
+	cleanSrc := filepath.Clean(src)
+	cleanDst := filepath.Clean(dst)
+	if cleanSrc == cleanDst {
+		return 0, nil
+	}
+	sf, err := os.Open(cleanSrc)
+	if err != nil {
+		return 0, err
+	}
+	defer sf.Close()
+	if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
+		return 0, err
+	}
+	df, err := os.Create(cleanDst)
+	if err != nil {
+		return 0, err
+	}
+	defer df.Close()
+	return io.Copy(df, sf)
+}
+
+// ReadSymlinkedDirectory returns the target directory of a symlink.
+// The target of the symbolic link may not be a file.
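+//
+// Illustrative usage (editor's sketch; the path is a placeholder):
+//
+//	target, err := fileutils.ReadSymlinkedDirectory("/var/lib/mylink")
+//	if err == nil {
+//		fmt.Println("symlink resolves to directory", target)
+//	}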
+func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil +} + +// CreateIfNotExists creates a file or a directory only if it does not already exist. +func CreateIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0755) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go b/vendor/src/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go new file mode 100644 index 00000000..ccd648fa --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go @@ -0,0 +1,27 @@ +package fileutils + +import ( + "os" + "os/exec" + "strconv" + "strings" +) + +// GetTotalUsedFds returns the number of used File Descriptors by +// executing `lsof -p PID` +func GetTotalUsedFds() int { + pid := os.Getpid() + + cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) + + output, err := cmd.CombinedOutput() + if err != nil { + return -1 + } + + outputStr := strings.TrimSpace(string(output)) + + fds := strings.Split(outputStr, "\n") + + return len(fds) - 1 +} diff --git a/vendor/src/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go b/vendor/src/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go new file mode 100644 index 00000000..0f2cb7ab --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. +// On Solaris these limits are per process and not systemwide +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/src/github.com/containers/storage/pkg/fileutils/fileutils_unix.go b/vendor/src/github.com/containers/storage/pkg/fileutils/fileutils_unix.go new file mode 100644 index 00000000..d5c3abf5 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/fileutils/fileutils_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package fileutils + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/Sirupsen/logrus" +) + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. 
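+//
+// Illustrative usage (editor's sketch); the call returns -1 when the
+// /proc entry cannot be read:
+//
+//	if n := fileutils.GetTotalUsedFds(); n >= 0 {
+//		fmt.Printf("process holds %d file descriptors\n", n)
+//	}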
+func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} diff --git a/vendor/src/github.com/containers/storage/pkg/fileutils/fileutils_windows.go b/vendor/src/github.com/containers/storage/pkg/fileutils/fileutils_windows.go new file mode 100644 index 00000000..5ec21cac --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/fileutils/fileutils_windows.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. Not supported +// on Windows. +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/src/github.com/containers/storage/pkg/homedir/homedir.go b/vendor/src/github.com/containers/storage/pkg/homedir/homedir.go new file mode 100644 index 00000000..8154e83f --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/homedir/homedir.go @@ -0,0 +1,39 @@ +package homedir + +import ( + "os" + "runtime" + + "github.com/opencontainers/runc/libcontainer/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + if runtime.GOOS == "windows" { + return "USERPROFILE" + } + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + home := os.Getenv(Key()) + if home == "" && runtime.GOOS != "windows" { + if u, err := user.CurrentUser(); err == nil { + return u.Home + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + if runtime.GOOS == "windows" { + return "%USERPROFILE%" // be careful while using in format functions + } + return "~" +} diff --git a/vendor/src/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/src/github.com/containers/storage/pkg/idtools/idtools.go new file mode 100644 index 00000000..6bca4662 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/idtools/idtools.go @@ -0,0 +1,197 @@ +package idtools + +import ( + "bufio" + "fmt" + "os" + "sort" + "strconv" + "strings" +) + +// IDMap contains a single entry for user namespace range remapping. An array +// of IDMap entries represents the structure that will be provided to the Linux +// kernel for creating a user namespace. +type IDMap struct { + ContainerID int `json:"container_id"` + HostID int `json:"host_id"` + Size int `json:"size"` +} + +type subIDRange struct { + Start int + Length int +} + +type ranges []subIDRange + +func (e ranges) Len() int { return len(e) } +func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } + +const ( + subuidFileName string = "/etc/subuid" + subgidFileName string = "/etc/subgid" +) + +// MkdirAllAs creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. 
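+//
+// Illustrative usage (editor's sketch; the path and the 1000:1000 owner
+// pair are placeholders):
+//
+//	err := idtools.MkdirAllAs("/var/lib/example/data", 0755, 1000, 1000)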
+func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, true) +} + +// MkdirAllNewAs creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership will be performed +func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, false) +} + +// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership +func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, false, true) +} + +// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. +// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + var uid, gid int + + if uidMap != nil { + xUID, err := ToHost(0, uidMap) + if err != nil { + return -1, -1, err + } + uid = xUID + } + if gidMap != nil { + xGID, err := ToHost(0, gidMap) + if err != nil { + return -1, -1, err + } + gid = xGID + } + return uid, gid, nil +} + +// ToContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func ToContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { + contID := m.ContainerID + (hostID - m.HostID) + return contID, nil + } + } + return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) +} + +// ToHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. 
If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id # +func ToHost(contID int, idMap []IDMap) (int, error) { + if idMap == nil { + return contID, nil + } + for _, m := range idMap { + if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { + hostID := m.HostID + (contID - m.ContainerID) + return hostID, nil + } + } + return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) +} + +// CreateIDMappings takes a requested user and group name and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) { + subuidRanges, err := parseSubuid(username) + if err != nil { + return nil, nil, err + } + subgidRanges, err := parseSubgid(groupname) + if err != nil { + return nil, nil, err + } + if len(subuidRanges) == 0 { + return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username) + } + if len(subgidRanges) == 0 { + return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname) + } + + return createIDMap(subuidRanges), createIDMap(subgidRanges), nil +} + +func createIDMap(subidRanges ranges) []IDMap { + idMap := []IDMap{} + + // sort the ranges by lowest ID first + sort.Sort(subidRanges) + containerID := 0 + for _, idrange := range subidRanges { + idMap = append(idMap, IDMap{ + ContainerID: containerID, + HostID: idrange.Start, + Size: idrange.Length, + }) + containerID = containerID + idrange.Length + } + return idMap +} + +func parseSubuid(username string) (ranges, error) { + return parseSubidFile(subuidFileName, username) +} + +func parseSubgid(username string) (ranges, error) { + return parseSubidFile(subgidFileName, username) +} + +// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) +// and return all found ranges for a specified username. 
If the special value +// "ALL" is supplied for username, then all ranges in the file will be returned +func parseSubidFile(path, username string) (ranges, error) { + var rangeList ranges + + subidFile, err := os.Open(path) + if err != nil { + return rangeList, err + } + defer subidFile.Close() + + s := bufio.NewScanner(subidFile) + for s.Scan() { + if err := s.Err(); err != nil { + return rangeList, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" || strings.HasPrefix(text, "#") { + continue + } + parts := strings.Split(text, ":") + if len(parts) != 3 { + return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + } + if parts[0] == username || username == "ALL" { + startid, err := strconv.Atoi(parts[1]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + length, err := strconv.Atoi(parts[2]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + rangeList = append(rangeList, subIDRange{startid, length}) + } + } + return rangeList, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/src/github.com/containers/storage/pkg/idtools/idtools_unix.go new file mode 100644 index 00000000..b2cfb05e --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/idtools/idtools_unix.go @@ -0,0 +1,60 @@ +// +build !windows + +package idtools + +import ( + "os" + "path/filepath" + + "github.com/containers/storage/pkg/system" +) + +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't + // chown the full directory path if it exists + var paths []string + if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + paths = []string{path} + } else if err == nil && chownExisting { + if err := os.Chown(path, ownerUID, ownerGID); err != nil { + return err + } + // short-circuit--we were called with an existing directory and chown was requested + return nil + } else if err == nil { + // nothing to do; directory path fully exists already and chown was NOT requested + return nil + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + return err + } + } else { + if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { + return err + } + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { + return err + } + } + return nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/idtools/idtools_windows.go b/vendor/src/github.com/containers/storage/pkg/idtools/idtools_windows.go new file mode 100644 index 00000000..0cad1736 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/idtools/idtools_windows.go @@ -0,0 +1,18 @@ +// +build windows + +package idtools + +import ( + "os" + + "github.com/containers/storage/pkg/system" +) + +// Platforms such as Windows do not support the UID/GID concept. So make this +// just a wrapper around system.MkdirAll. 
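+// As a consequence, on Windows the ownerUID/ownerGID arguments are accepted
+// but ignored, so (for example) MkdirAllAs(dir, 0755, 0, 0) reduces to
+// system.MkdirAll(dir, 0755).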
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + return err + } + return nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/src/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 00000000..4a4aaed0 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,188 @@ +package idtools + +import ( + "fmt" + "os/exec" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "sync" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group +// useradd -r -s /bin/false + +var ( + once sync.Once + userCommand string + + cmdTemplates = map[string]string{ + "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", + "useradd": "-r -s /bin/false %s", + "usermod": "-%s %d-%d %s", + } + + idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) + // default length for a UID/GID subordinate range + defaultRangeLen = 65536 + defaultRangeStart = 100000 + userMod = "usermod" +) + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + //only return no error if the final resolved binary basename + //matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +// AddNamespaceRangesUser takes a username and uses the standard system +// utility to create a system user/group pair used to hold the +// /etc/sub{uid,gid} ranges which will be used for user namespace +// mapping ranges in containers. 
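+//
+// Illustrative call (editor's sketch; the user name is a placeholder and
+// root privileges are needed to modify /etc/passwd and /etc/sub{uid,gid}):
+//
+//	uid, gid, err := idtools.AddNamespaceRangesUser("remap-user")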
+func AddNamespaceRangesUser(name string) (int, int, error) { + if err := addUser(name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + } + + // Query the system for the created uid and gid pair + out, err := execCmd("id", name) + if err != nil { + return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) + } + matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) + if len(matches) != 3 { + return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) + } + uid, err := strconv.Atoi(matches[1]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) + } + gid, err := strconv.Atoi(matches[2]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + } + + // Now we need to create the subuid/subgid ranges for our new user/group (system users + // do not get auto-created ranges in subuid/subgid) + + if err := createSubordinateRanges(name); err != nil { + return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) + } + return uid, gid, nil +} + +func addUser(userName string) error { + once.Do(func() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + }) + if userCommand == "" { + return fmt.Errorf("Cannot add user; no useradd/adduser binary found") + } + args := fmt.Sprintf(cmdTemplates[userCommand], userName) + out, err := execCmd(userCommand, args) + if err != nil { + return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) + } + return nil +} + +func createSubordinateRanges(name string) error { + + // first, we should verify that ranges weren't automatically created + // by the distro tooling + ranges, err := parseSubuid(name) + if err != nil { + return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no UID ranges; let's create one + startID, err := findNextUIDRange() + if err != nil { + return fmt.Errorf("Can't find available subuid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) + } + } + + ranges, err = parseSubgid(name) + if err != nil { + return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no GID ranges; let's create one + startID, err := findNextGIDRange() + if err != nil { + return fmt.Errorf("Can't find available subgid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + } + } + return nil +} + +func findNextUIDRange() (int, error) { + ranges, err := parseSubuid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextGIDRange() (int, error) { + ranges, err := parseSubgid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) + } + 
sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextRangeStart(rangeList ranges) (int, error) { + startID := defaultRangeStart + for _, arange := range rangeList { + if wouldOverlap(arange, startID) { + startID = arange.Start + arange.Length + } + } + return startID, nil +} + +func wouldOverlap(arange subIDRange, ID int) bool { + low := ID + high := ID + defaultRangeLen + if (low >= arange.Start && low <= arange.Start+arange.Length) || + (high <= arange.Start+arange.Length && high >= arange.Start) { + return true + } + return false +} + +func execCmd(cmd, args string) ([]byte, error) { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) + return execCmd.CombinedOutput() +} diff --git a/vendor/src/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go b/vendor/src/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 00000000..d98b354c --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package idtools + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. +func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/vendor/src/github.com/containers/storage/pkg/ioutils/buffer.go b/vendor/src/github.com/containers/storage/pkg/ioutils/buffer.go new file mode 100644 index 00000000..3d737b3e --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/ioutils/buffer.go @@ -0,0 +1,51 @@ +package ioutils + +import ( + "errors" + "io" +) + +var errBufferFull = errors.New("buffer is full") + +type fixedBuffer struct { + buf []byte + pos int + lastRead int +} + +func (b *fixedBuffer) Write(p []byte) (int, error) { + n := copy(b.buf[b.pos:cap(b.buf)], p) + b.pos += n + + if n < len(p) { + if b.pos == cap(b.buf) { + return n, errBufferFull + } + return n, io.ErrShortWrite + } + return n, nil +} + +func (b *fixedBuffer) Read(p []byte) (int, error) { + n := copy(p, b.buf[b.lastRead:b.pos]) + b.lastRead += n + return n, nil +} + +func (b *fixedBuffer) Len() int { + return b.pos - b.lastRead +} + +func (b *fixedBuffer) Cap() int { + return cap(b.buf) +} + +func (b *fixedBuffer) Reset() { + b.pos = 0 + b.lastRead = 0 + b.buf = b.buf[:0] +} + +func (b *fixedBuffer) String() string { + return string(b.buf[b.lastRead:b.pos]) +} diff --git a/vendor/src/github.com/containers/storage/pkg/ioutils/fmt.go b/vendor/src/github.com/containers/storage/pkg/ioutils/fmt.go new file mode 100644 index 00000000..0b04b0ba --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/ioutils/fmt.go @@ -0,0 +1,22 @@ +package ioutils + +import ( + "fmt" + "io" +) + +// FprintfIfNotEmpty prints the string value if it's not empty +func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { + if value != "" { + return fmt.Fprintf(w, format, value) + } + return 0, nil +} + +// FprintfIfTrue prints the boolean value if it's true +func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) { + if ok { + return fmt.Fprintf(w, format, ok) + } + return 0, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/ioutils/fswriters.go b/vendor/src/github.com/containers/storage/pkg/ioutils/fswriters.go new file mode 100644 index 00000000..6dc50a03 --- /dev/null +++ 
b/vendor/src/github.com/containers/storage/pkg/ioutils/fswriters.go @@ -0,0 +1,82 @@ +package ioutils + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a +// temporary file and closing it atomically changes the temporary file to +// destination path. Writing and closing concurrently is not allowed. +func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { + f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + if err != nil { + return nil, err + } + + abspath, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + return &atomicFileWriter{ + f: f, + fn: abspath, + perm: perm, + }, nil +} + +// AtomicWriteFile atomically writes data to a file named by filename. +func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := NewAtomicFileWriter(filename, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + f.(*atomicFileWriter).writeErr = err + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type atomicFileWriter struct { + f *os.File + fn string + writeErr error + perm os.FileMode +} + +func (w *atomicFileWriter) Write(dt []byte) (int, error) { + n, err := w.f.Write(dt) + if err != nil { + w.writeErr = err + } + return n, err +} + +func (w *atomicFileWriter) Close() (retErr error) { + defer func() { + if retErr != nil || w.writeErr != nil { + os.Remove(w.f.Name()) + } + }() + if err := w.f.Sync(); err != nil { + w.f.Close() + return err + } + if err := w.f.Close(); err != nil { + return err + } + if err := os.Chmod(w.f.Name(), w.perm); err != nil { + return err + } + if w.writeErr == nil { + return os.Rename(w.f.Name(), w.fn) + } + return nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/ioutils/multireader.go b/vendor/src/github.com/containers/storage/pkg/ioutils/multireader.go new file mode 100644 index 00000000..0d2d76b4 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/ioutils/multireader.go @@ -0,0 +1,226 @@ +package ioutils + +import ( + "bytes" + "fmt" + "io" + "os" +) + +type pos struct { + idx int + offset int64 +} + +type multiReadSeeker struct { + readers []io.ReadSeeker + pos *pos + posIdx map[io.ReadSeeker]int +} + +func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { + var tmpOffset int64 + switch whence { + case os.SEEK_SET: + for i, rdr := range r.readers { + // get size of the current reader + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + if offset > tmpOffset+s { + if i == len(r.readers)-1 { + rdrOffset := s + (offset - tmpOffset) + if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { + return -1, err + } + r.pos = &pos{i, rdrOffset} + return offset, nil + } + + tmpOffset += s + continue + } + + rdrOffset := offset - tmpOffset + idx := i + + rdr.Seek(rdrOffset, os.SEEK_SET) + // make sure all following readers are at 0 + for _, rdr := range r.readers[i+1:] { + rdr.Seek(0, os.SEEK_SET) + } + + if rdrOffset == s && i != len(r.readers)-1 { + idx++ + rdrOffset = 0 + } + r.pos = &pos{idx, rdrOffset} + return offset, nil + } + case os.SEEK_END: + for _, rdr := range r.readers { + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + tmpOffset += s + } + r.Seek(tmpOffset+offset, os.SEEK_SET) + return tmpOffset + offset, nil + case os.SEEK_CUR: + if r.pos == nil { + return 
r.Seek(offset, os.SEEK_SET) + } + // Just return the current offset + if offset == 0 { + return r.getCurOffset() + } + + curOffset, err := r.getCurOffset() + if err != nil { + return -1, err + } + rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) + if err != nil { + return -1, err + } + + r.pos = &pos{r.posIdx[rdr], rdrOffset} + return curOffset + offset, nil + default: + return -1, fmt.Errorf("Invalid whence: %d", whence) + } + + return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) +} + +func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { + var rdr io.ReadSeeker + var rdrOffset int64 + + for i, rdr := range r.readers { + offsetTo, err := r.getOffsetToReader(rdr) + if err != nil { + return nil, -1, err + } + if offsetTo > offset { + rdr = r.readers[i-1] + rdrOffset = offsetTo - offset + break + } + + if rdr == r.readers[len(r.readers)-1] { + rdrOffset = offsetTo + offset + break + } + } + + return rdr, rdrOffset, nil +} + +func (r *multiReadSeeker) getCurOffset() (int64, error) { + var totalSize int64 + for _, rdr := range r.readers[:r.pos.idx+1] { + if r.posIdx[rdr] == r.pos.idx { + totalSize += r.pos.offset + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, fmt.Errorf("error getting seeker size: %v", err) + } + totalSize += size + } + return totalSize, nil +} + +func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { + var offset int64 + for _, r := range r.readers { + if r == rdr { + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, err + } + offset += size + } + return offset, nil +} + +func (r *multiReadSeeker) Read(b []byte) (int, error) { + if r.pos == nil { + r.pos = &pos{0, 0} + } + + bCap := int64(cap(b)) + buf := bytes.NewBuffer(nil) + var rdr io.ReadSeeker + + for _, rdr = range r.readers[r.pos.idx:] { + readBytes, err := io.CopyN(buf, rdr, bCap) + if err != nil && err != io.EOF { + return -1, err + } + bCap -= readBytes + + if bCap == 0 { + break + } + } + + rdrPos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + r.pos = &pos{r.posIdx[rdr], rdrPos} + return buf.Read(b) +} + +func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { + // save the current position + pos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + + // get the size + size, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + // reset the position + if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { + return -1, err + } + return size, nil +} + +// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided +// input readseekers. After calling this method the initial position is set to the +// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances +// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. +// Seek can be used over the sum of lengths of all readseekers. +// +// When a MultiReadSeeker is used, no Read and Seek operations should be made on +// its ReadSeeker components. Also, users should make no assumption on the state +// of individual readseekers while the MultiReadSeeker is used. 
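+//
+// Illustrative usage (editor's sketch):
+//
+//	r := ioutils.MultiReadSeeker(
+//		strings.NewReader("hello "),
+//		strings.NewReader("world"),
+//	)
+//	end, _ := r.Seek(0, os.SEEK_END) // end is the summed length of both readers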
+func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { + if len(readers) == 1 { + return readers[0] + } + idx := make(map[io.ReadSeeker]int) + for i, rdr := range readers { + idx[rdr] = i + } + return &multiReadSeeker{ + readers: readers, + posIdx: idx, + } +} diff --git a/vendor/src/github.com/containers/storage/pkg/ioutils/readers.go b/vendor/src/github.com/containers/storage/pkg/ioutils/readers.go new file mode 100644 index 00000000..63f3c07f --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/ioutils/readers.go @@ -0,0 +1,154 @@ +package ioutils + +import ( + "crypto/sha256" + "encoding/hex" + "io" + + "golang.org/x/net/context" +) + +type readCloserWrapper struct { + io.Reader + closer func() error +} + +func (r *readCloserWrapper) Close() error { + return r.closer() +} + +// NewReadCloserWrapper returns a new io.ReadCloser. +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &readCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +// NewReaderErrWrapper returns a new io.Reader. +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// HashData returns the sha256 sum of src. +func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} + +// OnEOFReader wraps an io.ReadCloser and a function +// the function will run at the end of file or close the file. +type OnEOFReader struct { + Rc io.ReadCloser + Fn func() +} + +func (r *OnEOFReader) Read(p []byte) (n int, err error) { + n, err = r.Rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +// Close closes the file and run the function. +func (r *OnEOFReader) Close() error { + err := r.Rc.Close() + r.runFunc() + return err +} + +func (r *OnEOFReader) runFunc() { + if fn := r.Fn; fn != nil { + fn() + r.Fn = nil + } +} + +// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read +// operations. +type cancelReadCloser struct { + cancel func() + pR *io.PipeReader // Stream to read from + pW *io.PipeWriter +} + +// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the +// context is cancelled. The returned io.ReadCloser must be closed when it is +// no longer needed. +func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { + pR, pW := io.Pipe() + + // Create a context used to signal when the pipe is closed + doneCtx, cancel := context.WithCancel(context.Background()) + + p := &cancelReadCloser{ + cancel: cancel, + pR: pR, + pW: pW, + } + + go func() { + _, err := io.Copy(pW, in) + select { + case <-ctx.Done(): + // If the context was closed, p.closeWithError + // was already called. Calling it again would + // change the error that Read returns. + default: + p.closeWithError(err) + } + in.Close() + }() + go func() { + for { + select { + case <-ctx.Done(): + p.closeWithError(ctx.Err()) + case <-doneCtx.Done(): + return + } + } + }() + + return p +} + +// Read wraps the Read method of the pipe that provides data from the wrapped +// ReadCloser. 
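+//
+// Illustrative use of the surrounding wrapper (editor's sketch; body is an
+// assumed io.ReadCloser such as an HTTP response body):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	rc := ioutils.NewCancelReadCloser(ctx, body)
+//	defer rc.Close()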
+func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
+	return p.pR.Read(buf)
+}
+
+// closeWithError closes the wrapper and its underlying reader. It will
+// cause future calls to Read to return err.
+func (p *cancelReadCloser) closeWithError(err error) {
+	p.pW.CloseWithError(err)
+	p.cancel()
+}
+
+// Close closes the wrapper and its underlying reader. It will cause
+// future calls to Read to return io.EOF.
+func (p *cancelReadCloser) Close() error {
+	p.closeWithError(io.EOF)
+	return nil
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/ioutils/temp_unix.go b/vendor/src/github.com/containers/storage/pkg/ioutils/temp_unix.go
new file mode 100644
index 00000000..1539ad21
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/ioutils/temp_unix.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package ioutils
+
+import "io/ioutil"
+
+// TempDir on Unix systems is equivalent to ioutil.TempDir.
+func TempDir(dir, prefix string) (string, error) {
+	return ioutil.TempDir(dir, prefix)
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/ioutils/temp_windows.go b/vendor/src/github.com/containers/storage/pkg/ioutils/temp_windows.go
new file mode 100644
index 00000000..c719c120
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/ioutils/temp_windows.go
@@ -0,0 +1,18 @@
+// +build windows
+
+package ioutils
+
+import (
+	"io/ioutil"
+
+	"github.com/containers/storage/pkg/longpath"
+)
+
+// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
+func TempDir(dir, prefix string) (string, error) {
+	tempDir, err := ioutil.TempDir(dir, prefix)
+	if err != nil {
+		return "", err
+	}
+	return longpath.AddPrefix(tempDir), nil
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/ioutils/writeflusher.go b/vendor/src/github.com/containers/storage/pkg/ioutils/writeflusher.go
new file mode 100644
index 00000000..52a4901a
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/ioutils/writeflusher.go
@@ -0,0 +1,92 @@
+package ioutils
+
+import (
+	"io"
+	"sync"
+)
+
+// WriteFlusher wraps the Write and Flush operation ensuring that every write
+// is a flush. In addition, the Close method can be called to intercept
+// Read/Write calls if the target's lifecycle has already ended.
+type WriteFlusher struct {
+	w           io.Writer
+	flusher     flusher
+	flushed     chan struct{}
+	flushedOnce sync.Once
+	closed      chan struct{}
+	closeLock   sync.Mutex
+}
+
+type flusher interface {
+	Flush()
+}
+
+var errWriteFlusherClosed = io.EOF
+
+func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
+	select {
+	case <-wf.closed:
+		return 0, errWriteFlusherClosed
+	default:
+	}
+
+	n, err = wf.w.Write(b)
+	wf.Flush() // every write is a flush.
+	return n, err
+}
+
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+	select {
+	case <-wf.closed:
+		return
+	default:
+	}
+
+	wf.flushedOnce.Do(func() {
+		close(wf.flushed)
+	})
+	wf.flusher.Flush()
+}
+
+// Flushed returns the state of flushed.
+// It returns true if Flush has been called at least once; otherwise false.
+func (wf *WriteFlusher) Flushed() bool {
+	// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
+	// be used to detect whether or not a response code has been issued.
+	// Another hook should be used instead.
+	var flushed bool
+	select {
+	case <-wf.flushed:
+		flushed = true
+	default:
+	}
+	return flushed
+}
+
+// Close closes the write flusher, disallowing any further writes to the
+// target. After the flusher is closed, all calls to write or flush will
+// result in an error.
+func (wf *WriteFlusher) Close() error {
+	wf.closeLock.Lock()
+	defer wf.closeLock.Unlock()
+
+	select {
+	case <-wf.closed:
+		return errWriteFlusherClosed
+	default:
+		close(wf.closed)
+	}
+	return nil
+}
+
+// NewWriteFlusher returns a new WriteFlusher.
+func NewWriteFlusher(w io.Writer) *WriteFlusher {
+	var fl flusher
+	if f, ok := w.(flusher); ok {
+		fl = f
+	} else {
+		fl = &NopFlusher{}
+	}
+	return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/ioutils/writers.go b/vendor/src/github.com/containers/storage/pkg/ioutils/writers.go
new file mode 100644
index 00000000..ccc7f9c2
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/ioutils/writers.go
@@ -0,0 +1,66 @@
+package ioutils
+
+import "io"
+
+// NopWriter represents a type whose write operation is a nop.
+type NopWriter struct{}
+
+func (*NopWriter) Write(buf []byte) (int, error) {
+	return len(buf), nil
+}
+
+type nopWriteCloser struct {
+	io.Writer
+}
+
+func (w *nopWriteCloser) Close() error { return nil }
+
+// NopWriteCloser returns a nopWriteCloser.
+func NopWriteCloser(w io.Writer) io.WriteCloser {
+	return &nopWriteCloser{w}
+}
+
+// NopFlusher represents a type whose flush operation is a nop.
+type NopFlusher struct{}
+
+// Flush is a nop operation.
+func (f *NopFlusher) Flush() {}
+
+type writeCloserWrapper struct {
+	io.Writer
+	closer func() error
+}
+
+func (r *writeCloserWrapper) Close() error {
+	return r.closer()
+}
+
+// NewWriteCloserWrapper returns a new io.WriteCloser.
+func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
+	return &writeCloserWrapper{
+		Writer: r,
+		closer: closer,
+	}
+}
+
+// WriteCounter wraps a concrete io.Writer and holds a count of the number
+// of bytes written to the writer during a "session".
+// This can be convenient when the write return value is masked
+// (e.g., json.Encoder.Encode())
+type WriteCounter struct {
+	Count  int64
+	Writer io.Writer
+}
+
+// NewWriteCounter returns a new WriteCounter.
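+//
+// Illustrative usage (editor's sketch; v is a placeholder value):
+//
+//	wc := ioutils.NewWriteCounter(ioutil.Discard)
+//	if err := json.NewEncoder(wc).Encode(v); err == nil {
+//		fmt.Printf("encoded %d bytes\n", wc.Count)
+//	}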
+func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/vendor/src/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/src/github.com/containers/storage/pkg/loopback/attach_loopback.go new file mode 100644 index 00000000..971f45eb --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/loopback/attach_loopback.go @@ -0,0 +1,137 @@ +// +build linux + +package loopback + +import ( + "errors" + "fmt" + "os" + "syscall" + + "github.com/Sirupsen/logrus" +) + +// Loopback related errors +var ( + ErrAttachLoopbackDevice = errors.New("loopback attach failed") + ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") + ErrSetCapacity = errors.New("Unable set loopback capacity") +) + +func stringToLoopName(src string) [LoNameSize]uint8 { + var dst [LoNameSize]uint8 + copy(dst[:], src[:]) + return dst +} + +func getNextFreeLoopbackIndex() (int, error) { + f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) + if err != nil { + return 0, err + } + defer f.Close() + + index, err := ioctlLoopCtlGetFree(f.Fd()) + if index < 0 { + index = 0 + } + return index, err +} + +func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) { + // Start looking for a free /dev/loop + for { + target := fmt.Sprintf("/dev/loop%d", index) + index++ + + fi, err := os.Stat(target) + if err != nil { + if os.IsNotExist(err) { + logrus.Error("There are no more loopback devices available.") + } + return nil, ErrAttachLoopbackDevice + } + + if fi.Mode()&os.ModeDevice != os.ModeDevice { + logrus.Errorf("Loopback device %s is not a block device.", target) + continue + } + + // OpenFile adds O_CLOEXEC + loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) + if err != nil { + logrus.Errorf("Error opening loopback device: %s", err) + return nil, ErrAttachLoopbackDevice + } + + // Try to attach to the loop file + if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { + loopFile.Close() + + // If the error is EBUSY, then try the next loopback + if err != syscall.EBUSY { + logrus.Errorf("Cannot set up loopback device %s: %s", target, err) + return nil, ErrAttachLoopbackDevice + } + + // Otherwise, we keep going with the loop + continue + } + // In case of success, we finished. Break the loop. + break + } + + // This can't happen, but let's be sure + if loopFile == nil { + logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} + +// AttachLoopDevice attaches the given sparse file to the next +// available loopback device. It returns an opened *os.File. +func AttachLoopDevice(sparseName string) (loop *os.File, err error) { + + // Try to retrieve the next available loopback device via syscall. + // If it fails, we discard error and start looping for a + // loopback from index 0. 
+	startIndex, err := getNextFreeLoopbackIndex()
+	if err != nil {
+		logrus.Debugf("Error retrieving the next available loopback: %s", err)
+	}
+
+	// OpenFile adds O_CLOEXEC
+	sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
+	if err != nil {
+		logrus.Errorf("Error opening sparse file %s: %s", sparseName, err)
+		return nil, ErrAttachLoopbackDevice
+	}
+	defer sparseFile.Close()
+
+	loopFile, err := openNextAvailableLoopback(startIndex, sparseFile)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set the status of the loopback device
+	loopInfo := &loopInfo64{
+		loFileName: stringToLoopName(loopFile.Name()),
+		loOffset:   0,
+		loFlags:    LoFlagsAutoClear,
+	}
+
+	if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
+		logrus.Errorf("Cannot set up loopback device info: %s", err)
+
+		// If the call failed, then free the loopback device
+		if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
+			logrus.Error("Error while cleaning up the loopback device")
+		}
+		loopFile.Close()
+		return nil, ErrAttachLoopbackDevice
+	}
+
+	return loopFile, nil
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/loopback/ioctl.go b/vendor/src/github.com/containers/storage/pkg/loopback/ioctl.go
new file mode 100644
index 00000000..0714eb5f
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/loopback/ioctl.go
@@ -0,0 +1,53 @@
+// +build linux
+
+package loopback
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+func ioctlLoopCtlGetFree(fd uintptr) (int, error) {
+	index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0)
+	if err != 0 {
+		return 0, err
+	}
+	return int(index), nil
+}
+
+func ioctlLoopSetFd(loopFd, sparseFd uintptr) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func ioctlLoopClrFd(loopFd uintptr) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 {
+		return err
+	}
+	return nil
+}
+
+func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) {
+	loopInfo := &loopInfo64{}
+
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
+		return nil, err
+	}
+	return loopInfo, nil
+}
+
+func ioctlLoopSetCapacity(loopFd uintptr, value int) error {
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/src/github.com/containers/storage/pkg/loopback/loop_wrapper.go b/vendor/src/github.com/containers/storage/pkg/loopback/loop_wrapper.go
new file mode 100644
index 00000000..e1100ce1
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/pkg/loopback/loop_wrapper.go
@@ -0,0 +1,52 @@
+// +build linux
+
+package loopback
+
+/*
+#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
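[Editor's note: every wrapper in ioctl.go above follows the same shape, one raw syscall.Syscall(SYS_IOCTL, fd, request, arg) whose non-zero errno becomes the returned error. As one illustration of composing them, a hypothetical in-package helper, not part of the patch, that checks the autoclear flag:]

// +build linux

package loopback

// HasAutoClear reports whether the loop device behind fd has
// LO_FLAGS_AUTOCLEAR set. Hypothetical helper built on the wrappers above.
func HasAutoClear(fd uintptr) (bool, error) {
	info, err := ioctlLoopGetStatus64(fd)
	if err != nil {
		return false, err
	}
	return info.loFlags&LoFlagsAutoClear != 0, nil
}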
+ +#ifndef LOOP_CTL_GET_FREE + #define LOOP_CTL_GET_FREE 0x4C82 +#endif + +#ifndef LO_FLAGS_PARTSCAN + #define LO_FLAGS_PARTSCAN 8 +#endif + +*/ +import "C" + +type loopInfo64 struct { + loDevice uint64 /* ioctl r/o */ + loInode uint64 /* ioctl r/o */ + loRdevice uint64 /* ioctl r/o */ + loOffset uint64 + loSizelimit uint64 /* bytes, 0 == max available */ + loNumber uint32 /* ioctl r/o */ + loEncryptType uint32 + loEncryptKeySize uint32 /* ioctl w/o */ + loFlags uint32 /* ioctl r/o */ + loFileName [LoNameSize]uint8 + loCryptName [LoNameSize]uint8 + loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ + loInit [2]uint64 +} + +// IOCTL consts +const ( + LoopSetFd = C.LOOP_SET_FD + LoopCtlGetFree = C.LOOP_CTL_GET_FREE + LoopGetStatus64 = C.LOOP_GET_STATUS64 + LoopSetStatus64 = C.LOOP_SET_STATUS64 + LoopClrFd = C.LOOP_CLR_FD + LoopSetCapacity = C.LOOP_SET_CAPACITY +) + +// LOOP consts. +const ( + LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR + LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY + LoFlagsPartScan = C.LO_FLAGS_PARTSCAN + LoKeySize = C.LO_KEY_SIZE + LoNameSize = C.LO_NAME_SIZE +) diff --git a/vendor/src/github.com/containers/storage/pkg/loopback/loopback.go b/vendor/src/github.com/containers/storage/pkg/loopback/loopback.go new file mode 100644 index 00000000..bc047928 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/loopback/loopback.go @@ -0,0 +1,63 @@ +// +build linux + +package loopback + +import ( + "fmt" + "os" + "syscall" + + "github.com/Sirupsen/logrus" +) + +func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { + loopInfo, err := ioctlLoopGetStatus64(file.Fd()) + if err != nil { + logrus.Errorf("Error get loopback backing file: %s", err) + return 0, 0, ErrGetLoopbackBackingFile + } + return loopInfo.loDevice, loopInfo.loInode, nil +} + +// SetCapacity reloads the size for the loopback device. +func SetCapacity(file *os.File) error { + if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { + logrus.Errorf("Error loopbackSetCapacity: %s", err) + return ErrSetCapacity + } + return nil +} + +// FindLoopDeviceFor returns a loopback device file for the specified file which +// is backing file of a loop back device. +func FindLoopDeviceFor(file *os.File) *os.File { + stat, err := file.Stat() + if err != nil { + return nil + } + targetInode := stat.Sys().(*syscall.Stat_t).Ino + targetDevice := stat.Sys().(*syscall.Stat_t).Dev + + for i := 0; true; i++ { + path := fmt.Sprintf("/dev/loop%d", i) + + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + // Ignore all errors until the first not-exist + // we want to continue looking for the file + continue + } + + dev, inode, err := getLoopbackBackingFile(file) + if err == nil && dev == targetDevice && inode == targetInode { + return file + } + file.Close() + } + + return nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/mflag/LICENSE b/vendor/src/github.com/containers/storage/pkg/mflag/LICENSE new file mode 100644 index 00000000..9b4f4a29 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/mflag/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
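[Editor's note: stepping back to loopback.go above, FindLoopDeviceFor answers the inverse question, namely which /dev/loopN, if any, is backed by a given file. A usage sketch with a placeholder path:]

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/loopback"
)

func main() {
	// Placeholder path; it must already be the backing file of some loop device.
	backing, err := os.Open("/var/lib/storage/devmapper/data")
	if err != nil {
		panic(err)
	}
	defer backing.Close()

	// FindLoopDeviceFor walks /dev/loop0, /dev/loop1, ... and compares the
	// (device, inode) pair reported by LOOP_GET_STATUS64 against the file.
	loopFile := loopback.FindLoopDeviceFor(backing)
	if loopFile == nil {
		fmt.Println("no loop device is backed by this file")
		return
	}
	defer loopFile.Close()
	fmt.Println("found:", loopFile.Name())
}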
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/src/github.com/containers/storage/pkg/mount/flags.go b/vendor/src/github.com/containers/storage/pkg/mount/flags.go new file mode 100644 index 00000000..607dbed4 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/mount/flags.go @@ -0,0 +1,149 @@ +package mount + +import ( + "fmt" + "strings" +) + +var flags = map[string]struct { + clear bool + flag int +}{ + "defaults": {false, 0}, + "ro": {false, RDONLY}, + "rw": {true, RDONLY}, + "suid": {true, NOSUID}, + "nosuid": {false, NOSUID}, + "dev": {true, NODEV}, + "nodev": {false, NODEV}, + "exec": {true, NOEXEC}, + "noexec": {false, NOEXEC}, + "sync": {false, SYNCHRONOUS}, + "async": {true, SYNCHRONOUS}, + "dirsync": {false, DIRSYNC}, + "remount": {false, REMOUNT}, + "mand": {false, MANDLOCK}, + "nomand": {true, MANDLOCK}, + "atime": {true, NOATIME}, + "noatime": {false, NOATIME}, + "diratime": {true, NODIRATIME}, + "nodiratime": {false, NODIRATIME}, + "bind": {false, BIND}, + "rbind": {false, RBIND}, + "unbindable": {false, UNBINDABLE}, + "runbindable": {false, RUNBINDABLE}, + "private": {false, PRIVATE}, + "rprivate": {false, RPRIVATE}, + "shared": {false, SHARED}, + "rshared": {false, RSHARED}, + "slave": {false, SLAVE}, + "rslave": {false, RSLAVE}, + "relatime": {false, RELATIME}, + "norelatime": {true, RELATIME}, + "strictatime": {false, STRICTATIME}, + "nostrictatime": {true, STRICTATIME}, +} + +var validFlags = map[string]bool{ + "": true, + "size": true, + "mode": true, + "uid": true, + "gid": true, + "nr_inodes": true, + "nr_blocks": true, + "mpol": true, +} + +var propagationFlags = map[string]bool{ + "bind": true, + "rbind": true, + "unbindable": true, + "runbindable": true, + "private": true, + "rprivate": true, + "shared": true, + "rshared": true, + "slave": true, + "rslave": true, +} + +// MergeTmpfsOptions merge mount options to make sure there is no duplicate. +func MergeTmpfsOptions(options []string) ([]string, error) { + // We use collisions maps to remove duplicates. 
+ // For flag, the key is the flag value (the key for propagation flag is -1) + // For data=value, the key is the data + flagCollisions := map[int]bool{} + dataCollisions := map[string]bool{} + + var newOptions []string + // We process in reverse order + for i := len(options) - 1; i >= 0; i-- { + option := options[i] + if option == "defaults" { + continue + } + if f, ok := flags[option]; ok && f.flag != 0 { + // There is only one propagation mode + key := f.flag + if propagationFlags[option] { + key = -1 + } + // Check to see if there is collision for flag + if !flagCollisions[key] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) + flagCollisions[key] = true + } + continue + } + opt := strings.SplitN(option, "=", 2) + if len(opt) != 2 || !validFlags[opt[0]] { + return nil, fmt.Errorf("Invalid tmpfs option %q", opt) + } + if !dataCollisions[opt[0]] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) + dataCollisions[opt[0]] = true + } + } + + return newOptions, nil +} + +// Parse fstab type mount options into mount() flags +// and device specific data +func parseOptions(options string) (int, string) { + var ( + flag int + data []string + ) + + for _, o := range strings.Split(options, ",") { + // If the option does not exist in the flags table or the flag + // is not supported on the platform, + // then it is a data value for a specific fs type + if f, exists := flags[o]; exists && f.flag != 0 { + if f.clear { + flag &= ^f.flag + } else { + flag |= f.flag + } + } else { + data = append(data, o) + } + } + return flag, strings.Join(data, ",") +} + +// ParseTmpfsOptions parse fstab type mount options into flags and data +func ParseTmpfsOptions(options string) (int, string, error) { + flags, data := parseOptions(options) + for _, o := range strings.Split(data, ",") { + opt := strings.SplitN(o, "=", 2) + if !validFlags[opt[0]] { + return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) + } + } + return flags, data, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/mount/flags_freebsd.go b/vendor/src/github.com/containers/storage/pkg/mount/flags_freebsd.go new file mode 100644 index 00000000..f166cb2f --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/mount/flags_freebsd.go @@ -0,0 +1,48 @@ +// +build freebsd,cgo + +package mount + +/* +#include +*/ +import "C" + +const ( + // RDONLY will mount the filesystem as read-only. + RDONLY = C.MNT_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = C.MNT_NOSUID + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = C.MNT_NOEXEC + + // SYNCHRONOUS will allow any I/O to the file system to be done synchronously. + SYNCHRONOUS = C.MNT_SYNCHRONOUS + + // NOATIME will not update the file access time when reading from a file. + NOATIME = C.MNT_NOATIME +) + +// These flags are unsupported. 
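[Editor's note: to make the option tables in flags.go concrete, parseOptions folds recognized names into a flag word and passes everything else through as data, while ParseTmpfsOptions additionally validates the data keys. A sketch of the expected behavior; the hex value assumes the Linux constants.]

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/mount"
)

func main() {
	// "size" and "mode" are not in the flags table, so they pass through as
	// data; "ro" and "noatime" become MS_RDONLY|MS_NOATIME on Linux.
	flags, data, err := mount.ParseTmpfsOptions("ro,noatime,size=64m,mode=700")
	if err != nil {
		panic(err)
	}
	fmt.Printf("flags=%#x data=%q\n", flags, data)
	// e.g. flags=0x401 data="size=64m,mode=700"
}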
+const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NODEV = 0 + NODIRATIME = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIVE = 0 + RELATIME = 0 + REMOUNT = 0 + STRICTATIME = 0 +) diff --git a/vendor/src/github.com/containers/storage/pkg/mount/flags_linux.go b/vendor/src/github.com/containers/storage/pkg/mount/flags_linux.go new file mode 100644 index 00000000..dc696dce --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/mount/flags_linux.go @@ -0,0 +1,85 @@ +package mount + +import ( + "syscall" +) + +const ( + // RDONLY will mount the file system read-only. + RDONLY = syscall.MS_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = syscall.MS_NOSUID + + // NODEV will not interpret character or block special devices on the file + // system. + NODEV = syscall.MS_NODEV + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = syscall.MS_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. + SYNCHRONOUS = syscall.MS_SYNCHRONOUS + + // DIRSYNC will force all directory updates within the file system to be done + // synchronously. This affects the following system calls: create, link, + // unlink, symlink, mkdir, rmdir, mknod and rename. + DIRSYNC = syscall.MS_DIRSYNC + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = syscall.MS_REMOUNT + + // MANDLOCK will force mandatory locks on a filesystem. + MANDLOCK = syscall.MS_MANDLOCK + + // NOATIME will not update the file access time when reading from a file. + NOATIME = syscall.MS_NOATIME + + // NODIRATIME will not update the directory access time. + NODIRATIME = syscall.MS_NODIRATIME + + // BIND remounts a subtree somewhere else. + BIND = syscall.MS_BIND + + // RBIND remounts a subtree and all possible submounts somewhere else. + RBIND = syscall.MS_BIND | syscall.MS_REC + + // UNBINDABLE creates a mount which cannot be cloned through a bind operation. + UNBINDABLE = syscall.MS_UNBINDABLE + + // RUNBINDABLE marks the entire mount tree as UNBINDABLE. + RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC + + // PRIVATE creates a mount which carries no propagation abilities. + PRIVATE = syscall.MS_PRIVATE + + // RPRIVATE marks the entire mount tree as PRIVATE. + RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC + + // SLAVE creates a mount which receives propagation from its master, but not + // vice versa. + SLAVE = syscall.MS_SLAVE + + // RSLAVE marks the entire mount tree as SLAVE. + RSLAVE = syscall.MS_SLAVE | syscall.MS_REC + + // SHARED creates a mount which provides the ability to create mirrors of + // that mount such that mounts and unmounts within any of the mirrors + // propagate to the other mirrors. + SHARED = syscall.MS_SHARED + + // RSHARED marks the entire mount tree as SHARED. + RSHARED = syscall.MS_SHARED | syscall.MS_REC + + // RELATIME updates inode access times relative to modify or change time. + RELATIME = syscall.MS_RELATIME + + // STRICTATIME allows to explicitly request full atime updates. This makes + // it possible for the kernel to default to relatime or noatime but still + // allow userspace to override it. 
+ STRICTATIME = syscall.MS_STRICTATIME +) diff --git a/vendor/src/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/src/github.com/containers/storage/pkg/mount/flags_unsupported.go new file mode 100644 index 00000000..5564f7b3 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/mount/flags_unsupported.go @@ -0,0 +1,30 @@ +// +build !linux,!freebsd freebsd,!cgo solaris,!cgo + +package mount + +// These flags are unsupported. +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NOATIME = 0 + NODEV = 0 + NODIRATIME = 0 + NOEXEC = 0 + NOSUID = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIME = 0 + RELATIVE = 0 + REMOUNT = 0 + STRICTATIME = 0 + SYNCHRONOUS = 0 + RDONLY = 0 +) diff --git a/vendor/src/github.com/containers/storage/pkg/mount/mount.go b/vendor/src/github.com/containers/storage/pkg/mount/mount.go new file mode 100644 index 00000000..66ac4bf4 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/mount/mount.go @@ -0,0 +1,74 @@ +package mount + +import ( + "time" +) + +// GetMounts retrieves a list of mounts for the current running process. +func GetMounts() ([]*Info, error) { + return parseMountTable() +} + +// Mounted determines if a specified mountpoint has been mounted. +// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab. +func Mounted(mountpoint string) (bool, error) { + entries, err := parseMountTable() + if err != nil { + return false, err + } + + // Search the table for the mountpoint + for _, e := range entries { + if e.Mountpoint == mountpoint { + return true, nil + } + } + return false, nil +} + +// Mount will mount filesystem according to the specified configuration, on the +// condition that the target path is *not* already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func Mount(device, target, mType, options string) error { + flag, _ := parseOptions(options) + if flag&REMOUNT != REMOUNT { + if mounted, err := Mounted(target); err != nil || mounted { + return err + } + } + return ForceMount(device, target, mType, options) +} + +// ForceMount will mount a filesystem according to the specified configuration, +// *regardless* if the target path is not already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func ForceMount(device, target, mType, options string) error { + flag, data := parseOptions(options) + if err := mount(device, target, mType, uintptr(flag), data); err != nil { + return err + } + return nil +} + +// Unmount will unmount the target filesystem, so long as it is mounted. +func Unmount(target string) error { + if mounted, err := Mounted(target); err != nil || !mounted { + return err + } + return ForceUnmount(target) +} + +// ForceUnmount will force an unmount of the target filesystem, regardless if +// it is mounted or not. 
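[Editor's note: the mount.go API above is deliberately small. Mount parses the fstab-style option string and skips the work if the target is already mounted, unless "remount" is among the options; Unmount is the symmetric no-op-if-not-mounted wrapper. A usage sketch with placeholder paths:]

package main

import (
	"github.com/containers/storage/pkg/mount"
)

func main() {
	// Bind-mount one placeholder directory onto another. Mount checks the
	// mount table first, so repeating this call is harmless.
	if err := mount.Mount("/src/data", "/mnt/data", "none", "bind,rw"); err != nil {
		panic(err)
	}
	defer mount.Unmount("/mnt/data")
}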
+func ForceUnmount(target string) (err error) { + // Simple retry logic for unmount + for i := 0; i < 10; i++ { + if err = unmount(target, 0); err == nil { + return nil + } + time.Sleep(100 * time.Millisecond) + } + return +} diff --git a/vendor/src/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/src/github.com/containers/storage/pkg/mount/mounter_freebsd.go new file mode 100644 index 00000000..bb870e6f --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/mount/mounter_freebsd.go @@ -0,0 +1,59 @@ +package mount + +/* +#include +#include +#include +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "strings" + "syscall" + "unsafe" +) + +func allocateIOVecs(options []string) []C.struct_iovec { + out := make([]C.struct_iovec, len(options)) + for i, option := range options { + out[i].iov_base = unsafe.Pointer(C.CString(option)) + out[i].iov_len = C.size_t(len(option) + 1) + } + return out +} + +func mount(device, target, mType string, flag uintptr, data string) error { + isNullFS := false + + xs := strings.Split(data, ",") + for _, x := range xs { + if x == "bind" { + isNullFS = true + } + } + + options := []string{"fspath", target} + if isNullFS { + options = append(options, "fstype", "nullfs", "target", device) + } else { + options = append(options, "fstype", mType, "from", device) + } + rawOptions := allocateIOVecs(options) + for _, rawOption := range rawOptions { + defer C.free(rawOption.iov_base) + } + + if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { + reason := C.GoString(C.strerror(*C.__error())) + return fmt.Errorf("Failed to call nmount: %s", reason) + } + return nil +} + +func unmount(target string, flag int) error { + return syscall.Unmount(target, flag) +} diff --git a/vendor/src/github.com/containers/storage/pkg/mount/mounter_linux.go b/vendor/src/github.com/containers/storage/pkg/mount/mounter_linux.go new file mode 100644 index 00000000..dd4280c7 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/mount/mounter_linux.go @@ -0,0 +1,21 @@ +package mount + +import ( + "syscall" +) + +func mount(device, target, mType string, flag uintptr, data string) error { + if err := syscall.Mount(device, target, mType, flag, data); err != nil { + return err + } + + // If we have a bind mount or remount, remount... 
+ if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY { + return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data) + } + return nil +} + +func unmount(target string, flag int) error { + return syscall.Unmount(target, flag) +} diff --git a/vendor/src/github.com/containers/storage/pkg/mount/mounter_solaris.go b/vendor/src/github.com/containers/storage/pkg/mount/mounter_solaris.go new file mode 100644 index 00000000..c684aa81 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/mount/mounter_solaris.go @@ -0,0 +1,33 @@ +// +build solaris,cgo + +package mount + +import ( + "golang.org/x/sys/unix" + "unsafe" +) + +// #include +// #include +// #include +// int Mount(const char *spec, const char *dir, int mflag, +// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) { +// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen); +// } +import "C" + +func mount(device, target, mType string, flag uintptr, data string) error { + spec := C.CString(device) + dir := C.CString(target) + fstype := C.CString(mType) + _, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0) + C.free(unsafe.Pointer(spec)) + C.free(unsafe.Pointer(dir)) + C.free(unsafe.Pointer(fstype)) + return err +} + +func unmount(target string, flag int) error { + err := unix.Unmount(target, flag) + return err +} diff --git a/vendor/src/github.com/containers/storage/pkg/mount/mounter_unsupported.go b/vendor/src/github.com/containers/storage/pkg/mount/mounter_unsupported.go new file mode 100644 index 00000000..a2a3bb45 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/mount/mounter_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo + +package mount + +func mount(device, target, mType string, flag uintptr, data string) error { + panic("Not implemented") +} + +func unmount(target string, flag int) error { + panic("Not implemented") +} diff --git a/vendor/src/github.com/containers/storage/pkg/mount/mountinfo.go b/vendor/src/github.com/containers/storage/pkg/mount/mountinfo.go new file mode 100644 index 00000000..e3fc3535 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/mount/mountinfo.go @@ -0,0 +1,40 @@ +package mount + +// Info reveals information about a particular mounted filesystem. This +// struct is populated from the content in the /proc//mountinfo file. +type Info struct { + // ID is a unique identifier of the mount (may be reused after umount). + ID int + + // Parent indicates the ID of the mount parent (or of self for the top of the + // mount tree). + Parent int + + // Major indicates one half of the device ID which identifies the device class. + Major int + + // Minor indicates one half of the device ID which identifies a specific + // instance of device. + Minor int + + // Root of the mount within the filesystem. + Root string + + // Mountpoint indicates the mount point relative to the process's root. + Mountpoint string + + // Opts represents mount-specific options. + Opts string + + // Optional represents optional fields. + Optional string + + // Fstype indicates the type of filesystem, such as EXT3. + Fstype string + + // Source indicates filesystem specific information or "none". + Source string + + // VfsOpts represents per super block options. 
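[Editor's note: the second mount(2) call in the Linux mounter above is needed because the kernel ignores MS_RDONLY on the initial MS_BIND call; a read-only bind mount only becomes read-only after a follow-up MS_REMOUNT. Roughly, at the syscall level, a sketch rather than the vendored code:]

package main

import "syscall"

func readOnlyBind(src, dst string) error {
	// First call creates the bind mount; MS_RDONLY is ignored here.
	if err := syscall.Mount(src, dst, "", syscall.MS_BIND, ""); err != nil {
		return err
	}
	// Second call applies the read-only flag to the now-existing mount.
	return syscall.Mount(src, dst, "",
		syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY, "")
}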
+ VfsOpts string +} diff --git a/vendor/src/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go b/vendor/src/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go new file mode 100644 index 00000000..4f32edcd --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go @@ -0,0 +1,41 @@ +package mount + +/* +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "reflect" + "unsafe" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts. +func parseMountTable() ([]*Info, error) { + var rawEntries *C.struct_statfs + + count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) + if count == 0 { + return nil, fmt.Errorf("Failed to call getmntinfo") + } + + var entries []C.struct_statfs + header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) + header.Cap = count + header.Len = count + header.Data = uintptr(unsafe.Pointer(rawEntries)) + + var out []*Info + for _, entry := range entries { + var mountinfo Info + mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) + mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) + mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) + out = append(out, &mountinfo) + } + return out, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/mount/mountinfo_linux.go b/vendor/src/github.com/containers/storage/pkg/mount/mountinfo_linux.go new file mode 100644 index 00000000..be69fee1 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/mount/mountinfo_linux.go @@ -0,0 +1,95 @@ +// +build linux + +package mount + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +const ( + /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options*/ + mountinfoFormat = "%d %d %d:%d %s %s %s %s" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts +func parseMountTable() ([]*Info, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} + +func parseInfoFile(r io.Reader) ([]*Info, error) { + var ( + s = bufio.NewScanner(r) + out = []*Info{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + var ( + p = &Info{} + text = s.Text() + optionalFields string + ) + + if _, err := fmt.Sscanf(text, mountinfoFormat, + &p.ID, &p.Parent, &p.Major, &p.Minor, + &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { + return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + // Safe as mountinfo encodes mountpoints with spaces as \040. 
+ index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) < 3 { + return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + if optionalFields != "-" { + p.Optional = optionalFields + } + + p.Fstype = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") + out = append(out, p) + } + return out, nil +} + +// PidMountInfo collects the mounts for a specific process ID. If the process +// ID is unknown, it is better to use `GetMounts` which will inspect +// "/proc/self/mountinfo" instead. +func PidMountInfo(pid int) ([]*Info, error) { + f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} diff --git a/vendor/src/github.com/containers/storage/pkg/mount/mountinfo_solaris.go b/vendor/src/github.com/containers/storage/pkg/mount/mountinfo_solaris.go new file mode 100644 index 00000000..ad9ab57f --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/mount/mountinfo_solaris.go @@ -0,0 +1,37 @@ +// +build solaris,cgo + +package mount + +/* +#include +#include +*/ +import "C" + +import ( + "fmt" +) + +func parseMountTable() ([]*Info, error) { + mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r")) + if mnttab == nil { + return nil, fmt.Errorf("Failed to open %s", C.MNTTAB) + } + + var out []*Info + var mp C.struct_mnttab + + ret := C.getmntent(mnttab, &mp) + for ret == 0 { + var mountinfo Info + mountinfo.Mountpoint = C.GoString(mp.mnt_mountp) + mountinfo.Source = C.GoString(mp.mnt_special) + mountinfo.Fstype = C.GoString(mp.mnt_fstype) + mountinfo.Opts = C.GoString(mp.mnt_mntopts) + out = append(out, &mountinfo) + ret = C.getmntent(mnttab, &mp) + } + + C.fclose(mnttab) + return out, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go b/vendor/src/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go new file mode 100644 index 00000000..7fbcf192 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go @@ -0,0 +1,12 @@ +// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo + +package mount + +import ( + "fmt" + "runtime" +) + +func parseMountTable() ([]*Info, error) { + return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/vendor/src/github.com/containers/storage/pkg/mount/mountinfo_windows.go b/vendor/src/github.com/containers/storage/pkg/mount/mountinfo_windows.go new file mode 100644 index 00000000..dab8a37e --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/mount/mountinfo_windows.go @@ -0,0 +1,6 @@ +package mount + +func parseMountTable() ([]*Info, error) { + // Do NOT return an error! + return nil, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go b/vendor/src/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go new file mode 100644 index 00000000..8ceec84b --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go @@ -0,0 +1,69 @@ +// +build linux + +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. 
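[Editor's note: parseInfoFile above splits each mountinfo line at the " - " separator because the number of optional fields varies. A sketch of reading the table through the exported entry point; the field mapping in the comment mirrors the documented example line.]

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/mount"
)

func main() {
	// On Linux this reads /proc/self/mountinfo through parseInfoFile.
	infos, err := mount.GetMounts()
	if err != nil {
		panic(err)
	}
	for _, info := range infos {
		// A line like
		//   36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
		// populates ID=36 Parent=35 Major=98 Minor=0 Root=/mnt1
		// Mountpoint=/mnt2 Opts=rw,noatime Optional=master:1 Fstype=ext3
		// Source=/dev/root VfsOpts=rw,errors=continue.
		fmt.Printf("%s on %s type %s (%s)\n",
			info.Source, info.Mountpoint, info.Fstype, info.Opts)
	}
}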
+func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + mounted, err := Mounted(mountPoint) + if err != nil { + return err + } + + if !mounted { + if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { + return err + } + } + if _, err = Mounted(mountPoint); err != nil { + return err + } + + return ForceMount("", mountPoint, "none", options) +} diff --git a/vendor/src/github.com/containers/storage/pkg/parsers/kernel/kernel.go b/vendor/src/github.com/containers/storage/pkg/parsers/kernel/kernel.go new file mode 100644 index 00000000..7738fc74 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/parsers/kernel/kernel.go @@ -0,0 +1,74 @@ +// +build !windows + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "errors" + "fmt" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4) + Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1) + Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2) + Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) +} + +// CompareKernelVersion compares two kernel.VersionInfo structs. 
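[Editor's note: all of the Make* helpers below funnel into ensureMountedAs, which bind-mounts the path onto itself if it is not yet a mount point and then re-mounts with the propagation option. A usage sketch with a placeholder path:]

package main

import (
	"github.com/containers/storage/pkg/mount"
)

func main() {
	// Placeholder path. If /var/lib/storage is not yet a mount point,
	// ensureMountedAs first bind-mounts it onto itself, then applies the
	// rprivate propagation flag with a second mount call.
	if err := mount.MakeRPrivate("/var/lib/storage"); err != nil {
		panic(err)
	}
}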
+// Returns -1 if a < b, 0 if a == b, 1 it a > b +func CompareKernelVersion(a, b VersionInfo) int { + if a.Kernel < b.Kernel { + return -1 + } else if a.Kernel > b.Kernel { + return 1 + } + + if a.Major < b.Major { + return -1 + } else if a.Major > b.Major { + return 1 + } + + if a.Minor < b.Minor { + return -1 + } else if a.Minor > b.Minor { + return 1 + } + + return 0 +} + +// ParseRelease parses a string and creates a VersionInfo based on it. +func ParseRelease(release string) (*VersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers. + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &VersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go b/vendor/src/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go new file mode 100644 index 00000000..71f205b2 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go @@ -0,0 +1,56 @@ +// +build darwin + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "fmt" + "os/exec" + "strings" + + "github.com/mattn/go-shellwords" +) + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + release, err := getRelease() + if err != nil { + return nil, err + } + + return ParseRelease(release) +} + +// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version +func getRelease() (string, error) { + cmd := exec.Command("system_profiler", "SPSoftwareDataType") + osName, err := cmd.Output() + if err != nil { + return "", err + } + + var release string + data := strings.Split(string(osName), "\n") + for _, line := range data { + if strings.Contains(line, "Kernel Version") { + // It has the format like ' Kernel Version: Darwin 14.5.0' + content := strings.SplitN(line, ":", 2) + if len(content) != 2 { + return "", fmt.Errorf("Kernel Version is invalid") + } + + prettyNames, err := shellwords.Parse(content[1]) + if err != nil { + return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error()) + } + + if len(prettyNames) != 2 { + return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ") + } + release = prettyNames[1] + } + } + + return release, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go b/vendor/src/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go new file mode 100644 index 00000000..54a89d28 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go @@ -0,0 +1,30 @@ +// +build linux freebsd solaris + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "bytes" +) + +// GetKernelVersion gets the current kernel version. 
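[Editor's note: ParseRelease and CompareKernelVersion above are typically paired to gate a feature on a minimum kernel. A sketch; the version strings are illustrative.]

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/parsers/kernel"
)

func main() {
	// ParseRelease tolerates a missing flavor and odd suffixes, e.g.
	// "3.12-1-amd64" parses as {Kernel:3 Major:12 Minor:0 Flavor:"-1-amd64"}.
	v, err := kernel.ParseRelease("4.1.2-generic")
	if err != nil {
		panic(err)
	}
	if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 3, Major: 10, Minor: 0}) >= 0 {
		fmt.Println("kernel is at least 3.10")
	}
}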
+func GetKernelVersion() (*VersionInfo, error) { + uts, err := uname() + if err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + // Remove the \x00 from the release for Atoi to parse correctly + release = release[:bytes.IndexByte(release, 0)] + + return ParseRelease(string(release)) +} diff --git a/vendor/src/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go b/vendor/src/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go new file mode 100644 index 00000000..80fab8ff --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go @@ -0,0 +1,69 @@ +// +build windows + +package kernel + +import ( + "fmt" + "syscall" + "unsafe" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6) + major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1) + minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601) + build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) +} + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + + var ( + h syscall.Handle + dwVersion uint32 + err error + ) + + KVI := &VersionInfo{"Unknown", 0, 0, 0} + + if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, + syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + syscall.KEY_READ, + &h); err != nil { + return KVI, err + } + defer syscall.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err = syscall.RegQueryValueEx(h, + syscall.StringToUTF16Ptr("BuildLabEx"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return KVI, err + } + + KVI.kvi = syscall.UTF16ToString(buf[:]) + + // Important - docker.exe MUST be manifested for this API to return + // the correct information. + if dwVersion, err = syscall.GetVersion(); err != nil { + return KVI, err + } + + KVI.major = int(dwVersion & 0xFF) + KVI.minor = int((dwVersion & 0XFF00) >> 8) + KVI.build = int((dwVersion & 0xFFFF0000) >> 16) + + return KVI, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go b/vendor/src/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go new file mode 100644 index 00000000..bb9b3264 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go @@ -0,0 +1,19 @@ +package kernel + +import ( + "syscall" +) + +// Utsname represents the system name structure. +// It is passthrough for syscall.Utsname in order to make it portable with +// other platforms where it is not available. 
+type Utsname syscall.Utsname + +func uname() (*syscall.Utsname, error) { + uts := &syscall.Utsname{} + + if err := syscall.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go b/vendor/src/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go new file mode 100644 index 00000000..49370bd3 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go @@ -0,0 +1,14 @@ +package kernel + +import ( + "golang.org/x/sys/unix" +) + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go b/vendor/src/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go new file mode 100644 index 00000000..1da3f239 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux,!solaris + +package kernel + +import ( + "errors" +) + +// Utsname represents the system name structure. +// It is defined here to make it portable as it is available on linux but not +// on windows. +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is available only on linux") +} diff --git a/vendor/src/github.com/containers/storage/pkg/parsers/parsers.go b/vendor/src/github.com/containers/storage/pkg/parsers/parsers.go new file mode 100644 index 00000000..acc89716 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/parsers/parsers.go @@ -0,0 +1,69 @@ +// Package parsers provides helper functions to parse and validate different type +// of string. It can be hosts, unix addresses, tcp addresses, filters, kernel +// operating system versions. +package parsers + +import ( + "fmt" + "strconv" + "strings" +) + +// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} + +// ParseUintList parses and validates the specified string as the value +// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be +// one of the formats below. Note that duplicates are actually allowed in the +// input string. It returns a `map[int]bool` with available elements from `val` +// set to `true`. 
+// Supported formats: +// 7 +// 1-6 +// 0,3-4,7,8-10 +// 0-0,0,1-7 +// 03,1-3 <- this is gonna get parsed as [1,2,3] +// 3,2,1 +// 0-2,3,1 +func ParseUintList(val string) (map[int]bool, error) { + if val == "" { + return map[int]bool{}, nil + } + + availableInts := make(map[int]bool) + split := strings.Split(val, ",") + errInvalidFormat := fmt.Errorf("invalid format: %s", val) + + for _, r := range split { + if !strings.Contains(r, "-") { + v, err := strconv.Atoi(r) + if err != nil { + return nil, errInvalidFormat + } + availableInts[v] = true + } else { + split := strings.SplitN(r, "-", 2) + min, err := strconv.Atoi(split[0]) + if err != nil { + return nil, errInvalidFormat + } + max, err := strconv.Atoi(split[1]) + if err != nil { + return nil, errInvalidFormat + } + if max < min { + return nil, errInvalidFormat + } + for i := min; i <= max; i++ { + availableInts[i] = true + } + } + } + return availableInts, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/pools/pools.go b/vendor/src/github.com/containers/storage/pkg/pools/pools.go new file mode 100644 index 00000000..a15e3688 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/pools/pools.go @@ -0,0 +1,119 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "github.com/containers/storage/pkg/ioutils" +) + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool *BufioReaderPool + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool *BufioWriterPool +) + +const buffer32K = 32 * 1024 + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool *sync.Pool +} + +func init() { + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + pool := &sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + } + return &BufioReaderPool{pool: pool} +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. +func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := BufioReader32KPool.Get(src) + written, err = io.Copy(dst, buf) + BufioReader32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. 
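[Editor's note: a quick illustration of the formats listed above; ParseUintList expands ranges and returns set membership as a map.]

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/parsers"
)

func main() {
	// Duplicates and overlapping ranges are allowed in the input.
	set, err := parsers.ParseUintList("0,3-4,7")
	if err != nil {
		panic(err)
	}
	fmt.Println(set) // map[0:true 3:true 4:true 7:true]
}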
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +// BufioWriterPool is a bufio writer that uses sync.Pool. +type BufioWriterPool struct { + pool *sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + pool := &sync.Pool{ + New: func() interface{} { return bufio.NewWriterSize(nil, size) }, + } + return &BufioWriterPool{pool: pool} +} + +// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + buf := bufPool.pool.Get().(*bufio.Writer) + buf.Reset(w) + return buf +} + +// Put puts the bufio.Writer back into the pool. +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back +// into the pool and closes the writer if it's an io.Writecloser. +func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + writeCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} diff --git a/vendor/src/github.com/containers/storage/pkg/promise/promise.go b/vendor/src/github.com/containers/storage/pkg/promise/promise.go new file mode 100644 index 00000000..dd52b908 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/promise/promise.go @@ -0,0 +1,11 @@ +package promise + +// Go is a basic promise implementation: it wraps calls a function in a goroutine, +// and returns a channel which will later return the function's return value. +func Go(f func() error) chan error { + ch := make(chan error, 1) + go func() { + ch <- f() + }() + return ch +} diff --git a/vendor/src/github.com/containers/storage/pkg/random/random.go b/vendor/src/github.com/containers/storage/pkg/random/random.go new file mode 100644 index 00000000..70de4d13 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/random/random.go @@ -0,0 +1,71 @@ +package random + +import ( + cryptorand "crypto/rand" + "io" + "math" + "math/big" + "math/rand" + "sync" + "time" +) + +// Rand is a global *rand.Rand instance, which initialized with NewSource() source. +var Rand = rand.New(NewSource()) + +// Reader is a global, shared instance of a pseudorandom bytes generator. +// It doesn't consume entropy. +var Reader io.Reader = &reader{rnd: Rand} + +// copypaste from standard math/rand +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// NewSource returns math/rand.Source safe for concurrent use and initialized +// with current unix-nano timestamp +func NewSource() rand.Source { + var seed int64 + if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { + // This should not happen, but worst-case fallback to time-based seed. 
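[Editor's note: the pools package above is most often used through Copy, which borrows a pooled 32K bufio.Reader instead of allocating one per call. A minimal sketch:]

package main

import (
	"bytes"
	"strings"

	"github.com/containers/storage/pkg/pools"
)

func main() {
	var dst bytes.Buffer
	src := strings.NewReader("payload")

	// Copy takes a reader from BufioReader32KPool for the duration of the
	// io.Copy and returns it afterwards.
	if _, err := pools.Copy(&dst, src); err != nil {
		panic(err)
	}

	// The pools can also be used directly; Put resets the reader so the
	// pooled object never retains a reference to src.
	buf := pools.BufioReader32KPool.Get(src)
	defer pools.BufioReader32KPool.Put(buf)
}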
+ seed = time.Now().UnixNano() + } else { + seed = cryptoseed.Int64() + } + return &lockedSource{ + src: rand.NewSource(seed), + } +} + +type reader struct { + rnd *rand.Rand +} + +func (r *reader) Read(b []byte) (int, error) { + i := 0 + for { + val := r.rnd.Int63() + for val > 0 { + b[i] = byte(val) + i++ + if i == len(b) { + return i, nil + } + val >>= 8 + } + } +} diff --git a/vendor/src/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/src/github.com/containers/storage/pkg/reexec/command_linux.go new file mode 100644 index 00000000..3c3a73a9 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/reexec/command_linux.go @@ -0,0 +1,28 @@ +// +build linux + +package reexec + +import ( + "os/exec" + "syscall" +) + +// Self returns the path to the current process's binary. +// Returns "/proc/self/exe". +func Self() string { + return "/proc/self/exe" +} + +// Command returns *exec.Cmd which have Path as current binary. Also it setting +// SysProcAttr.Pdeathsig to SIGTERM. +// This will use the in-memory version (/proc/self/exe) of the current binary, +// it is thus safe to delete or replace the on-disk binary (os.Args[0]). +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + SysProcAttr: &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGTERM, + }, + } +} diff --git a/vendor/src/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/src/github.com/containers/storage/pkg/reexec/command_unix.go new file mode 100644 index 00000000..b70edcb3 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/reexec/command_unix.go @@ -0,0 +1,23 @@ +// +build freebsd solaris + +package reexec + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which have Path as current binary. +// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will +// be set to "/usr/bin/docker". +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + } +} diff --git a/vendor/src/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/src/github.com/containers/storage/pkg/reexec/command_unsupported.go new file mode 100644 index 00000000..9aed004e --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/reexec/command_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!windows,!freebsd,!solaris + +package reexec + +import ( + "os/exec" +) + +// Command is unsupported on operating systems apart from Linux and Windows. +func Command(args ...string) *exec.Cmd { + return nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/src/github.com/containers/storage/pkg/reexec/command_windows.go new file mode 100644 index 00000000..8d65e0ae --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/reexec/command_windows.go @@ -0,0 +1,23 @@ +// +build windows + +package reexec + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which have Path as current binary. +// For example if current binary is "docker.exe" at "C:\", then cmd.Path will +// be set to "C:\docker.exe". 
+func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + } +} diff --git a/vendor/src/github.com/containers/storage/pkg/reexec/reexec.go b/vendor/src/github.com/containers/storage/pkg/reexec/reexec.go new file mode 100644 index 00000000..c56671d9 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/reexec/reexec.go @@ -0,0 +1,47 @@ +package reexec + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" +) + +var registeredInitializers = make(map[string]func()) + +// Register adds an initialization func under the specified name +func Register(name string, initializer func()) { + if _, exists := registeredInitializers[name]; exists { + panic(fmt.Sprintf("reexec func already registered under name %q", name)) + } + + registeredInitializers[name] = initializer +} + +// Init is called as the first part of the exec process and returns true if an +// initialization function was called. +func Init() bool { + initializer, exists := registeredInitializers[os.Args[0]] + if exists { + initializer() + + return true + } + return false +} + +func naiveSelf() string { + name := os.Args[0] + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + return lp + } + } + // handle conversion of relative paths to absolute + if absName, err := filepath.Abs(name); err == nil { + return absName + } + // if we couldn't get absolute name, return original + // (NOTE: Go only errors on Abs() if os.Getwd fails) + return name +} diff --git a/vendor/src/github.com/containers/storage/pkg/stringid/stringid.go b/vendor/src/github.com/containers/storage/pkg/stringid/stringid.go new file mode 100644 index 00000000..74dfaaaa --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/stringid/stringid.go @@ -0,0 +1,71 @@ +// Package stringid provides helper functions for dealing with string identifiers +package stringid + +import ( + "crypto/rand" + "encoding/hex" + "io" + "regexp" + "strconv" + "strings" + + "github.com/containers/storage/pkg/random" +) + +const shortLen = 12 + +var validShortID = regexp.MustCompile("^[a-z0-9]{12}$") + +// IsShortID determines if an arbitrary string *looks like* a short ID. +func IsShortID(id string) bool { + return validShortID.MatchString(id) +} + +// TruncateID returns a shorthand version of a string identifier for convenience. +// A collision with other shorthands is very unlikely, but possible. +// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller +// will need to use a longer prefix, or the full-length Id. +func TruncateID(id string) string { + if i := strings.IndexRune(id, ':'); i >= 0 { + id = id[i+1:] + } + trimTo := shortLen + if len(id) < shortLen { + trimTo = len(id) + } + return id[:trimTo] +} + +func generateID(crypto bool) string { + b := make([]byte, 32) + r := random.Reader + if crypto { + r = rand.Reader + } + for { + if _, err := io.ReadFull(r, b); err != nil { + panic(err) // This shouldn't happen + } + id := hex.EncodeToString(b) + // if we try to parse the truncated for as an int and we don't have + // an error then the value is all numeric and causes issues when + // used as a hostname. ref #3869 + if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { + continue + } + return id + } +} + +// GenerateRandomID returns a unique id. +func GenerateRandomID() string { + return generateID(true) + +} + +// GenerateNonCryptoID generates unique id without using cryptographically +// secure sources of random. +// It helps you to save entropy. 
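[Editor's note: the reexec registry below is used in a fixed pattern, register an initializer at package init time, call Init first thing in main, and spawn children with Command. A sketch; the name "storage-helper" is made up.]

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/reexec"
)

func init() {
	// The registered name must match os.Args[0] of the child, which
	// reexec.Command sets to the first element of args.
	reexec.Register("storage-helper", func() {
		fmt.Println("running re-execed helper")
		os.Exit(0)
	})
}

func main() {
	// In the child, Init finds "storage-helper" in the registry, runs it,
	// and returns true before any of the parent logic executes.
	if reexec.Init() {
		return
	}

	cmd := reexec.Command("storage-helper")
	cmd.Stdout = os.Stdout
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}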
+func GenerateNonCryptoID() string { + return generateID(false) +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/chtimes.go b/vendor/src/github.com/containers/storage/pkg/system/chtimes.go new file mode 100644 index 00000000..7637f12e --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/chtimes.go @@ -0,0 +1,52 @@ +package system + +import ( + "os" + "syscall" + "time" + "unsafe" +) + +var ( + maxTime time.Time +) + +func init() { + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} + +// Chtimes changes the access time and modified time of a file at the given path +func Chtimes(name string, atime time.Time, mtime time.Time) error { + unixMinTime := time.Unix(0, 0) + unixMaxTime := maxTime + + // If the modified time is prior to the Unix Epoch, or after the + // end of Unix Time, os.Chtimes has undefined behavior + // default to Unix Epoch in this case, just in case + + if atime.Before(unixMinTime) || atime.After(unixMaxTime) { + atime = unixMinTime + } + + if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { + mtime = unixMinTime + } + + if err := os.Chtimes(name, atime, mtime); err != nil { + return err + } + + // Take platform specific action for setting create time. + if err := setCTime(name, mtime); err != nil { + return err + } + + return nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/chtimes_unix.go b/vendor/src/github.com/containers/storage/pkg/system/chtimes_unix.go new file mode 100644 index 00000000..09d58bcb --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/chtimes_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system + +import ( + "time" +) + +//setCTime will set the create time on a file. On Unix, the create +//time is updated as a side effect of setting the modified time, so +//no action is required. +func setCTime(path string, ctime time.Time) error { + return nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/chtimes_windows.go b/vendor/src/github.com/containers/storage/pkg/system/chtimes_windows.go new file mode 100644 index 00000000..29458684 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/chtimes_windows.go @@ -0,0 +1,27 @@ +// +build windows + +package system + +import ( + "syscall" + "time" +) + +//setCTime will set the create time on a file. On Windows, this requires +//calling SetFileTime and explicitly including the create time. +func setCTime(path string, ctime time.Time) error { + ctimespec := syscall.NsecToTimespec(ctime.UnixNano()) + pathp, e := syscall.UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := syscall.CreateFile(pathp, + syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil, + syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer syscall.Close(h) + c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec)) + return syscall.SetFileTime(h, &c, nil, nil) +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/errors.go b/vendor/src/github.com/containers/storage/pkg/system/errors.go new file mode 100644 index 00000000..28831898 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/errors.go @@ -0,0 +1,10 @@ +package system + +import ( + "errors" +) + +var ( + // ErrNotSupportedPlatform means the platform is not supported. 
+ ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") +) diff --git a/vendor/src/github.com/containers/storage/pkg/system/events_windows.go b/vendor/src/github.com/containers/storage/pkg/system/events_windows.go new file mode 100644 index 00000000..04e2de78 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/events_windows.go @@ -0,0 +1,83 @@ +package system + +// This file implements syscalls for Win32 events which are not implemented +// in golang. + +import ( + "syscall" + "unsafe" +) + +var ( + procCreateEvent = modkernel32.NewProc("CreateEventW") + procOpenEvent = modkernel32.NewProc("OpenEventW") + procSetEvent = modkernel32.NewProc("SetEvent") + procResetEvent = modkernel32.NewProc("ResetEvent") + procPulseEvent = modkernel32.NewProc("PulseEvent") +) + +// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. +func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 + if manualReset { + _p1 = 1 + } + var _p2 uint32 + if initialState { + _p2 = 1 + } + r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +// OpenEvent implements win32 OpenEventW func in golang. It opens an event object. +func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 + if inheritHandle { + _p1 = 1 + } + r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +// SetEvent implements win32 SetEvent func in golang. +func SetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procSetEvent) +} + +// ResetEvent implements win32 ResetEvent func in golang. +func ResetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procResetEvent) +} + +// PulseEvent implements win32 PulseEvent func in golang. +func PulseEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procPulseEvent) +} + +func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) { + r0, _, _ := proc.Call(uintptr(handle)) + if r0 != 0 { + err = syscall.Errno(r0) + } + return +} + +var temp unsafe.Pointer + +// use ensures a variable is kept alive without the GC freeing while still needed +func use(p unsafe.Pointer) { + temp = p +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/filesys.go b/vendor/src/github.com/containers/storage/pkg/system/filesys.go new file mode 100644 index 00000000..c14feb84 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/filesys.go @@ -0,0 +1,19 @@ +// +build !windows + +package system + +import ( + "os" + "path/filepath" +) + +// MkdirAll creates a directory named path along with any necessary parents, +// with permission specified by attribute perm for all dir created. +func MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. 
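The wrappers above expose the Win32 CreateEventW/SetEvent/ResetEvent calls that the standard library doesn't cover. A Windows-only sketch of how they compose; the event name is hypothetical:

// +build windows

package main

import (
	"fmt"
	"syscall"

	"github.com/containers/storage/pkg/system"
)

func main() {
	// Manual-reset event, initially unsignaled.
	h, err := system.CreateEvent(nil, true, false, "Global\\example-event")
	if err != nil {
		fmt.Println("create:", err)
		return
	}
	defer syscall.CloseHandle(h)
	// Wake any waiters, then return the event to the unsignaled state.
	if err := system.SetEvent(h); err != nil {
		fmt.Println("set:", err)
		return
	}
	_ = system.ResetEvent(h)
}
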
+func IsAbs(path string) bool { + return filepath.IsAbs(path) +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/filesys_windows.go b/vendor/src/github.com/containers/storage/pkg/system/filesys_windows.go new file mode 100644 index 00000000..16823d55 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/filesys_windows.go @@ -0,0 +1,82 @@ +// +build windows + +package system + +import ( + "os" + "path/filepath" + "regexp" + "strings" + "syscall" +) + +// MkdirAll implementation that is volume path aware for Windows. +func MkdirAll(path string, perm os.FileMode) error { + if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { + return nil + } + + // The rest of this method is copied from os.MkdirAll and should be kept + // as-is to ensure compatibility. + + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{ + Op: "mkdir", + Path: path, + Err: syscall.ENOTDIR, + } + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent + err = MkdirAll(path[0:j-1], perm) + if err != nil { + return err + } + } + + // Parent now exists; invoke Mkdir and use its result. + err = os.Mkdir(path, perm) + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, +// golang filepath.IsAbs does not consider a path \windows\system32 as absolute +// as it doesn't start with a drive-letter/colon combination. However, in +// docker we need to verify things such as WORKDIR /windows/system32 in +// a Dockerfile (which gets translated to \windows\system32 when being processed +// by the daemon. This SHOULD be treated as absolute from a docker processing +// perspective. +func IsAbs(path string) bool { + if !filepath.IsAbs(path) { + if !strings.HasPrefix(path, string(os.PathSeparator)) { + return false + } + } + return true +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/lstat.go b/vendor/src/github.com/containers/storage/pkg/system/lstat.go new file mode 100644 index 00000000..bd23c4d5 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/lstat.go @@ -0,0 +1,19 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Lstat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Lstat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Lstat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/lstat_windows.go b/vendor/src/github.com/containers/storage/pkg/system/lstat_windows.go new file mode 100644 index 00000000..49e87eb4 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/lstat_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package system + +import ( + "os" +) + +// Lstat calls os.Lstat to get a fileinfo interface back. 
+// This is then copied into our own locally defined structure. +// Note the Linux version uses fromStatT to do the copy back, +// but that not strictly necessary when already in an OS specific module. +func Lstat(path string) (*StatT, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + return &StatT{ + name: fi.Name(), + size: fi.Size(), + mode: fi.Mode(), + modTime: fi.ModTime(), + isDir: fi.IsDir()}, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/meminfo.go b/vendor/src/github.com/containers/storage/pkg/system/meminfo.go new file mode 100644 index 00000000..3b6e947e --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. + SwapFree int64 +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/meminfo_linux.go b/vendor/src/github.com/containers/storage/pkg/system/meminfo_linux.go new file mode 100644 index 00000000..385f1d5e --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/meminfo_linux.go @@ -0,0 +1,65 @@ +package system + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" + + "github.com/docker/go-units" +) + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given an io.Reader to the file. +// Throws error if there are problems reading from the file +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. + size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + bytes := int64(size) * units.KiB + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + + } + + // Handle errors that may have occurred during the reading of the file. 
+ if err := scanner.Err(); err != nil { + return nil, err + } + + return meminfo, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/src/github.com/containers/storage/pkg/system/meminfo_solaris.go new file mode 100644 index 00000000..313c601b --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/meminfo_solaris.go @@ -0,0 +1,128 @@ +// +build solaris,cgo + +package system + +import ( + "fmt" + "unsafe" +) + +// #cgo LDFLAGS: -lkstat +// #include +// #include +// #include +// #include +// #include +// #include +// struct swaptable *allocSwaptable(int num) { +// struct swaptable *st; +// struct swapent *swapent; +// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int)); +// swapent = st->swt_ent; +// for (int i = 0; i < num; i++,swapent++) { +// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char)); +// } +// st->swt_n = num; +// return st; +//} +// void freeSwaptable (struct swaptable *st) { +// struct swapent *swapent = st->swt_ent; +// for (int i = 0; i < st->swt_n; i++,swapent++) { +// free(swapent->ste_path); +// } +// free(st); +// } +// swapent_t getSwapEnt(swapent_t *ent, int i) { +// return ent[i]; +// } +// int64_t getPpKernel() { +// int64_t pp_kernel = 0; +// kstat_ctl_t *ksc; +// kstat_t *ks; +// kstat_named_t *knp; +// kid_t kid; +// +// if ((ksc = kstat_open()) == NULL) { +// return -1; +// } +// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) { +// return -1; +// } +// if (((kid = kstat_read(ksc, ks, NULL)) == -1) || +// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) { +// return -1; +// } +// switch (knp->data_type) { +// case KSTAT_DATA_UINT64: +// pp_kernel = knp->value.ui64; +// break; +// case KSTAT_DATA_UINT32: +// pp_kernel = knp->value.ui32; +// break; +// } +// pp_kernel *= sysconf(_SC_PAGESIZE); +// return (pp_kernel > 0 ? pp_kernel : -1); +// } +import "C" + +// Get the system memory info using sysconf same as prtconf +func getTotalMem() int64 { + pagesize := C.sysconf(C._SC_PAGESIZE) + npages := C.sysconf(C._SC_PHYS_PAGES) + return int64(pagesize * npages) +} + +func getFreeMem() int64 { + pagesize := C.sysconf(C._SC_PAGESIZE) + npages := C.sysconf(C._SC_AVPHYS_PAGES) + return int64(pagesize * npages) +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. 
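Each platform variant of ReadMemInfo returns byte counts; on Linux, parseMemInfo above converts the kB-suffixed /proc/meminfo fields (e.g. "MemTotal: 2048 kB" becomes MemTotal == 2048*1024). A small usage sketch:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/system"
)

func main() {
	mi, err := system.ReadMemInfo()
	if err != nil {
		fmt.Println("meminfo:", err)
		return
	}
	// All fields are in bytes regardless of platform.
	fmt.Printf("total=%d free=%d swap=%d/%d\n",
		mi.MemTotal, mi.MemFree, mi.SwapFree, mi.SwapTotal)
}
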
+func ReadMemInfo() (*MemInfo, error) { + + ppKernel := C.getPpKernel() + MemTotal := getTotalMem() + MemFree := getFreeMem() + SwapTotal, SwapFree, err := getSysSwap() + + if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || + SwapFree < 0 { + return nil, fmt.Errorf("Error getting system memory info %v\n", err) + } + + meminfo := &MemInfo{} + // Total memory is total physical memory less than memory locked by kernel + meminfo.MemTotal = MemTotal - int64(ppKernel) + meminfo.MemFree = MemFree + meminfo.SwapTotal = SwapTotal + meminfo.SwapFree = SwapFree + + return meminfo, nil +} + +func getSysSwap() (int64, int64, error) { + var tSwap int64 + var fSwap int64 + var diskblksPerPage int64 + num, err := C.swapctl(C.SC_GETNSWP, nil) + if err != nil { + return -1, -1, err + } + st := C.allocSwaptable(num) + _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st)) + if err != nil { + C.freeSwaptable(st) + return -1, -1, err + } + + diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT) + for i := 0; i < int(num); i++ { + swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i)) + tSwap += int64(swapent.ste_pages) * diskblksPerPage + fSwap += int64(swapent.ste_free) * diskblksPerPage + } + C.freeSwaptable(st) + return tSwap, fSwap, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/meminfo_unsupported.go b/vendor/src/github.com/containers/storage/pkg/system/meminfo_unsupported.go new file mode 100644 index 00000000..3ce019df --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/meminfo_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux,!windows,!solaris + +package system + +// ReadMemInfo is not supported on platforms other than linux and windows. +func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/meminfo_windows.go b/vendor/src/github.com/containers/storage/pkg/system/meminfo_windows.go new file mode 100644 index 00000000..d4664259 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/meminfo_windows.go @@ -0,0 +1,44 @@ +package system + +import ( + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. 
+func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/mknod.go b/vendor/src/github.com/containers/storage/pkg/system/mknod.go new file mode 100644 index 00000000..73958182 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/mknod.go @@ -0,0 +1,22 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return syscall.Mknod(path, mode, dev) +} + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. +func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/mknod_windows.go b/vendor/src/github.com/containers/storage/pkg/system/mknod_windows.go new file mode 100644 index 00000000..2e863c02 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/mknod_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package system + +// Mknod is not implemented on Windows. +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +// Mkdev is not implemented on Windows. +func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on Windows.") +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/path_unix.go b/vendor/src/github.com/containers/storage/pkg/system/path_unix.go new file mode 100644 index 00000000..c607c4db --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/path_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system + +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, +// is the system drive. This is a no-op on Linux. +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + return path, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/path_windows.go b/vendor/src/github.com/containers/storage/pkg/system/path_windows.go new file mode 100644 index 00000000..cbfe2c15 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/path_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package system + +import ( + "fmt" + "path/filepath" + "strings" +) + +// DefaultPathEnv is deliberately empty on Windows as the default path will be set by +// the container. Docker has no context of what the default path should be. 
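The Mkdev packing above follows the extended Linux dev_t layout: the low 8 bits of the minor, then 12 bits of the major, then the remaining minor bits. A sketch, assuming the conventional major/minor numbers of /dev/null:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/system"
)

func main() {
	// /dev/null is major 1, minor 3; the packed value is usable as
	// the dev argument to system.Mknod.
	dev := system.Mkdev(1, 3)
	fmt.Printf("0x%x\n", dev) // 0x103 for small major/minor numbers
	// Creating the node needs CAP_MKNOD; mode combines file type
	// and permissions, e.g.:
	// err := system.Mknod("/tmp/null-copy", syscall.S_IFCHR|0666, int(dev))
}
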
+const DefaultPathEnv = "" + +// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. +// This is used, for example, when validating a user provided path in docker cp. +// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. Also, it translates it to OS semantics (IOW / to \). We +// need the path in this syntax so that it can ultimately be contatenated with +// a Windows long-path which doesn't support drive-letters. Examples: +// C: --> Fail +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("No relative path specified in %q", path) + } + if !filepath.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("The specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/stat.go b/vendor/src/github.com/containers/storage/pkg/system/stat.go new file mode 100644 index 00000000..087034c5 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/stat.go @@ -0,0 +1,53 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// StatT type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file. +type StatT struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +// Mode returns file's permission mode. +func (s StatT) Mode() uint32 { + return s.mode +} + +// UID returns file's user id of owner. +func (s StatT) UID() uint32 { + return s.uid +} + +// GID returns file's group id of owner. +func (s StatT) GID() uint32 { + return s.gid +} + +// Rdev returns file's device ID (if it's special file). +func (s StatT) Rdev() uint64 { + return s.rdev +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() syscall.Timespec { + return s.mtim +} + +// GetLastModification returns file's last modification time. +func (s StatT) GetLastModification() syscall.Timespec { + return s.Mtim() +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/stat_freebsd.go b/vendor/src/github.com/containers/storage/pkg/system/stat_freebsd.go new file mode 100644 index 00000000..d0fb6f15 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/stat_freebsd.go @@ -0,0 +1,27 @@ +package system + +import ( + "syscall" +) + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} + +// Stat takes a path to a file and returns +// a system.Stat_t type pertaining to that file. 
+// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/src/github.com/containers/storage/pkg/system/stat_linux.go new file mode 100644 index 00000000..8b1eded1 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/stat_linux.go @@ -0,0 +1,33 @@ +package system + +import ( + "syscall" +) + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + rdev: s.Rdev, + mtim: s.Mtim}, nil +} + +// FromStatT exists only on linux, and loads a system.StatT from a +// syscal.Stat_t. +func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/stat_openbsd.go b/vendor/src/github.com/containers/storage/pkg/system/stat_openbsd.go new file mode 100644 index 00000000..3c3b71fb --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/stat_openbsd.go @@ -0,0 +1,15 @@ +package system + +import ( + "syscall" +) + +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/stat_solaris.go b/vendor/src/github.com/containers/storage/pkg/system/stat_solaris.go new file mode 100644 index 00000000..0216985a --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/stat_solaris.go @@ -0,0 +1,34 @@ +// +build solaris + +package system + +import ( + "syscall" +) + +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} + +// FromStatT loads a system.StatT from a syscal.Stat_t. +func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. 
+// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/stat_unsupported.go b/vendor/src/github.com/containers/storage/pkg/system/stat_unsupported.go new file mode 100644 index 00000000..f53e9de4 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/stat_unsupported.go @@ -0,0 +1,17 @@ +// +build !linux,!windows,!freebsd,!solaris,!openbsd + +package system + +import ( + "syscall" +) + +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/src/github.com/containers/storage/pkg/system/stat_windows.go new file mode 100644 index 00000000..39490c62 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/stat_windows.go @@ -0,0 +1,43 @@ +// +build windows + +package system + +import ( + "os" + "time" +) + +// StatT type contains status of a file. It contains metadata +// like name, permission, size, etc about a file. +type StatT struct { + name string + size int64 + mode os.FileMode + modTime time.Time + isDir bool +} + +// Name returns file's name. +func (s StatT) Name() string { + return s.name +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mode returns file's permission mode. +func (s StatT) Mode() os.FileMode { + return s.mode +} + +// ModTime returns file's last modification time. +func (s StatT) ModTime() time.Time { + return s.modTime +} + +// IsDir returns whether file is actually a directory. +func (s StatT) IsDir() bool { + return s.isDir +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/syscall_unix.go b/vendor/src/github.com/containers/storage/pkg/system/syscall_unix.go new file mode 100644 index 00000000..3ae91284 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/syscall_unix.go @@ -0,0 +1,17 @@ +// +build linux freebsd + +package system + +import "syscall" + +// Unmount is a platform-specific helper function to call +// the unmount syscall. +func Unmount(dest string) error { + return syscall.Unmount(dest, 0) +} + +// CommandLineToArgv should not be used on Unix. +// It simply returns commandLine in the only element in the returned array. 
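On Unix platforms, the Stat/fromStatT pairs above normalize the per-OS differences in syscall.Stat_t field types behind the StatT accessors. A minimal sketch (Windows exposes a different accessor set on its own StatT):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/system"
)

func main() {
	st, err := system.Stat("/etc/hosts")
	if err != nil {
		fmt.Println("stat:", err)
		return
	}
	// The accessors return the same types on every Unix platform.
	fmt.Printf("mode=%o uid=%d gid=%d size=%d\n",
		st.Mode(), st.UID(), st.GID(), st.Size())
}
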
+func CommandLineToArgv(commandLine string) ([]string, error) { + return []string{commandLine}, nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/syscall_windows.go b/vendor/src/github.com/containers/storage/pkg/system/syscall_windows.go new file mode 100644 index 00000000..f5f2d569 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/syscall_windows.go @@ -0,0 +1,103 @@ +package system + +import ( + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" +) + +var ( + ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + +// GetOSVersion gets the operating system version on Windows. Note that +// docker.exe must be manifested to get the correct version information. +func GetOSVersion() OSVersion { + var err error + osv := OSVersion{} + osv.Version, err = syscall.GetVersion() + if err != nil { + // GetVersion never fails. + panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv +} + +// IsWindowsClient returns true if the SKU is client +func IsWindowsClient() bool { + osviex := &osVersionInfoEx{OSVersionInfoSize: 284} + r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) + if r1 == 0 { + logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) + return false + } + const verNTWorkstation = 0x00000001 + return osviex.ProductType == verNTWorkstation +} + +// Unmount is a platform-specific helper function to call +// the unmount syscall. Not supported on Windows +func Unmount(dest string) error { + return nil +} + +// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. +func CommandLineToArgv(commandLine string) ([]string, error) { + var argc int32 + + argsPtr, err := syscall.UTF16PtrFromString(commandLine) + if err != nil { + return nil, err + } + + argv, err := syscall.CommandLineToArgv(argsPtr, &argc) + if err != nil { + return nil, err + } + defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv)))) + + newArgs := make([]string, argc) + for i, v := range (*argv)[:argc] { + newArgs[i] = string(syscall.UTF16ToString((*v)[:])) + } + + return newArgs, nil +} + +// HasWin32KSupport determines whether containers that depend on win32k can +// run on this machine. Win32k is the driver used to implement windowing. +func HasWin32KSupport() bool { + // For now, check for ntuser API support on the host. In the future, a host + // may support win32k in containers even if the host does not support ntuser + // APIs. 
+ return ntuserApiset.Load() == nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/umask.go b/vendor/src/github.com/containers/storage/pkg/system/umask.go new file mode 100644 index 00000000..3d0146b0 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/umask.go @@ -0,0 +1,13 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Umask sets current process's file mode creation mask to newmask +// and returns oldmask. +func Umask(newmask int) (oldmask int, err error) { + return syscall.Umask(newmask), nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/umask_windows.go b/vendor/src/github.com/containers/storage/pkg/system/umask_windows.go new file mode 100644 index 00000000..13f1de17 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/umask_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package system + +// Umask is not supported on the windows platform. +func Umask(newmask int) (oldmask int, err error) { + // should not be called on cli code path + return 0, ErrNotSupportedPlatform +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/utimes_darwin.go b/vendor/src/github.com/containers/storage/pkg/system/utimes_darwin.go new file mode 100644 index 00000000..0a161975 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/utimes_darwin.go @@ -0,0 +1,8 @@ +package system + +import "syscall" + +// LUtimesNano is not supported by darwin platform. +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/utimes_freebsd.go b/vendor/src/github.com/containers/storage/pkg/system/utimes_freebsd.go new file mode 100644 index 00000000..e2eac3b5 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/utimes_freebsd.go @@ -0,0 +1,22 @@ +package system + +import ( + "syscall" + "unsafe" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. +func LUtimesNano(path string, ts []syscall.Timespec) error { + var _path *byte + _path, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { + return err + } + + return nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/utimes_linux.go b/vendor/src/github.com/containers/storage/pkg/system/utimes_linux.go new file mode 100644 index 00000000..fc8a1aba --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/utimes_linux.go @@ -0,0 +1,26 @@ +package system + +import ( + "syscall" + "unsafe" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. 
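As the comment above notes, LUtimesNano targets the link itself rather than its target, which plain syscall.UtimesNano cannot do; on Linux it goes through utimensat with AT_SYMLINK_NOFOLLOW. A usage sketch with a hypothetical link path:

package main

import (
	"syscall"
	"time"

	"github.com/containers/storage/pkg/system"
)

func main() {
	// ts[0] is the access time, ts[1] the modification time.
	now := syscall.NsecToTimespec(time.Now().UnixNano())
	ts := []syscall.Timespec{now, now}
	_ = system.LUtimesNano("/tmp/example-symlink", ts)
}
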
+func LUtimesNano(path string, ts []syscall.Timespec) error { + // These are not currently available in syscall + atFdCwd := -100 + atSymLinkNoFollow := 0x100 + + var _path *byte + _path, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS { + return err + } + + return nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/utimes_unsupported.go b/vendor/src/github.com/containers/storage/pkg/system/utimes_unsupported.go new file mode 100644 index 00000000..50c3a043 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/utimes_unsupported.go @@ -0,0 +1,10 @@ +// +build !linux,!freebsd,!darwin + +package system + +import "syscall" + +// LUtimesNano is not supported on platforms other than linux, freebsd and darwin. +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/src/github.com/containers/storage/pkg/system/xattrs_linux.go new file mode 100644 index 00000000..d2e2c057 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/xattrs_linux.go @@ -0,0 +1,63 @@ +package system + +import ( + "syscall" + "unsafe" +) + +// Lgetxattr retrieves the value of the extended attribute identified by attr +// and associated with the given path in the file system. +// It will returns a nil slice and nil error if the xattr is not set. +func Lgetxattr(path string, attr string) ([]byte, error) { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return nil, err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return nil, err + } + + dest := make([]byte, 128) + destBytes := unsafe.Pointer(&dest[0]) + sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + if errno == syscall.ENODATA { + return nil, nil + } + if errno == syscall.ERANGE { + dest = make([]byte, sz) + destBytes := unsafe.Pointer(&dest[0]) + sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + } + if errno != 0 { + return nil, errno + } + + return dest[:sz], nil +} + +var _zero uintptr + +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system. 
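Lgetxattr above first tries a 128-byte buffer and retries with the kernel-reported size on ERANGE; a nil slice with a nil error means the attribute is simply not set. A Linux-only sketch (path and attribute name hypothetical):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/system"
)

func main() {
	val, err := system.Lgetxattr("/tmp/example-file", "user.comment")
	if err != nil {
		fmt.Println("lgetxattr:", err)
		return
	}
	if val == nil {
		// Distinguishes "not set" from an empty value or an error.
		fmt.Println("attribute not set")
		return
	}
	fmt.Printf("user.comment=%q\n", val)
}
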
+func Lsetxattr(path string, attr string, data []byte, flags int) error { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return err + } + var dataBytes unsafe.Pointer + if len(data) > 0 { + dataBytes = unsafe.Pointer(&data[0]) + } else { + dataBytes = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) + if errno != 0 { + return errno + } + return nil +} diff --git a/vendor/src/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/src/github.com/containers/storage/pkg/system/xattrs_unsupported.go new file mode 100644 index 00000000..0114f222 --- /dev/null +++ b/vendor/src/github.com/containers/storage/pkg/system/xattrs_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package system + +// Lgetxattr is not supported on platforms other than linux. +func Lgetxattr(path string, attr string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +// Lsetxattr is not supported on platforms other than linux. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/src/github.com/containers/storage/storage/containers.go b/vendor/src/github.com/containers/storage/storage/containers.go new file mode 100644 index 00000000..5398f7a7 --- /dev/null +++ b/vendor/src/github.com/containers/storage/storage/containers.go @@ -0,0 +1,478 @@ +package storage + +import ( + "encoding/json" + "errors" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/stringid" +) + +var ( + // ErrContainerUnknown indicates that there was no container with the specified name or ID + ErrContainerUnknown = errors.New("container not known") +) + +// A Container is a reference to a read-write layer with metadata. +type Container struct { + // ID is either one which was specified at create-time, or a random + // value which was generated by the library. + ID string `json:"id"` + + // Names is an optional set of user-defined convenience values. The + // container can be referred to by its ID or any of its names. Names + // are unique among containers. + Names []string `json:"names,omitempty"` + + // ImageID is the ID of the image which was used to create the container. + ImageID string `json:"image"` + + // LayerID is the ID of the read-write layer for the container itself. + // It is assumed that the image's top layer is the parent of the container's + // read-write layer. + LayerID string `json:"layer"` + + // Metadata is data we keep for the convenience of the caller. It is not + // expected to be large, since it is kept in memory. + Metadata string `json:"metadata,omitempty"` + + // BigDataNames is a list of names of data items that we keep for the + // convenience of the caller. They can be large, and are only in + // memory when being read from or written to disk. + BigDataNames []string `json:"big-data-names,omitempty"` + + // BigDataSizes maps the names in BigDataNames to the sizes of the data + // that has been stored, if they're known. + BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` + + Flags map[string]interface{} `json:"flags,omitempty"` +} + +// ContainerStore provides bookkeeping for information about Containers. 
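The Container record above is what gets serialized into containers.json. A sketch showing the resulting JSON shape, assuming the vendored import path and using illustrative IDs (real ones are random 64-hex-digit strings from the stringid package):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/containers/storage/storage"
)

func main() {
	c := storage.Container{
		ID:       "0123456789abcdef",
		Names:    []string{"my-container"},
		ImageID:  "fedcba9876543210",
		LayerID:  "aabbccddeeff0011",
		Metadata: "{}",
	}
	// Empty big-data and flag fields are omitted via omitempty,
	// matching the on-disk containers.json entries.
	buf, _ := json.Marshal(c)
	fmt.Println(string(buf))
}
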
+type ContainerStore interface { + FileBasedStore + MetadataStore + BigDataStore + FlaggableStore + + // Create creates a container that has a specified ID (or generates a + // random one if an empty value is supplied) and optional names, + // based on the specified image, using the specified layer as its + // read-write layer. + Create(id string, names []string, image, layer, metadata string) (*Container, error) + + // SetNames updates the list of names associated with the container + // with the specified ID. + SetNames(id string, names []string) error + + // Get retrieves information about a container given an ID or name. + Get(id string) (*Container, error) + + // Exists checks if there is a container with the given ID or name. + Exists(id string) bool + + // Delete removes the record of the container. + Delete(id string) error + + // Wipe removes records of all containers. + Wipe() error + + // Lookup attempts to translate a name to an ID. Most methods do this + // implicitly. + Lookup(name string) (string, error) + + // Containers returns a slice enumerating the known containers. + Containers() ([]Container, error) +} + +type containerStore struct { + lockfile Locker + dir string + containers []Container + byid map[string]*Container + bylayer map[string]*Container + byname map[string]*Container +} + +func (r *containerStore) Containers() ([]Container, error) { + return r.containers, nil +} + +func (r *containerStore) containerspath() string { + return filepath.Join(r.dir, "containers.json") +} + +func (r *containerStore) datadir(id string) string { + return filepath.Join(r.dir, id) +} + +func (r *containerStore) datapath(id, key string) string { + return filepath.Join(r.datadir(id), makeBigDataBaseName(key)) +} + +func (r *containerStore) Load() error { + needSave := false + rpath := r.containerspath() + data, err := ioutil.ReadFile(rpath) + if err != nil && !os.IsNotExist(err) { + return err + } + containers := []Container{} + layers := make(map[string]*Container) + ids := make(map[string]*Container) + names := make(map[string]*Container) + if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil { + for n, container := range containers { + ids[container.ID] = &containers[n] + layers[container.LayerID] = &containers[n] + for _, name := range container.Names { + if conflict, ok := names[name]; ok { + r.removeName(conflict, name) + needSave = true + } + names[name] = &containers[n] + } + } + } + r.containers = containers + r.byid = ids + r.bylayer = layers + r.byname = names + if needSave { + r.Touch() + return r.Save() + } + return nil +} + +func (r *containerStore) Save() error { + rpath := r.containerspath() + jdata, err := json.Marshal(&r.containers) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(rpath, jdata, 0600) +} + +func newContainerStore(dir string) (ContainerStore, error) { + if err := os.MkdirAll(dir, 0700); err != nil { + return nil, err + } + lockfile, err := GetLockfile(filepath.Join(dir, "containers.lock")) + if err != nil { + return nil, err + } + lockfile.Lock() + defer lockfile.Unlock() + cstore := containerStore{ + lockfile: lockfile, + dir: dir, + containers: []Container{}, + byid: make(map[string]*Container), + bylayer: make(map[string]*Container), + byname: make(map[string]*Container), + } + if err := cstore.Load(); err != nil { + return nil, err + } + return &cstore, nil +} + +func (r *containerStore) ClearFlag(id string, flag string) error { + if container, ok := r.byname[id]; ok { + id = container.ID + } else if 
container, ok := r.bylayer[id]; ok { + id = container.ID + } + if _, ok := r.byid[id]; !ok { + return ErrContainerUnknown + } + container := r.byid[id] + delete(container.Flags, flag) + return r.Save() +} + +func (r *containerStore) SetFlag(id string, flag string, value interface{}) error { + if container, ok := r.byname[id]; ok { + id = container.ID + } else if container, ok := r.bylayer[id]; ok { + id = container.ID + } + if _, ok := r.byid[id]; !ok { + return ErrContainerUnknown + } + container := r.byid[id] + container.Flags[flag] = value + return r.Save() +} + +func (r *containerStore) Create(id string, names []string, image, layer, metadata string) (container *Container, err error) { + if id == "" { + id = stringid.GenerateRandomID() + _, idInUse := r.byid[id] + for idInUse { + id = stringid.GenerateRandomID() + _, idInUse = r.byid[id] + } + } + if _, idInUse := r.byid[id]; idInUse { + return nil, ErrDuplicateID + } + for _, name := range names { + if _, nameInUse := r.byname[name]; nameInUse { + return nil, ErrDuplicateName + } + } + if err == nil { + newContainer := Container{ + ID: id, + Names: names, + ImageID: image, + LayerID: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + Flags: make(map[string]interface{}), + } + r.containers = append(r.containers, newContainer) + container = &r.containers[len(r.containers)-1] + r.byid[id] = container + r.bylayer[layer] = container + for _, name := range names { + r.byname[name] = container + } + err = r.Save() + } + return container, err +} + +func (r *containerStore) GetMetadata(id string) (string, error) { + if container, ok := r.byname[id]; ok { + id = container.ID + } else if container, ok := r.bylayer[id]; ok { + id = container.ID + } + if container, ok := r.byid[id]; ok { + return container.Metadata, nil + } + return "", ErrContainerUnknown +} + +func (r *containerStore) SetMetadata(id, metadata string) error { + if container, ok := r.byname[id]; ok { + id = container.ID + } else if container, ok := r.bylayer[id]; ok { + id = container.ID + } + if container, ok := r.byid[id]; ok { + container.Metadata = metadata + return r.Save() + } + return ErrContainerUnknown +} + +func (r *containerStore) removeName(container *Container, name string) { + newNames := []string{} + for _, oldName := range container.Names { + if oldName != name { + newNames = append(newNames, oldName) + } + } + container.Names = newNames +} + +func (r *containerStore) SetNames(id string, names []string) error { + if container, ok := r.byname[id]; ok { + id = container.ID + } else if container, ok := r.bylayer[id]; ok { + id = container.ID + } + if container, ok := r.byid[id]; ok { + for _, name := range container.Names { + delete(r.byname, name) + } + for _, name := range names { + if otherContainer, ok := r.byname[name]; ok { + r.removeName(otherContainer, name) + } + r.byname[name] = container + } + container.Names = names + return r.Save() + } + return ErrContainerUnknown +} + +func (r *containerStore) Delete(id string) error { + if container, ok := r.byname[id]; ok { + id = container.ID + } else if container, ok := r.bylayer[id]; ok { + id = container.ID + } + if _, ok := r.byid[id]; !ok { + return ErrContainerUnknown + } + if container, ok := r.byid[id]; ok { + newContainers := []Container{} + for _, candidate := range r.containers { + if candidate.ID != id { + newContainers = append(newContainers, candidate) + } + } + delete(r.byid, container.ID) + delete(r.bylayer, container.LayerID) + for _, name := range 
container.Names { + delete(r.byname, name) + } + r.containers = newContainers + if err := r.Save(); err != nil { + return err + } + if err := os.RemoveAll(r.datadir(id)); err != nil { + return err + } + } + return nil +} + +func (r *containerStore) Get(id string) (*Container, error) { + if c, ok := r.byname[id]; ok { + return c, nil + } else if c, ok := r.bylayer[id]; ok { + return c, nil + } + if c, ok := r.byid[id]; ok { + return c, nil + } + return nil, ErrContainerUnknown +} + +func (r *containerStore) Lookup(name string) (id string, err error) { + container, ok := r.byname[name] + if !ok { + container, ok = r.byid[name] + if !ok { + return "", ErrContainerUnknown + } + } + return container.ID, nil +} + +func (r *containerStore) Exists(id string) bool { + if _, ok := r.byname[id]; ok { + return true + } + if _, ok := r.bylayer[id]; ok { + return true + } + if _, ok := r.byid[id]; ok { + return true + } + return false +} + +func (r *containerStore) GetBigData(id, key string) ([]byte, error) { + if img, ok := r.byname[id]; ok { + id = img.ID + } + if _, ok := r.byid[id]; !ok { + return nil, ErrContainerUnknown + } + return ioutil.ReadFile(r.datapath(id, key)) +} + +func (r *containerStore) GetBigDataSize(id, key string) (int64, error) { + if img, ok := r.byname[id]; ok { + id = img.ID + } + if _, ok := r.byid[id]; !ok { + return -1, ErrContainerUnknown + } + if size, ok := r.byid[id].BigDataSizes[key]; ok { + return size, nil + } + return -1, ErrSizeUnknown +} + +func (r *containerStore) GetBigDataNames(id string) ([]string, error) { + if img, ok := r.byname[id]; ok { + id = img.ID + } + if _, ok := r.byid[id]; !ok { + return nil, ErrContainerUnknown + } + return r.byid[id].BigDataNames, nil +} + +func (r *containerStore) SetBigData(id, key string, data []byte) error { + if img, ok := r.byname[id]; ok { + id = img.ID + } + if _, ok := r.byid[id]; !ok { + return ErrContainerUnknown + } + if err := os.MkdirAll(r.datadir(id), 0700); err != nil { + return err + } + err := ioutils.AtomicWriteFile(r.datapath(id, key), data, 0600) + if err == nil { + save := false + oldSize, ok := r.byid[id].BigDataSizes[key] + r.byid[id].BigDataSizes[key] = int64(len(data)) + if !ok || oldSize != r.byid[id].BigDataSizes[key] { + save = true + } + add := true + for _, name := range r.byid[id].BigDataNames { + if name == key { + add = false + break + } + } + if add { + r.byid[id].BigDataNames = append(r.byid[id].BigDataNames, key) + save = true + } + if save { + err = r.Save() + } + } + return err +} + +func (r *containerStore) Wipe() error { + ids := []string{} + for id := range r.byid { + ids = append(ids, id) + } + for _, id := range ids { + if err := r.Delete(id); err != nil { + return err + } + } + return nil +} + +func (r *containerStore) Lock() { + r.lockfile.Lock() +} + +func (r *containerStore) Unlock() { + r.lockfile.Unlock() +} + +func (r *containerStore) Touch() error { + return r.lockfile.Touch() +} + +func (r *containerStore) Modified() (bool, error) { + return r.lockfile.Modified() +} + +func (r *containerStore) TouchedSince(when time.Time) bool { + return r.lockfile.TouchedSince(when) +} diff --git a/vendor/src/github.com/containers/storage/storage/images.go b/vendor/src/github.com/containers/storage/storage/images.go new file mode 100644 index 00000000..6798c573 --- /dev/null +++ b/vendor/src/github.com/containers/storage/storage/images.go @@ -0,0 +1,448 @@ +package storage + +import ( + "encoding/json" + "errors" + "io/ioutil" + "os" + "path/filepath" + "time" + + 
"github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/stringid" +) + +var ( + // ErrImageUnknown indicates that there was no image with the specified name or ID + ErrImageUnknown = errors.New("image not known") +) + +// An Image is a reference to a layer and an associated metadata string. +type Image struct { + // ID is either one which was specified at create-time, or a random + // value which was generated by the library. + ID string `json:"id"` + + // Names is an optional set of user-defined convenience values. The + // image can be referred to by its ID or any of its names. Names are + // unique among images. + Names []string `json:"names,omitempty"` + + // TopLayer is the ID of the topmost layer of the image itself. + // Multiple images can refer to the same top layer. + TopLayer string `json:"layer"` + + // Metadata is data we keep for the convenience of the caller. It is not + // expected to be large, since it is kept in memory. + Metadata string `json:"metadata,omitempty"` + + // BigDataNames is a list of names of data items that we keep for the + // convenience of the caller. They can be large, and are only in + // memory when being read from or written to disk. + BigDataNames []string `json:"big-data-names,omitempty"` + + // BigDataSizes maps the names in BigDataNames to the sizes of the data + // that has been stored, if they're known. + BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` + + Flags map[string]interface{} `json:"flags,omitempty"` +} + +// ImageStore provides bookkeeping for information about Images. +type ImageStore interface { + FileBasedStore + MetadataStore + BigDataStore + FlaggableStore + + // Create creates an image that has a specified ID (or a random one) and + // optional names, using the specified layer as its topmost (hopefully + // read-only) layer. That layer can be referenced by multiple images. + Create(id string, names []string, layer, metadata string) (*Image, error) + + // SetNames replaces the list of names associated with an image with the + // supplied values. + SetNames(id string, names []string) error + + // Exists checks if there is an image with the given ID or name. + Exists(id string) bool + + // Get retrieves information about an image given an ID or name. + Get(id string) (*Image, error) + + // Delete removes the record of the image. + Delete(id string) error + + // Wipe removes records of all images. + Wipe() error + + // Lookup attempts to translate a name to an ID. Most methods do this + // implicitly. + Lookup(name string) (string, error) + + // Images returns a slice enumerating the known images. 
+ Images() ([]Image, error) +} + +type imageStore struct { + lockfile Locker + dir string + images []Image + byid map[string]*Image + byname map[string]*Image +} + +func (r *imageStore) Images() ([]Image, error) { + return r.images, nil +} + +func (r *imageStore) imagespath() string { + return filepath.Join(r.dir, "images.json") +} + +func (r *imageStore) datadir(id string) string { + return filepath.Join(r.dir, id) +} + +func (r *imageStore) datapath(id, key string) string { + return filepath.Join(r.datadir(id), makeBigDataBaseName(key)) +} + +func (r *imageStore) Load() error { + needSave := false + rpath := r.imagespath() + data, err := ioutil.ReadFile(rpath) + if err != nil && !os.IsNotExist(err) { + return err + } + images := []Image{} + ids := make(map[string]*Image) + names := make(map[string]*Image) + if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil { + for n, image := range images { + ids[image.ID] = &images[n] + for _, name := range image.Names { + if conflict, ok := names[name]; ok { + r.removeName(conflict, name) + needSave = true + } + names[name] = &images[n] + } + } + } + r.images = images + r.byid = ids + r.byname = names + if needSave { + r.Touch() + return r.Save() + } + return nil +} + +func (r *imageStore) Save() error { + rpath := r.imagespath() + jdata, err := json.Marshal(&r.images) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(rpath, jdata, 0600) +} + +func newImageStore(dir string) (ImageStore, error) { + if err := os.MkdirAll(dir, 0700); err != nil { + return nil, err + } + lockfile, err := GetLockfile(filepath.Join(dir, "images.lock")) + if err != nil { + return nil, err + } + lockfile.Lock() + defer lockfile.Unlock() + istore := imageStore{ + lockfile: lockfile, + dir: dir, + images: []Image{}, + byid: make(map[string]*Image), + byname: make(map[string]*Image), + } + if err := istore.Load(); err != nil { + return nil, err + } + return &istore, nil +} + +func (r *imageStore) ClearFlag(id string, flag string) error { + if image, ok := r.byname[id]; ok { + id = image.ID + } + if _, ok := r.byid[id]; !ok { + return ErrImageUnknown + } + image := r.byid[id] + delete(image.Flags, flag) + return r.Save() +} + +func (r *imageStore) SetFlag(id string, flag string, value interface{}) error { + if image, ok := r.byname[id]; ok { + id = image.ID + } + if _, ok := r.byid[id]; !ok { + return ErrImageUnknown + } + image := r.byid[id] + image.Flags[flag] = value + return r.Save() +} + +func (r *imageStore) Create(id string, names []string, layer, metadata string) (image *Image, err error) { + if id == "" { + id = stringid.GenerateRandomID() + _, idInUse := r.byid[id] + for idInUse { + id = stringid.GenerateRandomID() + _, idInUse = r.byid[id] + } + } + if _, idInUse := r.byid[id]; idInUse { + return nil, ErrDuplicateID + } + for _, name := range names { + if _, nameInUse := r.byname[name]; nameInUse { + return nil, ErrDuplicateName + } + } + if err == nil { + newImage := Image{ + ID: id, + Names: names, + TopLayer: layer, + Metadata: metadata, + BigDataNames: []string{}, + BigDataSizes: make(map[string]int64), + Flags: make(map[string]interface{}), + } + r.images = append(r.images, newImage) + image = &r.images[len(r.images)-1] + r.byid[id] = image + for _, name := range names { + r.byname[name] = image + } + err = r.Save() + } + return image, err +} + +func (r *imageStore) GetMetadata(id string) (string, error) { + if image, ok := r.byname[id]; ok { + id = image.ID + } + if image, ok := r.byid[id]; ok { + return image.Metadata, nil + } 
+ return "", ErrImageUnknown +} + +func (r *imageStore) SetMetadata(id, metadata string) error { + if image, ok := r.byname[id]; ok { + id = image.ID + } + if image, ok := r.byid[id]; ok { + image.Metadata = metadata + return r.Save() + } + return ErrImageUnknown +} + +func (r *imageStore) removeName(image *Image, name string) { + newNames := []string{} + for _, oldName := range image.Names { + if oldName != name { + newNames = append(newNames, oldName) + } + } + image.Names = newNames +} + +func (r *imageStore) SetNames(id string, names []string) error { + if image, ok := r.byname[id]; ok { + id = image.ID + } + if image, ok := r.byid[id]; ok { + for _, name := range image.Names { + delete(r.byname, name) + } + for _, name := range names { + if otherImage, ok := r.byname[name]; ok { + r.removeName(otherImage, name) + } + r.byname[name] = image + } + image.Names = names + return r.Save() + } + return ErrImageUnknown +} + +func (r *imageStore) Delete(id string) error { + if image, ok := r.byname[id]; ok { + id = image.ID + } + if _, ok := r.byid[id]; !ok { + return ErrImageUnknown + } + if image, ok := r.byid[id]; ok { + newImages := []Image{} + for _, candidate := range r.images { + if candidate.ID != id { + newImages = append(newImages, candidate) + } + } + delete(r.byid, image.ID) + for _, name := range image.Names { + delete(r.byname, name) + } + r.images = newImages + if err := r.Save(); err != nil { + return err + } + if err := os.RemoveAll(r.datadir(id)); err != nil { + return err + } + } + return nil +} + +func (r *imageStore) Get(id string) (*Image, error) { + if image, ok := r.byname[id]; ok { + return image, nil + } + if image, ok := r.byid[id]; ok { + return image, nil + } + return nil, ErrImageUnknown +} + +func (r *imageStore) Lookup(name string) (id string, err error) { + image, ok := r.byname[name] + if !ok { + image, ok = r.byid[name] + if !ok { + return "", ErrImageUnknown + } + } + return image.ID, nil +} + +func (r *imageStore) Exists(id string) bool { + if _, ok := r.byname[id]; ok { + return true + } + if _, ok := r.byid[id]; ok { + return true + } + return false +} + +func (r *imageStore) GetBigData(id, key string) ([]byte, error) { + if img, ok := r.byname[id]; ok { + id = img.ID + } + if _, ok := r.byid[id]; !ok { + return nil, ErrImageUnknown + } + return ioutil.ReadFile(r.datapath(id, key)) +} + +func (r *imageStore) GetBigDataSize(id, key string) (int64, error) { + if img, ok := r.byname[id]; ok { + id = img.ID + } + if _, ok := r.byid[id]; !ok { + return -1, ErrImageUnknown + } + if size, ok := r.byid[id].BigDataSizes[key]; ok { + return size, nil + } + return -1, ErrSizeUnknown +} + +func (r *imageStore) GetBigDataNames(id string) ([]string, error) { + if img, ok := r.byname[id]; ok { + id = img.ID + } + if _, ok := r.byid[id]; !ok { + return nil, ErrImageUnknown + } + return r.byid[id].BigDataNames, nil +} + +func (r *imageStore) SetBigData(id, key string, data []byte) error { + if img, ok := r.byname[id]; ok { + id = img.ID + } + if _, ok := r.byid[id]; !ok { + return ErrImageUnknown + } + if err := os.MkdirAll(r.datadir(id), 0700); err != nil { + return err + } + err := ioutils.AtomicWriteFile(r.datapath(id, key), data, 0600) + if err == nil { + add := true + save := false + oldSize, ok := r.byid[id].BigDataSizes[key] + r.byid[id].BigDataSizes[key] = int64(len(data)) + if !ok || oldSize != r.byid[id].BigDataSizes[key] { + save = true + } + for _, name := range r.byid[id].BigDataNames { + if name == key { + add = false + break + } + } + if add { + 
r.byid[id].BigDataNames = append(r.byid[id].BigDataNames, key) + save = true + } + if save { + err = r.Save() + } + } + return err +} + +func (r *imageStore) Wipe() error { + ids := []string{} + for id := range r.byid { + ids = append(ids, id) + } + for _, id := range ids { + if err := r.Delete(id); err != nil { + return err + } + } + return nil +} + +func (r *imageStore) Lock() { + r.lockfile.Lock() +} + +func (r *imageStore) Unlock() { + r.lockfile.Unlock() +} + +func (r *imageStore) Touch() error { + return r.lockfile.Touch() +} + +func (r *imageStore) Modified() (bool, error) { + return r.lockfile.Modified() +} + +func (r *imageStore) TouchedSince(when time.Time) bool { + return r.lockfile.TouchedSince(when) +} diff --git a/vendor/src/github.com/containers/storage/storage/layers.go b/vendor/src/github.com/containers/storage/storage/layers.go new file mode 100644 index 00000000..924b99ad --- /dev/null +++ b/vendor/src/github.com/containers/storage/storage/layers.go @@ -0,0 +1,881 @@ +package storage + +import ( + "bytes" + "compress/gzip" + "encoding/json" + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + drivers "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/stringid" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +const ( + tarSplitSuffix = ".tar-split.gz" + incompleteFlag = "incomplete" + compressionFlag = "diff-compression" +) + +var ( + // ErrParentUnknown indicates that we didn't record the ID of the parent of the specified layer + ErrParentUnknown = errors.New("parent of layer not known") + // ErrLayerUnknown indicates that there was no layer with the specified name or ID + ErrLayerUnknown = errors.New("layer not known") +) + +// A Layer is a record of a copy-on-write layer that's stored by the lower +// level graph driver. +type Layer struct { + // ID is either one which was specified at create-time, or a random + // value which was generated by the library. + ID string `json:"id"` + + // Names is an optional set of user-defined convenience values. The + // layer can be referred to by its ID or any of its names. Names are + // unique among layers. + Names []string `json:"names,omitempty"` + + // Parent is the ID of a layer from which this layer inherits data. + Parent string `json:"parent,omitempty"` + + // Metadata is data we keep for the convenience of the caller. It is not + // expected to be large, since it is kept in memory. + Metadata string `json:"metadata,omitempty"` + + // MountLabel is an SELinux label which should be used when attempting to mount + // the layer. + MountLabel string `json:"mountlabel,omitempty"` + + // MountPoint is the path where the layer is mounted, or where it was most + // recently mounted. This can change between subsequent Unmount() and + // Mount() calls, so the caller should consult this value after Mount() + // succeeds to find the location of the container's root filesystem. + MountPoint string `json:"-"` + + // MountCount is used as a reference count for the container's layer being + // mounted at the mount point. 
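+	// Like MountPoint, it is not saved to layers.json; both values are
+	// tracked in mountpoints.json under the store's run directory.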
+	MountCount int `json:"-"`
+
+	Flags map[string]interface{} `json:"flags,omitempty"`
+}
+
+type layerMountPoint struct {
+	ID         string `json:"id"`
+	MountPoint string `json:"path"`
+	MountCount int    `json:"count"`
+}
+
+// LayerStore wraps a graph driver, adding the ability to refer to layers by
+// name, and keeping track of parent-child relationships, along with a list of
+// all known layers.
+type LayerStore interface {
+	FileBasedStore
+	MetadataStore
+	FlaggableStore
+
+	// Create creates a new layer, optionally giving it a specified ID rather than
+	// a randomly-generated one, either inheriting data from another specified
+	// layer or the empty base layer. The new layer can optionally be given names
+	// and have an SELinux label specified for use when mounting it. Some
+	// underlying drivers can accept a "size" option. At this time, most
+	// underlying drivers do not themselves distinguish between writeable
+	// and read-only layers.
+	Create(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool) (*Layer, error)
+
+	// CreateWithFlags combines the functions of Create and SetFlag.
+	CreateWithFlags(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}) (layer *Layer, err error)
+
+	// Put combines the functions of CreateWithFlags and ApplyDiff.
+	Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff archive.Reader) (*Layer, int64, error)
+
+	// Exists checks if a layer with the specified name or ID is known.
+	Exists(id string) bool
+
+	// Get retrieves information about a layer given an ID or name.
+	Get(id string) (*Layer, error)
+
+	// SetNames replaces the list of names associated with a layer with the
+	// supplied values.
+	SetNames(id string, names []string) error
+
+	// Status returns a slice of key-value pairs, suitable for human consumption,
+	// relaying whatever status information the underlying driver can share.
+	Status() ([][2]string, error)
+
+	// Delete deletes a layer with the specified name or ID.
+	Delete(id string) error
+
+	// Wipe deletes all layers.
+	Wipe() error
+
+	// Mount mounts a layer for use. If the specified layer is the parent of other
+	// layers, it should not be written to. An SELinux label to be applied to the
+	// mount can be specified to override the one configured for the layer.
+	Mount(id, mountLabel string) (string, error)
+
+	// Unmount unmounts a layer when it is no longer in use.
+	Unmount(id string) error
+
+	// Changes returns a slice of Change structures, which contain a pathname
+	// (Path) and a description of what sort of change (Kind) was made by the
+	// layer (either ChangeModify, ChangeAdd, or ChangeDelete), relative to a
+	// specified layer. By default, the layer's parent is used as a reference.
+	Changes(from, to string) ([]archive.Change, error)
+
+	// Diff produces a tarstream which can be applied to a layer with the contents
+	// of the first layer to produce a layer with the contents of the second layer.
+	// By default, the parent of the second layer is used as the first
+	// layer, so it need not be specified.
+	Diff(from, to string) (io.ReadCloser, error)
+
+	// DiffSize produces an estimate of the length of the tarstream which would be
+	// produced by Diff.
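+	// As with Diff, the parent of the second layer is used as the first
+	// layer if one is not specified.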
+ DiffSize(from, to string) (int64, error) + + // ApplyDiff reads a tarstream which was created by a previous call to Diff and + // applies its changes to a specified layer. + ApplyDiff(to string, diff archive.Reader) (int64, error) + + // Lookup attempts to translate a name to an ID. Most methods do this + // implicitly. + Lookup(name string) (string, error) + + // Layers returns a slice of the known layers. + Layers() ([]Layer, error) +} + +type layerStore struct { + lockfile Locker + rundir string + driver drivers.Driver + layerdir string + layers []Layer + byid map[string]*Layer + byname map[string]*Layer + byparent map[string][]*Layer + bymount map[string]*Layer +} + +func (r *layerStore) Layers() ([]Layer, error) { + return r.layers, nil +} + +func (r *layerStore) mountspath() string { + return filepath.Join(r.rundir, "mountpoints.json") +} + +func (r *layerStore) layerspath() string { + return filepath.Join(r.layerdir, "layers.json") +} + +func (r *layerStore) Load() error { + needSave := false + rpath := r.layerspath() + data, err := ioutil.ReadFile(rpath) + if err != nil && !os.IsNotExist(err) { + return err + } + layers := []Layer{} + ids := make(map[string]*Layer) + names := make(map[string]*Layer) + mounts := make(map[string]*Layer) + parents := make(map[string][]*Layer) + if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil { + for n, layer := range layers { + ids[layer.ID] = &layers[n] + for _, name := range layer.Names { + if conflict, ok := names[name]; ok { + r.removeName(conflict, name) + needSave = true + } + names[name] = &layers[n] + } + if pslice, ok := parents[layer.Parent]; ok { + parents[layer.Parent] = append(pslice, &layers[n]) + } else { + parents[layer.Parent] = []*Layer{&layers[n]} + } + } + } + mpath := r.mountspath() + data, err = ioutil.ReadFile(mpath) + if err != nil && !os.IsNotExist(err) { + return err + } + layerMounts := []layerMountPoint{} + if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil { + for _, mount := range layerMounts { + if mount.MountPoint != "" { + if layer, ok := ids[mount.ID]; ok { + mounts[mount.MountPoint] = layer + layer.MountPoint = mount.MountPoint + layer.MountCount = mount.MountCount + } + } + } + } + r.layers = layers + r.byid = ids + r.byname = names + r.byparent = parents + r.bymount = mounts + err = nil + // Last step: try to remove anything that a previous user of this + // storage area marked for deletion but didn't manage to actually + // delete. 
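+	// Put sets the "incomplete" flag before applying a diff and clears it
+	// only after ApplyDiff succeeds, so a layer still carrying the flag was
+	// abandoned partway through being populated.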
+ for _, layer := range r.layers { + if cleanup, ok := layer.Flags[incompleteFlag]; ok { + if b, ok := cleanup.(bool); ok && b { + err = r.Delete(layer.ID) + if err != nil { + break + } + needSave = true + } + } + } + if needSave { + r.Touch() + return r.Save() + } + return err +} + +func (r *layerStore) Save() error { + rpath := r.layerspath() + jldata, err := json.Marshal(&r.layers) + if err != nil { + return err + } + mpath := r.mountspath() + mounts := []layerMountPoint{} + for _, layer := range r.layers { + if layer.MountPoint != "" && layer.MountCount > 0 { + mounts = append(mounts, layerMountPoint{ + ID: layer.ID, + MountPoint: layer.MountPoint, + MountCount: layer.MountCount, + }) + } + } + jmdata, err := json.Marshal(&mounts) + if err != nil { + return err + } + if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil { + return err + } + return ioutils.AtomicWriteFile(mpath, jmdata, 0600) +} + +func newLayerStore(rundir string, layerdir string, driver drivers.Driver) (LayerStore, error) { + if err := os.MkdirAll(rundir, 0700); err != nil { + return nil, err + } + if err := os.MkdirAll(layerdir, 0700); err != nil { + return nil, err + } + lockfile, err := GetLockfile(filepath.Join(layerdir, "layers.lock")) + if err != nil { + return nil, err + } + lockfile.Lock() + defer lockfile.Unlock() + rlstore := layerStore{ + lockfile: lockfile, + driver: driver, + rundir: rundir, + layerdir: layerdir, + byid: make(map[string]*Layer), + bymount: make(map[string]*Layer), + byname: make(map[string]*Layer), + byparent: make(map[string][]*Layer), + } + if err := rlstore.Load(); err != nil { + return nil, err + } + return &rlstore, nil +} + +func (r *layerStore) ClearFlag(id string, flag string) error { + if layer, ok := r.byname[id]; ok { + id = layer.ID + } + if _, ok := r.byid[id]; !ok { + return ErrLayerUnknown + } + layer := r.byid[id] + delete(layer.Flags, flag) + return r.Save() +} + +func (r *layerStore) SetFlag(id string, flag string, value interface{}) error { + if layer, ok := r.byname[id]; ok { + id = layer.ID + } + if _, ok := r.byid[id]; !ok { + return ErrLayerUnknown + } + layer := r.byid[id] + layer.Flags[flag] = value + return r.Save() +} + +func (r *layerStore) Status() ([][2]string, error) { + return r.driver.Status(), nil +} + +func (r *layerStore) Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff archive.Reader) (layer *Layer, size int64, err error) { + size = -1 + if err := os.MkdirAll(r.rundir, 0700); err != nil { + return nil, -1, err + } + if err := os.MkdirAll(r.layerdir, 0700); err != nil { + return nil, -1, err + } + if parentLayer, ok := r.byname[parent]; ok { + parent = parentLayer.ID + } + if id == "" { + id = stringid.GenerateRandomID() + _, idInUse := r.byid[id] + for idInUse { + id = stringid.GenerateRandomID() + _, idInUse = r.byid[id] + } + } + if _, idInUse := r.byid[id]; idInUse { + return nil, -1, ErrDuplicateID + } + for _, name := range names { + if _, nameInUse := r.byname[name]; nameInUse { + return nil, -1, ErrDuplicateName + } + } + if writeable { + err = r.driver.CreateReadWrite(id, parent, mountLabel, options) + } else { + err = r.driver.Create(id, parent, mountLabel, options) + } + if err == nil { + newLayer := Layer{ + ID: id, + Parent: parent, + Names: names, + MountLabel: mountLabel, + Flags: make(map[string]interface{}), + } + r.layers = append(r.layers, newLayer) + layer = &r.layers[len(r.layers)-1] + r.byid[id] = layer + for _, name := range names { 
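+			// Index the new layer under each of its requested names.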
+ r.byname[name] = layer + } + if pslice, ok := r.byparent[parent]; ok { + pslice = append(pslice, layer) + r.byparent[parent] = pslice + } else { + r.byparent[parent] = []*Layer{layer} + } + for flag, value := range flags { + layer.Flags[flag] = value + } + if diff != nil { + layer.Flags[incompleteFlag] = true + err = r.Save() + if err != nil { + // We don't have a record of this layer, but at least + // try to clean it up underneath us. + r.driver.Remove(id) + return nil, -1, err + } + size, err = r.ApplyDiff(layer.ID, diff) + if err != nil { + if r.Delete(layer.ID) != nil { + // Either a driver error or an error saving. + // We now have a layer that's been marked for + // deletion but which we failed to remove. + } + return nil, -1, err + } + delete(layer.Flags, incompleteFlag) + } + err = r.Save() + if err != nil { + // We don't have a record of this layer, but at least + // try to clean it up underneath us. + r.driver.Remove(id) + return nil, -1, err + } + } + return layer, size, err +} + +func (r *layerStore) CreateWithFlags(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}) (layer *Layer, err error) { + layer, _, err = r.Put(id, parent, names, mountLabel, options, writeable, flags, nil) + return layer, err +} + +func (r *layerStore) Create(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool) (layer *Layer, err error) { + return r.CreateWithFlags(id, parent, names, mountLabel, options, writeable, nil) +} + +func (r *layerStore) Mount(id, mountLabel string) (string, error) { + if layer, ok := r.byname[id]; ok { + id = layer.ID + } + if _, ok := r.byid[id]; !ok { + return "", ErrLayerUnknown + } + layer := r.byid[id] + if layer.MountCount > 0 { + layer.MountCount++ + return layer.MountPoint, r.Save() + } + if mountLabel == "" { + if layer, ok := r.byid[id]; ok { + mountLabel = layer.MountLabel + } + } + mountpoint, err := r.driver.Get(id, mountLabel) + if mountpoint != "" && err == nil { + if layer, ok := r.byid[id]; ok { + if layer.MountPoint != "" { + delete(r.bymount, layer.MountPoint) + } + layer.MountPoint = filepath.Clean(mountpoint) + layer.MountCount++ + r.bymount[layer.MountPoint] = layer + err = r.Save() + } + } + return mountpoint, err +} + +func (r *layerStore) Unmount(id string) error { + if layer, ok := r.bymount[filepath.Clean(id)]; ok { + id = layer.ID + } + if layer, ok := r.byname[id]; ok { + id = layer.ID + } + if _, ok := r.byid[id]; !ok { + return ErrLayerUnknown + } + layer := r.byid[id] + if layer.MountCount > 1 { + layer.MountCount-- + return r.Save() + } + err := r.driver.Put(id) + if err == nil || os.IsNotExist(err) { + if layer.MountPoint != "" { + delete(r.bymount, layer.MountPoint) + } + layer.MountCount-- + layer.MountPoint = "" + err = r.Save() + } + return err +} + +func (r *layerStore) removeName(layer *Layer, name string) { + newNames := []string{} + for _, oldName := range layer.Names { + if oldName != name { + newNames = append(newNames, oldName) + } + } + layer.Names = newNames +} + +func (r *layerStore) SetNames(id string, names []string) error { + if layer, ok := r.byname[id]; ok { + id = layer.ID + } + if layer, ok := r.byid[id]; ok { + for _, name := range layer.Names { + delete(r.byname, name) + } + for _, name := range names { + if otherLayer, ok := r.byname[name]; ok { + r.removeName(otherLayer, name) + } + r.byname[name] = layer + } + layer.Names = names + return r.Save() + } + return ErrLayerUnknown +} + +func (r 
*layerStore) GetMetadata(id string) (string, error) { + if layer, ok := r.byname[id]; ok { + id = layer.ID + } + if layer, ok := r.byid[id]; ok { + return layer.Metadata, nil + } + return "", ErrLayerUnknown +} + +func (r *layerStore) SetMetadata(id, metadata string) error { + if layer, ok := r.byname[id]; ok { + id = layer.ID + } + if layer, ok := r.byid[id]; ok { + layer.Metadata = metadata + return r.Save() + } + return ErrLayerUnknown +} + +func (r *layerStore) tspath(id string) string { + return filepath.Join(r.layerdir, id+tarSplitSuffix) +} + +func (r *layerStore) Delete(id string) error { + if layer, ok := r.byname[id]; ok { + id = layer.ID + } + if _, ok := r.byid[id]; !ok { + return ErrLayerUnknown + } + for r.byid[id].MountCount > 0 { + if err := r.Unmount(id); err != nil { + return err + } + } + err := r.driver.Remove(id) + if err == nil { + os.Remove(r.tspath(id)) + if layer, ok := r.byid[id]; ok { + pslice := r.byparent[layer.Parent] + newPslice := []*Layer{} + for _, candidate := range pslice { + if candidate.ID != id { + newPslice = append(newPslice, candidate) + } + } + delete(r.byid, layer.ID) + if len(newPslice) > 0 { + r.byparent[layer.Parent] = newPslice + } else { + delete(r.byparent, layer.Parent) + } + for _, name := range layer.Names { + delete(r.byname, name) + } + if layer.MountPoint != "" { + delete(r.bymount, layer.MountPoint) + } + newLayers := []Layer{} + for _, candidate := range r.layers { + if candidate.ID != id { + newLayers = append(newLayers, candidate) + } + } + r.layers = newLayers + if err = r.Save(); err != nil { + return err + } + } + } + return err +} + +func (r *layerStore) Lookup(name string) (id string, err error) { + layer, ok := r.byname[name] + if !ok { + layer, ok = r.byid[name] + if !ok { + return "", ErrLayerUnknown + } + } + return layer.ID, nil +} + +func (r *layerStore) Exists(id string) bool { + if layer, ok := r.byname[id]; ok { + id = layer.ID + } + l, exists := r.byid[id] + return l != nil && exists +} + +func (r *layerStore) Get(id string) (*Layer, error) { + if l, ok := r.byname[id]; ok { + return l, nil + } + if l, ok := r.byid[id]; ok { + return l, nil + } + return nil, ErrLayerUnknown +} + +func (r *layerStore) Wipe() error { + ids := []string{} + for id := range r.byid { + ids = append(ids, id) + } + for _, id := range ids { + if err := r.Delete(id); err != nil { + return err + } + } + return nil +} + +func (r *layerStore) Changes(from, to string) ([]archive.Change, error) { + if layer, ok := r.byname[from]; ok { + from = layer.ID + } + if layer, ok := r.byname[to]; ok { + to = layer.ID + } + if from == "" { + if layer, ok := r.byid[to]; ok { + from = layer.Parent + } + } + if to == "" { + return nil, ErrLayerUnknown + } + if _, ok := r.byid[to]; !ok { + return nil, ErrLayerUnknown + } + return r.driver.Changes(to, from) +} + +type simpleGetCloser struct { + r *layerStore + path string + id string +} + +func (s *simpleGetCloser) Get(path string) (io.ReadCloser, error) { + return os.Open(filepath.Join(s.path, path)) +} + +func (s *simpleGetCloser) Close() error { + return s.r.Unmount(s.id) +} + +func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) { + if getter, ok := r.driver.(drivers.DiffGetterDriver); ok { + return getter.DiffGetter(id) + } + path, err := r.Mount(id, "") + if err != nil { + return nil, err + } + return &simpleGetCloser{ + r: r, + path: path, + id: id, + }, nil +} + +func (r *layerStore) Diff(from, to string) (io.ReadCloser, error) { + var metadata storage.Unpacker + + if layer, ok := 
r.byname[from]; ok { + from = layer.ID + } + if layer, ok := r.byname[to]; ok { + to = layer.ID + } + if from == "" { + if layer, ok := r.byid[to]; ok { + from = layer.Parent + } + } + if to == "" { + return nil, ErrParentUnknown + } + if _, ok := r.byid[to]; !ok { + return nil, ErrLayerUnknown + } + compression := archive.Uncompressed + if cflag, ok := r.byid[to].Flags[compressionFlag]; ok { + if ctype, ok := cflag.(float64); ok { + compression = archive.Compression(ctype) + } else if ctype, ok := cflag.(archive.Compression); ok { + compression = archive.Compression(ctype) + } + } + if from != r.byid[to].Parent { + diff, err := r.driver.Diff(to, from) + if err == nil && (compression != archive.Uncompressed) { + preader, pwriter := io.Pipe() + compressor, err := archive.CompressStream(pwriter, compression) + if err != nil { + diff.Close() + pwriter.Close() + return nil, err + } + go func() { + io.Copy(compressor, diff) + diff.Close() + compressor.Close() + pwriter.Close() + }() + diff = preader + } + return diff, err + } + + tsfile, err := os.Open(r.tspath(to)) + if err != nil { + if os.IsNotExist(err) { + return r.driver.Diff(to, from) + } + return nil, err + } + defer tsfile.Close() + + decompressor, err := gzip.NewReader(tsfile) + if err != nil { + return nil, err + } + defer decompressor.Close() + + tsbytes, err := ioutil.ReadAll(decompressor) + if err != nil { + return nil, err + } + + metadata = storage.NewJSONUnpacker(bytes.NewBuffer(tsbytes)) + + if fgetter, err := r.newFileGetter(to); err != nil { + return nil, err + } else { + var stream io.ReadCloser + if compression != archive.Uncompressed { + preader, pwriter := io.Pipe() + compressor, err := archive.CompressStream(pwriter, compression) + if err != nil { + fgetter.Close() + pwriter.Close() + preader.Close() + return nil, err + } + go func() { + asm.WriteOutputTarStream(fgetter, metadata, compressor) + compressor.Close() + pwriter.Close() + }() + stream = preader + } else { + stream = asm.NewOutputTarStream(fgetter, metadata) + } + return ioutils.NewReadCloserWrapper(stream, func() error { + err1 := stream.Close() + err2 := fgetter.Close() + if err2 == nil { + return err1 + } + return err2 + }), nil + } +} + +func (r *layerStore) DiffSize(from, to string) (size int64, err error) { + if layer, ok := r.byname[from]; ok { + from = layer.ID + } + if layer, ok := r.byname[to]; ok { + to = layer.ID + } + if from == "" { + if layer, ok := r.byid[to]; ok { + from = layer.Parent + } + } + if to == "" { + return -1, ErrParentUnknown + } + if _, ok := r.byid[to]; !ok { + return -1, ErrLayerUnknown + } + return r.driver.DiffSize(to, from) +} + +func (r *layerStore) ApplyDiff(to string, diff archive.Reader) (size int64, err error) { + if layer, ok := r.byname[to]; ok { + to = layer.ID + } + layer, ok := r.byid[to] + if !ok { + return -1, ErrLayerUnknown + } + + header := make([]byte, 10240) + n, err := diff.Read(header) + if err != nil && err != io.EOF { + return -1, err + } + + compression := archive.DetectCompression(header[:n]) + defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), diff) + + tsdata := bytes.Buffer{} + compressor, err := gzip.NewWriterLevel(&tsdata, gzip.BestSpeed) + if err != nil { + compressor = gzip.NewWriter(&tsdata) + } + metadata := storage.NewJSONPacker(compressor) + if c, err := archive.DecompressStream(defragmented); err != nil { + return -1, err + } else { + defragmented = c + } + payload, err := asm.NewInputTarStream(defragmented, metadata, storage.NewDiscardFilePutter()) + if err != nil { + return -1, 
err
+	}
+	size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, payload)
+	compressor.Close()
+	if err == nil {
+		if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil {
+			return -1, err
+		}
+	}
+
+	if compression != archive.Uncompressed {
+		layer.Flags[compressionFlag] = compression
+	} else {
+		delete(layer.Flags, compressionFlag)
+	}
+
+	return size, err
+}
+
+func (r *layerStore) Lock() {
+	r.lockfile.Lock()
+}
+
+func (r *layerStore) Unlock() {
+	r.lockfile.Unlock()
+}
+
+func (r *layerStore) Touch() error {
+	return r.lockfile.Touch()
+}
+
+func (r *layerStore) Modified() (bool, error) {
+	return r.lockfile.Modified()
+}
+
+func (r *layerStore) TouchedSince(when time.Time) bool {
+	return r.lockfile.TouchedSince(when)
+}
diff --git a/vendor/src/github.com/containers/storage/storage/lockfile.go b/vendor/src/github.com/containers/storage/storage/lockfile.go
new file mode 100644
index 00000000..5cedea8d
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/storage/lockfile.go
@@ -0,0 +1,138 @@
+package storage
+
+import (
+	"os"
+	"path/filepath"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/containers/storage/pkg/stringid"
+)
+
+// A Locker represents a file lock where the file is used to cache an
+// identifier of the last party that made changes to whatever's being protected
+// by the lock.
+type Locker interface {
+	sync.Locker
+
+	// Touch records, for others sharing the lock, that the caller was the
+	// last writer. It should only be called with the lock held.
+	Touch() error
+
+	// Modified() checks if the most recent writer was a party other than the
+	// last recorded writer. It should only be called with the lock held.
+	Modified() (bool, error)
+
+	// TouchedSince() checks if the most recent writer modified the file (likely using Touch()) after the specified time.
+	TouchedSince(when time.Time) bool
+}
+
+type lockfile struct {
+	mu   sync.Mutex
+	file string
+	fd   uintptr
+	lw   string
+}
+
+var (
+	lockfiles     map[string]*lockfile
+	lockfilesLock sync.Mutex
+)
+
+// GetLockfile opens a lock file, creating it if necessary. The Locker object
+// returned will be unlocked.
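+//
+// Opening the same path twice yields the same Locker, so callers sharing a
+// data directory within one process share a single lock. A minimal sketch of
+// the pattern the stores in this package follow (reload() stands in for a
+// store's Load() and is not a real function here):
+//
+//	lock.Lock()
+//	defer lock.Unlock()
+//	if modified, _ := lock.Modified(); modified {
+//		reload()
+//	}
+//	// ... make changes, then Save() ...
+//	lock.Touch() // record that we were the last writer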
+func GetLockfile(path string) (Locker, error) { + lockfilesLock.Lock() + defer lockfilesLock.Unlock() + if lockfiles == nil { + lockfiles = make(map[string]*lockfile) + } + if locker, ok := lockfiles[filepath.Clean(path)]; ok { + return locker, nil + } + fd, err := syscall.Open(filepath.Clean(path), os.O_RDWR|os.O_CREATE, syscall.S_IRUSR|syscall.S_IWUSR) + if err != nil { + return nil, err + } + locker := &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID()} + lockfiles[filepath.Clean(path)] = locker + return locker, nil +} + +func (l *lockfile) Lock() { + lk := syscall.Flock_t{ + Type: syscall.F_WRLCK, + Whence: int16(os.SEEK_SET), + Start: 0, + Len: 0, + Pid: int32(os.Getpid()), + } + l.mu.Lock() + for syscall.FcntlFlock(l.fd, syscall.F_SETLKW, &lk) != nil { + time.Sleep(10 * time.Millisecond) + } +} + +func (l *lockfile) Unlock() { + lk := syscall.Flock_t{ + Type: syscall.F_UNLCK, + Whence: int16(os.SEEK_SET), + Start: 0, + Len: 0, + Pid: int32(os.Getpid()), + } + for syscall.FcntlFlock(l.fd, syscall.F_SETLKW, &lk) != nil { + time.Sleep(10 * time.Millisecond) + } + l.mu.Unlock() +} + +func (l *lockfile) Touch() error { + l.lw = stringid.GenerateRandomID() + id := []byte(l.lw) + _, err := syscall.Seek(int(l.fd), 0, os.SEEK_SET) + if err != nil { + return err + } + n, err := syscall.Write(int(l.fd), id) + if err != nil { + return err + } + if n != len(id) { + return syscall.ENOSPC + } + err = syscall.Fsync(int(l.fd)) + if err != nil { + return err + } + return nil +} + +func (l *lockfile) Modified() (bool, error) { + id := []byte(l.lw) + _, err := syscall.Seek(int(l.fd), 0, os.SEEK_SET) + if err != nil { + return true, err + } + n, err := syscall.Read(int(l.fd), id) + if err != nil { + return true, err + } + if n != len(id) { + return true, syscall.ENOSPC + } + lw := l.lw + l.lw = string(id) + return l.lw != lw, nil +} + +func (l *lockfile) TouchedSince(when time.Time) bool { + st := syscall.Stat_t{} + err := syscall.Fstat(int(l.fd), &st) + if err != nil { + return true + } + touched := time.Unix(st.Mtim.Unix()) + return when.Before(touched) +} diff --git a/vendor/src/github.com/containers/storage/storage/store.go b/vendor/src/github.com/containers/storage/storage/store.go new file mode 100644 index 00000000..4480eaee --- /dev/null +++ b/vendor/src/github.com/containers/storage/storage/store.go @@ -0,0 +1,2171 @@ +package storage + +import ( + "encoding/base64" + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + "time" + + // register all of the built-in drivers + _ "github.com/containers/storage/drivers/register" + + drivers "github.com/containers/storage/drivers" + "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ioutils" + "github.com/containers/storage/pkg/stringid" + "github.com/containers/storage/storageversion" +) + +var ( + ErrLoadError = errors.New("error loading storage metadata") + ErrDuplicateID = errors.New("that ID is already in use") + ErrDuplicateName = errors.New("that name is already in use") + ErrParentIsContainer = errors.New("would-be parent layer is a container") + ErrNotAContainer = errors.New("identifier is not a container") + ErrNotAnImage = errors.New("identifier is not an image") + ErrNotALayer = errors.New("identifier is not a layer") + ErrNotAnID = errors.New("identifier is not a layer, image, or container") + ErrLayerHasChildren = errors.New("layer has children") + ErrLayerUsedByImage = errors.New("layer is in use by an image") + 
ErrLayerUsedByContainer = errors.New("layer is in use by a container") + ErrImageUsedByContainer = errors.New("image is in use by a container") + ErrIncompleteOptions = errors.New("missing necessary StoreOptions") + ErrSizeUnknown = errors.New("size is not known") + DefaultStoreOptions StoreOptions + stores []*store + storesLock sync.Mutex +) + +// FileBasedStore wraps up the most common methods of the various types of file-based +// data stores that we implement. +type FileBasedStore interface { + Locker + + // Load reloads the contents of the store from disk. It should be called + // with the lock held. + Load() error + + // Save saves the contents of the store to disk. It should be called with + // the lock held, and Touch() should be called afterward before releasing the + // lock. + Save() error +} + +// MetadataStore wraps up methods for getting and setting metadata associated with IDs. +type MetadataStore interface { + // GetMetadata reads metadata associated with an item with the specified ID. + GetMetadata(id string) (string, error) + + // SetMetadata updates the metadata associated with the item with the specified ID. + SetMetadata(id, metadata string) error +} + +// A BigDataStore wraps up the most common methods of the various types of +// file-based lookaside stores that we implement. +type BigDataStore interface { + // SetBigData stores a (potentially large) piece of data associated with this + // ID. + SetBigData(id, key string, data []byte) error + + // GetBigData retrieves a (potentially large) piece of data associated with + // this ID, if it has previously been set. + GetBigData(id, key string) ([]byte, error) + + // GetBigDataSize retrieves the size of a (potentially large) piece of + // data associated with this ID, if it has previously been set. + GetBigDataSize(id, key string) (int64, error) + + // GetBigDataNames() returns a list of the names of previously-stored pieces of + // data. + GetBigDataNames(id string) ([]string, error) +} + +// A FlaggableStore can have flags set and cleared on items which it manages. +type FlaggableStore interface { + // ClearFlag removes a named flag from an item in the store. + ClearFlag(id string, flag string) error + + // SetFlag sets a named flag and its value on an item in the store. + SetFlag(id string, flag string, value interface{}) error +} + +// StoreOptions is used for passing initialization options to GetStore(), for +// initializing a Store object and the underlying storage that it controls. +type StoreOptions struct { + // RunRoot is the filesystem path under which we can store run-time + // information, such as the locations of active mount points, that we + // want to lose if the host is rebooted. + RunRoot string `json:"runroot,omitempty"` + // GraphRoot is the filesystem path under which we will store the + // contents of layers, images, and containers. + GraphRoot string `json:"root,omitempty"` + // GraphDriverName is the underlying storage driver that we'll be + // using. It only needs to be specified the first time a Store is + // initialized for a given RunRoot and GraphRoot. + GraphDriverName string `json:"driver,omitempty"` + // GraphDriverOptions are driver-specific options. + GraphDriverOptions []string `json:"driver-options,omitempty"` + // UidMap and GidMap are used mainly for deciding on the ownership of + // files in layers as they're stored on disk, which is often necessary + // when user namespaces are being used. 
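+	// Each idtools.IDMap entry maps a contiguous range of IDs inside the
+	// container (ContainerID, Size) onto IDs on the host starting at HostID.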
+	UidMap []idtools.IDMap `json:"uidmap,omitempty"`
+	GidMap []idtools.IDMap `json:"gidmap,omitempty"`
+}
+
+// Store wraps up the various types of file-based stores that we use into a
+// singleton object that initializes and manages them all together.
+type Store interface {
+	// GetRunRoot, GetGraphRoot, GetGraphDriverName, and GetGraphOptions retrieve
+	// settings that were passed to GetStore() when the object was created.
+	GetRunRoot() string
+	GetGraphRoot() string
+	GetGraphDriverName() string
+	GetGraphOptions() []string
+
+	// GetGraphDriver obtains and returns a handle to the graph Driver object used
+	// by the Store.
+	GetGraphDriver() (drivers.Driver, error)
+
+	// GetLayerStore obtains and returns a handle to the layer store object used by
+	// the Store.
+	GetLayerStore() (LayerStore, error)
+
+	// GetImageStore obtains and returns a handle to the image store object used by
+	// the Store.
+	GetImageStore() (ImageStore, error)
+
+	// GetContainerStore obtains and returns a handle to the container store object
+	// used by the Store.
+	GetContainerStore() (ContainerStore, error)
+
+	// CreateLayer creates a new layer in the underlying storage driver, optionally
+	// having the specified ID (one will be assigned if none is specified), with
+	// the specified layer (or no layer) as its parent, and with optional names.
+	// (The writeable flag is ignored.)
+	CreateLayer(id, parent string, names []string, mountLabel string, writeable bool) (*Layer, error)
+
+	// PutLayer combines the functions of CreateLayer and ApplyDiff, marking the
+	// layer for automatic removal if applying the diff fails for any reason.
+	PutLayer(id, parent string, names []string, mountLabel string, writeable bool, diff archive.Reader) (*Layer, int64, error)
+
+	// CreateImage creates a new image, optionally with the specified ID
+	// (one will be assigned if none is specified), with optional names,
+	// referring to a specified layer, and with optional metadata. An
+	// image is a record which associates the ID of a layer with
+	// additional bookkeeping information which the library stores for the
+	// convenience of its caller.
+	CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error)
+
+	// CreateContainer creates a new container, optionally with the specified ID
+	// (one will be assigned if none is specified), with optional names,
+	// using the specified image's top layer as the basis for the
+	// container's layer, and assigning the specified ID to that layer (one
+	// will be created if none is specified). A container is a layer which
+	// is associated with additional bookkeeping information which the
+	// library stores for the convenience of its caller.
+	CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error)
+
+	// GetMetadata retrieves the metadata which is associated with a layer, image,
+	// or container (whichever the passed-in ID refers to).
+	GetMetadata(id string) (string, error)
+
+	// SetMetadata updates the metadata which is associated with a layer, image, or
+	// container (whichever the passed-in ID refers to) to match the specified
+	// value. The metadata value can be retrieved at any time using GetMetadata,
+	// or using GetLayer, GetImage, or GetContainer and reading the object directly.
+	SetMetadata(id, metadata string) error
+
+	// Exists checks if there is a layer, image, or container which has the
+	// passed-in ID or name.
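+	// Layers, images, and containers are all checked.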
+ Exists(id string) bool + + // Status asks for a status report, in the form of key-value pairs, from the + // underlying storage driver. The contents vary from driver to driver. + Status() ([][2]string, error) + + // Delete removes the layer, image, or container which has the passed-in ID or + // name. Note that no safety checks are performed, so this can leave images + // with references to layers which do not exist, and layers with references to + // parents which no longer exist. + Delete(id string) error + + // DeleteLayer attempts to remove the specified layer. If the layer is the + // parent of any other layer, or is referred to by any images, it will return + // an error. + DeleteLayer(id string) error + + // DeleteImage removes the specified image if it is not referred to by + // any containers. If its top layer is then no longer referred to by + // any other images and is not the parent of any other layers, its top + // layer will be removed. If that layer's parent is no longer referred + // to by any other images and is not the parent of any other layers, + // then it, too, will be removed. This procedure will be repeated + // until a layer which should not be removed, or the base layer, is + // reached, at which point the list of removed layers is returned. If + // the commit argument is false, the image and layers are not removed, + // but the list of layers which would be removed is still returned. + DeleteImage(id string, commit bool) (layers []string, err error) + + // DeleteContainer removes the specified container and its layer. If there is + // no matching container, or if the container exists but its layer does not, an + // error will be returned. + DeleteContainer(id string) error + + // Wipe removes all known layers, images, and containers. + Wipe() error + + // Mount attempts to mount a layer, image, or container for access, and returns + // the pathname if it succeeds. + Mount(id, mountLabel string) (string, error) + + // Unmount attempts to unmount a layer, image, or container, given an ID, a + // name, or a mount path. + Unmount(id string) error + + // Changes returns a summary of the changes which would need to be made to one + // layer to make its contents the same as a second layer. If the first layer + // is not specified, the second layer's parent is assumed. Each Change + // structure contains a Path relative to the layer's root directory, and a Kind + // which is either ChangeAdd, ChangeModify, or ChangeDelete. + Changes(from, to string) ([]archive.Change, error) + + // DiffSize returns a count of the size of the tarstream which would specify + // the changes returned by Changes. + DiffSize(from, to string) (int64, error) + + // Diff returns the tarstream which would specify the changes returned by + // Changes. + Diff(from, to string) (io.ReadCloser, error) + + // ApplyDiff applies a tarstream to a layer. Information about the tarstream + // is cached with the layer. Typically, a layer which is populated using a + // tarstream will be expected to not be modified in any other way, either + // before or after the diff is applied. + ApplyDiff(to string, diff archive.Reader) (int64, error) + + // Layers returns a list of the currently known layers. + Layers() ([]Layer, error) + + // Images returns a list of the currently known images. + Images() ([]Image, error) + + // Containers returns a list of the currently known containers. + Containers() ([]Container, error) + + // GetNames returns the list of names for a layer, image, or container. 
+ GetNames(id string) ([]string, error) + + // SetNames changes the list of names for a layer, image, or container. + SetNames(id string, names []string) error + + // ListImageBigData retrieves a list of the (possibly large) chunks of named + // data associated with an image. + ListImageBigData(id string) ([]string, error) + + // GetImageBigData retrieves a (possibly large) chunk of named data associated + // with an image. + GetImageBigData(id, key string) ([]byte, error) + + // GetImageBigDataSize retrieves the size of a (possibly large) chunk + // of named data associated with an image. + GetImageBigDataSize(id, key string) (int64, error) + + // SetImageBigData stores a (possibly large) chunk of named data associated + // with an image. + SetImageBigData(id, key string, data []byte) error + + // ListContainerBigData retrieves a list of the (possibly large) chunks of + // named data associated with a container. + ListContainerBigData(id string) ([]string, error) + + // GetContainerBigData retrieves a (possibly large) chunk of named data + // associated with a container. + GetContainerBigData(id, key string) ([]byte, error) + + // GetContainerBigDataSize retrieves the size of a (possibly large) + // chunk of named data associated with a container. + GetContainerBigDataSize(id, key string) (int64, error) + + // SetContainerBigData stores a (possibly large) chunk of named data + // associated with a container. + SetContainerBigData(id, key string, data []byte) error + + // GetLayer returns a specific layer. + GetLayer(id string) (*Layer, error) + + // GetImage returns a specific image. + GetImage(id string) (*Image, error) + + // GetImagesByTopLayer returns a list of images which reference the specified + // layer as their top layer. They will have different IDs and names + // and may have different metadata, big data items, and flags. + GetImagesByTopLayer(id string) ([]*Image, error) + + // GetContainer returns a specific container. + GetContainer(id string) (*Container, error) + + // GetContainerByLayer returns a specific container based on its layer ID or + // name. + GetContainerByLayer(id string) (*Container, error) + + // GetContainerDirectory returns a path of a directory which the caller + // can use to store data, specific to the container, which the library + // does not directly manage. The directory will be deleted when the + // container is deleted. + GetContainerDirectory(id string) (string, error) + + // SetContainerDirectoryFile is a convenience function which stores + // a piece of data in the specified file relative to the container's + // directory. + SetContainerDirectoryFile(id, file string, data []byte) error + + // GetFromContainerDirectory is a convenience function which reads + // the contents of the specified file relative to the container's + // directory. + GetFromContainerDirectory(id, file string) ([]byte, error) + + // GetContainerRunDirectory returns a path of a directory which the + // caller can use to store data, specific to the container, which the + // library does not directly manage. The directory will be deleted + // when the host system is restarted. + GetContainerRunDirectory(id string) (string, error) + + // SetContainerRunDirectoryFile is a convenience function which stores + // a piece of data in the specified file relative to the container's + // run directory. 
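+	// (That is, under the directory returned by GetContainerRunDirectory.)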
+ SetContainerRunDirectoryFile(id, file string, data []byte) error + + // GetFromContainerRunDirectory is a convenience function which reads + // the contents of the specified file relative to the container's run + // directory. + GetFromContainerRunDirectory(id, file string) ([]byte, error) + + // Lookup returns the ID of a layer, image, or container with the specified + // name or ID. + Lookup(name string) (string, error) + + // Shutdown attempts to free any kernel resources which are being used + // by the underlying driver. If "force" is true, any mounted (i.e., in + // use) layers are unmounted beforehand. If "force" is not true, then + // layers being in use is considered to be an error condition. A list + // of still-mounted layers is returned along with possible errors. + Shutdown(force bool) (layers []string, err error) + + // Version returns version information, in the form of key-value pairs, from + // the storage package. + Version() ([][2]string, error) +} + +// ImageOptions is used for passing options to a Store's CreateImage() method. +type ImageOptions struct { +} + +// ContainerOptions is used for passing options to a Store's CreateContainer() method. +type ContainerOptions struct { +} + +type store struct { + lastLoaded time.Time + runRoot string + graphLock Locker + graphRoot string + graphDriverName string + graphOptions []string + uidMap []idtools.IDMap + gidMap []idtools.IDMap + graphDriver drivers.Driver + layerStore LayerStore + imageStore ImageStore + containerStore ContainerStore +} + +// GetStore attempts to find an already-created Store object matching the +// specified location and graph driver, and if it can't, it creates and +// initializes a new Store object, and the underlying storage that it controls. +func GetStore(options StoreOptions) (Store, error) { + if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 { + options = DefaultStoreOptions + } + + if options.GraphRoot != "" { + options.GraphRoot = filepath.Clean(options.GraphRoot) + } + if options.RunRoot != "" { + options.RunRoot = filepath.Clean(options.RunRoot) + } + + storesLock.Lock() + defer storesLock.Unlock() + + for _, s := range stores { + if s.graphRoot == options.GraphRoot && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) { + return s, nil + } + } + + if options.GraphRoot == "" { + return nil, ErrIncompleteOptions + } + if options.RunRoot == "" { + return nil, ErrIncompleteOptions + } + + if err := os.MkdirAll(options.RunRoot, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + for _, subdir := range []string{} { + if err := os.MkdirAll(filepath.Join(options.RunRoot, subdir), 0700); err != nil && !os.IsExist(err) { + return nil, err + } + } + if err := os.MkdirAll(options.GraphRoot, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + for _, subdir := range []string{"mounts", "tmp", options.GraphDriverName} { + if err := os.MkdirAll(filepath.Join(options.GraphRoot, subdir), 0700); err != nil && !os.IsExist(err) { + return nil, err + } + } + + graphLock, err := GetLockfile(filepath.Join(options.GraphRoot, "storage.lock")) + if err != nil { + return nil, err + } + s := &store{ + runRoot: options.RunRoot, + graphLock: graphLock, + graphRoot: options.GraphRoot, + graphDriverName: options.GraphDriverName, + graphOptions: options.GraphDriverOptions, + uidMap: copyIDMap(options.UidMap), + gidMap: copyIDMap(options.GidMap), + } + if err := s.load(); err != nil { + return 
nil, err + } + + stores = append(stores, s) + + return s, nil +} + +func copyIDMap(idmap []idtools.IDMap) []idtools.IDMap { + m := []idtools.IDMap{} + if idmap != nil { + m = make([]idtools.IDMap, len(idmap)) + copy(m, idmap) + } + if len(m) > 0 { + return m[:] + } + return nil +} + +func (s *store) GetRunRoot() string { + return s.runRoot +} + +func (s *store) GetGraphDriverName() string { + return s.graphDriverName +} + +func (s *store) GetGraphRoot() string { + return s.graphRoot +} + +func (s *store) GetGraphOptions() []string { + return s.graphOptions +} + +func (s *store) load() error { + driver, err := s.GetGraphDriver() + if err != nil { + return err + } + s.graphDriver = driver + s.graphDriverName = driver.String() + driverPrefix := s.graphDriverName + "-" + + rls, err := s.GetLayerStore() + if err != nil { + return err + } + s.layerStore = rls + + gipath := filepath.Join(s.graphRoot, driverPrefix+"images") + if err := os.MkdirAll(gipath, 0700); err != nil { + return err + } + ris, err := newImageStore(gipath) + if err != nil { + return err + } + s.imageStore = ris + gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers") + if err := os.MkdirAll(gcpath, 0700); err != nil { + return err + } + rcs, err := newContainerStore(gcpath) + if err != nil { + return err + } + rcpath := filepath.Join(s.runRoot, driverPrefix+"containers") + if err := os.MkdirAll(rcpath, 0700); err != nil { + return err + } + s.containerStore = rcs + return nil +} + +func (s *store) getGraphDriver() (drivers.Driver, error) { + if s.graphDriver != nil { + return s.graphDriver, nil + } + driver, err := drivers.New(s.graphRoot, s.graphDriverName, s.graphOptions, s.uidMap, s.gidMap) + if err != nil { + return nil, err + } + s.graphDriver = driver + s.graphDriverName = driver.String() + return driver, nil +} + +func (s *store) GetGraphDriver() (drivers.Driver, error) { + s.graphLock.Lock() + defer s.graphLock.Unlock() + if s.graphLock.TouchedSince(s.lastLoaded) { + s.graphDriver = nil + s.layerStore = nil + s.lastLoaded = time.Now() + } + return s.getGraphDriver() +} + +func (s *store) GetLayerStore() (LayerStore, error) { + s.graphLock.Lock() + defer s.graphLock.Unlock() + if s.graphLock.TouchedSince(s.lastLoaded) { + s.graphDriver = nil + s.layerStore = nil + s.lastLoaded = time.Now() + } + if s.layerStore != nil { + return s.layerStore, nil + } + driver, err := s.getGraphDriver() + if err != nil { + return nil, err + } + driverPrefix := s.graphDriverName + "-" + rlpath := filepath.Join(s.runRoot, driverPrefix+"layers") + if err := os.MkdirAll(rlpath, 0700); err != nil { + return nil, err + } + glpath := filepath.Join(s.graphRoot, driverPrefix+"layers") + if err := os.MkdirAll(glpath, 0700); err != nil { + return nil, err + } + rls, err := newLayerStore(rlpath, glpath, driver) + if err != nil { + return nil, err + } + s.layerStore = rls + return s.layerStore, nil +} + +func (s *store) GetImageStore() (ImageStore, error) { + if s.imageStore != nil { + return s.imageStore, nil + } + return nil, ErrLoadError +} + +func (s *store) GetContainerStore() (ContainerStore, error) { + if s.containerStore != nil { + return s.containerStore, nil + } + return nil, ErrLoadError +} + +func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, diff archive.Reader) (*Layer, int64, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, -1, err + } + ristore, err := s.GetImageStore() + if err != nil { + return nil, -1, err + } + rcstore, err := s.GetContainerStore() + 
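// All three stores are consulted: the new layer goes into rlstore, while
+	// rcstore is checked further down so that a container's private layer is
+	// never reused as a parent (ErrParentIsContainer).
+	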
if err != nil { + return nil, -1, err + } + + rlstore.Lock() + defer rlstore.Unlock() + defer rlstore.Touch() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + defer ristore.Touch() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + defer rcstore.Touch() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + if id == "" { + id = stringid.GenerateRandomID() + } + if parent != "" { + if l, err := rlstore.Get(parent); err == nil && l != nil { + parent = l.ID + } else { + return nil, -1, ErrLayerUnknown + } + containers, err := rcstore.Containers() + if err != nil { + return nil, -1, err + } + for _, container := range containers { + if container.LayerID == parent { + return nil, -1, ErrParentIsContainer + } + } + } + return rlstore.Put(id, parent, names, mountLabel, nil, writeable, nil, diff) +} + +func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool) (*Layer, error) { + layer, _, err := s.PutLayer(id, parent, names, mountLabel, writeable, nil) + return layer, err +} + +func (s *store) CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + defer ristore.Touch() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + defer rcstore.Touch() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + if id == "" { + id = stringid.GenerateRandomID() + } + + ilayer, err := rlstore.Get(layer) + if err != nil { + return nil, err + } + if ilayer == nil { + return nil, ErrLayerUnknown + } + layer = ilayer.ID + return ristore.Create(id, names, layer, metadata) +} + +func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + defer rlstore.Touch() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + defer rcstore.Touch() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if id == "" { + id = stringid.GenerateRandomID() + } + + cimage, err := ristore.Get(image) + if err != nil { + return nil, err + } + if cimage == nil { + return nil, ErrImageUnknown + } + clayer, err := rlstore.Create(layer, cimage.TopLayer, nil, "", nil, true) + if err != nil { + return nil, err + } + layer = clayer.ID + container, err := rcstore.Create(id, names, cimage.ID, layer, metadata) + if err != nil || container == nil { + rlstore.Delete(layer) 
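+		// The layer created above is removed here so that a failed
+		// container creation does not leak it.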
+ } + return container, err +} + +func (s *store) SetMetadata(id, metadata string) error { + rlstore, err := s.GetLayerStore() + if err != nil { + return err + } + ristore, err := s.GetImageStore() + if err != nil { + return err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if rlstore.Exists(id) { + defer rlstore.Touch() + return rlstore.SetMetadata(id, metadata) + } + if ristore.Exists(id) { + defer ristore.Touch() + return ristore.SetMetadata(id, metadata) + } + if rcstore.Exists(id) { + defer rcstore.Touch() + return rcstore.SetMetadata(id, metadata) + } + return ErrNotAnID +} + +func (s *store) GetMetadata(id string) (string, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return "", err + } + ristore, err := s.GetImageStore() + if err != nil { + return "", err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return "", err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if rlstore.Exists(id) { + return rlstore.GetMetadata(id) + } + if ristore.Exists(id) { + return ristore.GetMetadata(id) + } + if rcstore.Exists(id) { + return rcstore.GetMetadata(id) + } + return "", ErrNotAnID +} + +func (s *store) ListImageBigData(id string) ([]string, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + + return ristore.GetBigDataNames(id) +} + +func (s *store) GetImageBigDataSize(id, key string) (int64, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return -1, err + } + ristore, err := s.GetImageStore() + if err != nil { + return -1, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + + return ristore.GetBigDataSize(id, key) +} + +func (s *store) GetImageBigData(id, key string) ([]byte, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + + return ristore.GetBigData(id, key) +} + +func (s *store) SetImageBigData(id, key string, data 
[]byte) error { + rlstore, err := s.GetLayerStore() + if err != nil { + return err + } + ristore, err := s.GetImageStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + + return ristore.SetBigData(id, key, data) +} + +func (s *store) ListContainerBigData(id string) ([]string, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + return rcstore.GetBigDataNames(id) +} + +func (s *store) GetContainerBigDataSize(id, key string) (int64, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return -1, err + } + ristore, err := s.GetImageStore() + if err != nil { + return -1, err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return -1, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + return rcstore.GetBigDataSize(id, key) +} + +func (s *store) GetContainerBigData(id, key string) ([]byte, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + return rcstore.GetBigData(id, key) +} + +func (s *store) SetContainerBigData(id, key string, data []byte) error { + rlstore, err := s.GetLayerStore() + if err != nil { + return err + } + ristore, err := s.GetImageStore() + if err != nil { + return err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + return rcstore.SetBigData(id, key, data) +} + +func (s *store) Exists(id string) bool { + rcstore, err := s.GetContainerStore() + if err != nil { + return false + } + ristore, 
err := s.GetImageStore() + if err != nil { + return false + } + rlstore, err := s.GetLayerStore() + if err != nil { + return false + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if rcstore.Exists(id) { + return true + } + if ristore.Exists(id) { + return true + } + return rlstore.Exists(id) +} + +func (s *store) SetNames(id string, names []string) error { + rcstore, err := s.GetContainerStore() + if err != nil { + return err + } + ristore, err := s.GetImageStore() + if err != nil { + return err + } + rlstore, err := s.GetLayerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + deduped := []string{} + seen := make(map[string]bool) + for _, name := range names { + if _, wasSeen := seen[name]; !wasSeen { + seen[name] = true + deduped = append(deduped, name) + } + } + + if rlstore.Exists(id) { + return rlstore.SetNames(id, deduped) + } + if ristore.Exists(id) { + return ristore.SetNames(id, deduped) + } + if rcstore.Exists(id) { + return rcstore.SetNames(id, deduped) + } + return ErrLayerUnknown +} + +func (s *store) GetNames(id string) ([]string, error) { + rcstore, err := s.GetContainerStore() + if err != nil { + return nil, err + } + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if l, err := rlstore.Get(id); l != nil && err == nil { + return l.Names, nil + } + if i, err := ristore.Get(id); i != nil && err == nil { + return i.Names, nil + } + if c, err := rcstore.Get(id); c != nil && err == nil { + return c.Names, nil + } + return nil, ErrLayerUnknown +} + +func (s *store) Lookup(name string) (string, error) { + rcstore, err := s.GetContainerStore() + if err != nil { + return "", err + } + ristore, err := s.GetImageStore() + if err != nil { + return "", err + } + rlstore, err := s.GetLayerStore() + if err != nil { + return "", err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if l, err := rlstore.Get(name); l != nil && err == nil { + return l.ID, nil + } + if i, err := ristore.Get(name); i != nil && err == nil { + return i.ID, 
nil
+	}
+	if c, err := rcstore.Get(name); c != nil && err == nil {
+		return c.ID, nil
+	}
+	return "", ErrLayerUnknown
+}
+
+func (s *store) DeleteLayer(id string) error {
+	rlstore, err := s.GetLayerStore()
+	if err != nil {
+		return err
+	}
+	ristore, err := s.GetImageStore()
+	if err != nil {
+		return err
+	}
+	rcstore, err := s.GetContainerStore()
+	if err != nil {
+		return err
+	}
+
+	rlstore.Lock()
+	defer rlstore.Unlock()
+	if modified, err := rlstore.Modified(); modified || err != nil {
+		rlstore.Load()
+	}
+	ristore.Lock()
+	defer ristore.Unlock()
+	if modified, err := ristore.Modified(); modified || err != nil {
+		ristore.Load()
+	}
+	rcstore.Lock()
+	defer rcstore.Unlock()
+	if modified, err := rcstore.Modified(); modified || err != nil {
+		rcstore.Load()
+	}
+
+	if rlstore.Exists(id) {
+		defer rlstore.Touch()
+		defer rcstore.Touch()
+		if l, err := rlstore.Get(id); err == nil {
+			id = l.ID
+		}
+		layers, err := rlstore.Layers()
+		if err != nil {
+			return err
+		}
+		for _, layer := range layers {
+			if layer.Parent == id {
+				return ErrLayerHasChildren
+			}
+		}
+		images, err := ristore.Images()
+		if err != nil {
+			return err
+		}
+		for _, image := range images {
+			if image.TopLayer == id {
+				return ErrLayerUsedByImage
+			}
+		}
+		containers, err := rcstore.Containers()
+		if err != nil {
+			return err
+		}
+		for _, container := range containers {
+			if container.LayerID == id {
+				return ErrLayerUsedByContainer
+			}
+		}
+		return rlstore.Delete(id)
+	} else {
+		return ErrNotALayer
+	}
+}
+
+func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) {
+	rlstore, err := s.GetLayerStore()
+	if err != nil {
+		return nil, err
+	}
+	ristore, err := s.GetImageStore()
+	if err != nil {
+		return nil, err
+	}
+	rcstore, err := s.GetContainerStore()
+	if err != nil {
+		return nil, err
+	}
+
+	rlstore.Lock()
+	defer rlstore.Unlock()
+	if modified, err := rlstore.Modified(); modified || err != nil {
+		rlstore.Load()
+	}
+	ristore.Lock()
+	defer ristore.Unlock()
+	if modified, err := ristore.Modified(); modified || err != nil {
+		ristore.Load()
+	}
+	rcstore.Lock()
+	defer rcstore.Unlock()
+	if modified, err := rcstore.Modified(); modified || err != nil {
+		rcstore.Load()
+	}
+	layersToRemove := []string{}
+	if ristore.Exists(id) {
+		image, err := ristore.Get(id)
+		if err != nil {
+			return nil, err
+		}
+		id = image.ID
+		defer rlstore.Touch()
+		defer ristore.Touch()
+		containers, err := rcstore.Containers()
+		if err != nil {
+			return nil, err
+		}
+		aContainerByImage := make(map[string]string)
+		for _, container := range containers {
+			aContainerByImage[container.ImageID] = container.ID
+		}
+		if _, ok := aContainerByImage[id]; ok {
+			return nil, ErrImageUsedByContainer
+		}
+		images, err := ristore.Images()
+		if err != nil {
+			return nil, err
+		}
+		layers, err := rlstore.Layers()
+		if err != nil {
+			return nil, err
+		}
+		childrenByParent := make(map[string]*[]string)
+		for _, layer := range layers {
+			parent := layer.Parent
+			if list, ok := childrenByParent[parent]; ok {
+				newList := append(*list, layer.ID)
+				childrenByParent[parent] = &newList
+			} else {
+				childrenByParent[parent] = &([]string{layer.ID})
+			}
+		}
+		anyImageByTopLayer := make(map[string]string)
+		for _, img := range images {
+			if img.ID != id {
+				anyImageByTopLayer[img.TopLayer] = img.ID
+			}
+		}
+		if commit {
+			if err = ristore.Delete(id); err != nil {
+				return nil, err
+			}
+		}
+		layer := image.TopLayer
+		lastRemoved := ""
+		for layer != "" {
+			if rcstore.Exists(layer) {
+				break
+			}
+			if _, ok := anyImageByTopLayer[layer];
ok { + break + } + parent := "" + if l, err := rlstore.Get(layer); err == nil { + parent = l.Parent + } + otherRefs := 0 + if childList, ok := childrenByParent[layer]; ok && childList != nil { + children := *childList + for _, child := range children { + if child != lastRemoved { + otherRefs++ + } + } + } + if otherRefs != 0 { + break + } + lastRemoved = layer + layersToRemove = append(layersToRemove, lastRemoved) + layer = parent + } + } else { + return nil, ErrNotAnImage + } + if commit { + for _, layer := range layersToRemove { + if err = rlstore.Delete(layer); err != nil { + return nil, err + } + } + } + return layersToRemove, nil +} + +func (s *store) DeleteContainer(id string) error { + rlstore, err := s.GetLayerStore() + if err != nil { + return err + } + ristore, err := s.GetImageStore() + if err != nil { + return err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if rcstore.Exists(id) { + defer rlstore.Touch() + defer rcstore.Touch() + if container, err := rcstore.Get(id); err == nil { + if rlstore.Exists(container.LayerID) { + if err = rlstore.Delete(container.LayerID); err != nil { + return err + } + if err = rcstore.Delete(id); err != nil { + return err + } + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GetGraphRoot(), middleDir, container.ID) + if err = os.RemoveAll(gcpath); err != nil { + return err + } + rcpath := filepath.Join(s.GetRunRoot(), middleDir, container.ID) + if err = os.RemoveAll(rcpath); err != nil { + return err + } + return nil + } else { + return ErrNotALayer + } + } + } else { + return ErrNotAContainer + } + return nil +} + +func (s *store) Delete(id string) error { + rlstore, err := s.GetLayerStore() + if err != nil { + return err + } + ristore, err := s.GetImageStore() + if err != nil { + return err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if rcstore.Exists(id) { + defer rlstore.Touch() + defer rcstore.Touch() + if container, err := rcstore.Get(id); err == nil { + if rlstore.Exists(container.LayerID) { + if err = rlstore.Delete(container.LayerID); err != nil { + return err + } + if err = rcstore.Delete(id); err != nil { + return err + } + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GetGraphRoot(), middleDir, container.ID, "userdata") + if err = os.RemoveAll(gcpath); err != nil { + return err + } + rcpath := filepath.Join(s.GetRunRoot(), middleDir, container.ID, "userdata") + if err = os.RemoveAll(rcpath); err != nil { + return err + } + return nil + } else { + return ErrNotALayer + } + } + } + if ristore.Exists(id) { + defer ristore.Touch() + return ristore.Delete(id) + } + if rlstore.Exists(id) { + defer rlstore.Touch() + return rlstore.Delete(id) + 
} + return ErrLayerUnknown +} + +func (s *store) Wipe() error { + rcstore, err := s.GetContainerStore() + if err != nil { + return err + } + ristore, err := s.GetImageStore() + if err != nil { + return err + } + rlstore, err := s.GetLayerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + defer rlstore.Touch() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + defer ristore.Touch() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + defer rcstore.Touch() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if err = rcstore.Wipe(); err != nil { + return err + } + if err = ristore.Wipe(); err != nil { + return err + } + return rlstore.Wipe() +} + +func (s *store) Status() ([][2]string, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + return rlstore.Status() +} + +func (s *store) Version() ([][2]string, error) { + return [][2]string{ + {"GitCommit", storageversion.GitCommit}, + {"Version", storageversion.Version}, + {"BuildTime", storageversion.BuildTime}, + }, nil +} + +func (s *store) Mount(id, mountLabel string) (string, error) { + rcstore, err := s.GetContainerStore() + if err != nil { + return "", err + } + rlstore, err := s.GetLayerStore() + if err != nil { + return "", err + } + + rlstore.Lock() + defer rlstore.Unlock() + defer rlstore.Touch() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if c, err := rcstore.Get(id); c != nil && err == nil { + id = c.LayerID + } + return rlstore.Mount(id, mountLabel) +} + +func (s *store) Unmount(id string) error { + rcstore, err := s.GetContainerStore() + if err != nil { + return err + } + rlstore, err := s.GetLayerStore() + if err != nil { + return err + } + + rlstore.Lock() + defer rlstore.Unlock() + defer rlstore.Touch() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + if c, err := rcstore.Get(id); c != nil && err == nil { + id = c.LayerID + } + return rlstore.Unmount(id) +} + +func (s *store) Changes(from, to string) ([]archive.Change, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + + return rlstore.Changes(from, to) +} + +func (s *store) DiffSize(from, to string) (int64, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return -1, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + + return rlstore.DiffSize(from, to) +} + +func (s *store) Diff(from, to string) (io.ReadCloser, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + + return rlstore.Diff(from, to) +} + +func (s *store) ApplyDiff(to string, diff archive.Reader) (int64, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return 
-1, err
+	}
+
+	rlstore.Lock()
+	defer rlstore.Unlock()
+	if modified, err := rlstore.Modified(); modified || err != nil {
+		rlstore.Load()
+	}
+
+	return rlstore.ApplyDiff(to, diff)
+}
+
+func (s *store) Layers() ([]Layer, error) {
+	rlstore, err := s.GetLayerStore()
+	if err != nil {
+		return nil, err
+	}
+
+	rlstore.Lock()
+	defer rlstore.Unlock()
+	if modified, err := rlstore.Modified(); modified || err != nil {
+		rlstore.Load()
+	}
+
+	return rlstore.Layers()
+}
+
+func (s *store) Images() ([]Image, error) {
+	rlstore, err := s.GetLayerStore()
+	if err != nil {
+		return nil, err
+	}
+	ristore, err := s.GetImageStore()
+	if err != nil {
+		return nil, err
+	}
+
+	rlstore.Lock()
+	defer rlstore.Unlock()
+	if modified, err := rlstore.Modified(); modified || err != nil {
+		rlstore.Load()
+	}
+	ristore.Lock()
+	defer ristore.Unlock()
+	if modified, err := ristore.Modified(); modified || err != nil {
+		ristore.Load()
+	}
+
+	return ristore.Images()
+}
+
+func (s *store) Containers() ([]Container, error) {
+	rlstore, err := s.GetLayerStore()
+	if err != nil {
+		return nil, err
+	}
+	ristore, err := s.GetImageStore()
+	if err != nil {
+		return nil, err
+	}
+	rcstore, err := s.GetContainerStore()
+	if err != nil {
+		return nil, err
+	}
+
+	rlstore.Lock()
+	defer rlstore.Unlock()
+	if modified, err := rlstore.Modified(); modified || err != nil {
+		rlstore.Load()
+	}
+	ristore.Lock()
+	defer ristore.Unlock()
+	if modified, err := ristore.Modified(); modified || err != nil {
+		ristore.Load()
+	}
+	rcstore.Lock()
+	defer rcstore.Unlock()
+	if modified, err := rcstore.Modified(); modified || err != nil {
+		rcstore.Load()
+	}
+
+	return rcstore.Containers()
+}
+
+func (s *store) GetLayer(id string) (*Layer, error) {
+	rlstore, err := s.GetLayerStore()
+	if err != nil {
+		return nil, err
+	}
+
+	rlstore.Lock()
+	defer rlstore.Unlock()
+	if modified, err := rlstore.Modified(); modified || err != nil {
+		rlstore.Load()
+	}
+
+	return rlstore.Get(id)
+}
+
+func (s *store) GetImage(id string) (*Image, error) {
+	ristore, err := s.GetImageStore()
+	if err != nil {
+		return nil, err
+	}
+	rlstore, err := s.GetLayerStore()
+	if err != nil {
+		return nil, err
+	}
+
+	rlstore.Lock()
+	defer rlstore.Unlock()
+	if modified, err := rlstore.Modified(); modified || err != nil {
+		rlstore.Load()
+	}
+	ristore.Lock()
+	defer ristore.Unlock()
+	if modified, err := ristore.Modified(); modified || err != nil {
+		ristore.Load()
+	}
+
+	return ristore.Get(id)
+}
+
+func (s *store) GetImagesByTopLayer(id string) ([]*Image, error) {
+	ristore, err := s.GetImageStore()
+	if err != nil {
+		return nil, err
+	}
+	rlstore, err := s.GetLayerStore()
+	if err != nil {
+		return nil, err
+	}
+
+	rlstore.Lock()
+	defer rlstore.Unlock()
+	if modified, err := rlstore.Modified(); modified || err != nil {
+		rlstore.Load()
+	}
+	ristore.Lock()
+	defer ristore.Unlock()
+	if modified, err := ristore.Modified(); modified || err != nil {
+		ristore.Load()
+	}
+
+	layer, err := rlstore.Get(id)
+	if err != nil {
+		return nil, err
+	}
+	images := []*Image{}
+	imageList, err := ristore.Images()
+	if err != nil {
+		return nil, err
+	}
+	for _, image := range imageList {
+		if image.TopLayer == layer.ID {
+			image := image // copy, so we don't append the loop variable's address every iteration
+			images = append(images, &image)
+		}
+	}
+
+	return images, nil
+}
+
+func (s *store) GetContainer(id string) (*Container, error) {
+	ristore, err := s.GetImageStore()
+	if err != nil {
+		return nil, err
+	}
+	rlstore, err := s.GetLayerStore()
+	if err != nil {
+		return nil, err
+	}
+	rcstore, err := s.GetContainerStore()
+	if err != nil {
+		return
nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + return rcstore.Get(id) +} + +func (s *store) GetContainerByLayer(id string) (*Container, error) { + ristore, err := s.GetImageStore() + if err != nil { + return nil, err + } + rlstore, err := s.GetLayerStore() + if err != nil { + return nil, err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return nil, err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + layer, err := rlstore.Get(id) + if err != nil { + return nil, err + } + containerList, err := rcstore.Containers() + if err != nil { + return nil, err + } + for _, container := range containerList { + if container.LayerID == layer.ID { + return &container, nil + } + } + + return nil, ErrContainerUnknown +} + +func (s *store) GetContainerDirectory(id string) (string, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return "", err + } + ristore, err := s.GetImageStore() + if err != nil { + return "", err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return "", err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + id, err = rcstore.Lookup(id) + if err != nil { + return "", err + } + + middleDir := s.graphDriverName + "-containers" + gcpath := filepath.Join(s.GetGraphRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(gcpath, 0700); err != nil { + return "", err + } + return gcpath, nil +} + +func (s *store) GetContainerRunDirectory(id string) (string, error) { + rlstore, err := s.GetLayerStore() + if err != nil { + return "", err + } + ristore, err := s.GetImageStore() + if err != nil { + return "", err + } + rcstore, err := s.GetContainerStore() + if err != nil { + return "", err + } + + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + ristore.Lock() + defer ristore.Unlock() + if modified, err := ristore.Modified(); modified || err != nil { + ristore.Load() + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + id, err = rcstore.Lookup(id) + if err != nil { + return "", err + } + + middleDir := s.graphDriverName + "-containers" + rcpath := filepath.Join(s.GetRunRoot(), middleDir, id, "userdata") + if err := os.MkdirAll(rcpath, 0700); err != nil { + return "", err + } + return rcpath, nil +} + +func (s *store) SetContainerDirectoryFile(id, file string, data []byte) error { + dir, err := s.GetContainerDirectory(id) + if err != nil { + 
return err
+	}
+	err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700)
+	if err != nil {
+		return err
+	}
+	return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600)
+}
+
+func (s *store) GetFromContainerDirectory(id, file string) ([]byte, error) {
+	dir, err := s.GetContainerDirectory(id)
+	if err != nil {
+		return nil, err
+	}
+	return ioutil.ReadFile(filepath.Join(dir, file))
+}
+
+func (s *store) SetContainerRunDirectoryFile(id, file string, data []byte) error {
+	dir, err := s.GetContainerRunDirectory(id)
+	if err != nil {
+		return err
+	}
+	err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700)
+	if err != nil {
+		return err
+	}
+	return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600)
+}
+
+func (s *store) GetFromContainerRunDirectory(id, file string) ([]byte, error) {
+	dir, err := s.GetContainerRunDirectory(id)
+	if err != nil {
+		return nil, err
+	}
+	return ioutil.ReadFile(filepath.Join(dir, file))
+}
+
+func (s *store) Shutdown(force bool) ([]string, error) {
+	mounted := []string{}
+	modified := false
+
+	rlstore, err := s.GetLayerStore()
+	if err != nil {
+		return mounted, err
+	}
+
+	rlstore.Lock()
+	defer rlstore.Unlock()
+	if modified, err := rlstore.Modified(); modified || err != nil {
+		rlstore.Load()
+	}
+
+	s.graphLock.Lock()
+	defer s.graphLock.Unlock()
+	layers, err := rlstore.Layers()
+	if err != nil {
+		return mounted, err
+	}
+	for _, layer := range layers {
+		if layer.MountCount == 0 {
+			continue
+		}
+		mounted = append(mounted, layer.ID)
+		if force {
+			for layer.MountCount > 0 {
+				err2 := rlstore.Unmount(layer.ID)
+				if err2 != nil {
+					if err == nil {
+						err = err2
+					}
+					break
+				}
+				modified = true
+			}
+		}
+	}
+	if len(mounted) > 0 && err == nil {
+		err = ErrLayerUsedByContainer
+	}
+	if err == nil {
+		err = s.graphDriver.Cleanup()
+		s.graphLock.Touch()
+		modified = true
+	}
+	if modified {
+		rlstore.Touch()
+	}
+	return mounted, err
+}
+
+// Convert a BigData key name into an acceptable file name.
+func makeBigDataBaseName(key string) string {
+	reader := strings.NewReader(key)
+	for reader.Len() > 0 {
+		ch, size, err := reader.ReadRune()
+		if err != nil || size != 1 {
+			break
+		}
+		if ch != '.' && !(ch >= '0' && ch <= '9') && !(ch >= 'a' && ch <= 'z') {
+			break
+		}
+	}
+	if reader.Len() > 0 {
+		return "=" + base64.StdEncoding.EncodeToString([]byte(key))
+	}
+	return key
+}
+
+func init() {
+	DefaultStoreOptions.RunRoot = "/var/run/containers"
+	DefaultStoreOptions.GraphRoot = "/var/lib/containers"
+	DefaultStoreOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER")
+	DefaultStoreOptions.GraphDriverOptions = strings.Split(os.Getenv("STORAGE_OPTS"), ",")
+	if len(DefaultStoreOptions.GraphDriverOptions) == 1 && DefaultStoreOptions.GraphDriverOptions[0] == "" {
+		DefaultStoreOptions.GraphDriverOptions = nil
+	}
+}
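For orientation, here is a minimal sketch of how a caller might drive the Store interface assembled above. It is not part of the patch: it assumes a storage.Store value obtained from the package's constructor (which this hunk does not show), and the names demoStore, "base-layer", "demo-image", and "demo-container" are invented for illustration; only methods defined above are used.

    // Hypothetical walk through the Store API: layer -> image -> container.
    func demoStore(s storage.Store) error {
    	// Create a writeable base layer; empty IDs ask the store to generate them.
    	layer, err := s.CreateLayer("", "", []string{"base-layer"}, "", true)
    	if err != nil {
    		return err
    	}
    	// Record an image whose top layer is the one we just created.
    	image, err := s.CreateImage("", []string{"demo-image"}, layer.ID, "{}", nil)
    	if err != nil {
    		return err
    	}
    	// A container gets its own fresh layer on top of the image's top layer.
    	container, err := s.CreateContainer("", []string{"demo-container"}, image.ID, "", "{}", nil)
    	if err != nil {
    		return err
    	}
    	// Mount resolves a container ID to its layer before mounting.
    	if _, err := s.Mount(container.ID, ""); err != nil {
    		return err
    	}
    	if err := s.Unmount(container.ID); err != nil {
    		return err
    	}
    	// Delete removes the container record, its layer, and its userdata directories.
    	return s.Delete(container.ID)
    }

Note that every method above follows the same discipline: lock each store it touches, reload if another process modified it, and Touch it on write paths.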
diff --git a/vendor/src/github.com/containers/storage/storageversion/version_lib.go b/vendor/src/github.com/containers/storage/storageversion/version_lib.go
new file mode 100644
index 00000000..761868e3
--- /dev/null
+++ b/vendor/src/github.com/containers/storage/storageversion/version_lib.go
@@ -0,0 +1,13 @@
+// +build !autogen
+
+// Package storageversion is auto-generated at build-time
+package storageversion
+
+// Default build-time variable for library-import.
+// This file is overridden on build with build-time information.
+const (
+	GitCommit string = "library-import"
+	Version   string = "library-import"
+	BuildTime string = "library-import"
+	IAmStatic string = "library-import"
+)
diff --git a/vendor/src/github.com/docker/distribution/CHANGELOG.md b/vendor/src/github.com/docker/distribution/CHANGELOG.md
index d9635264..dce49207 100644
--- a/vendor/src/github.com/docker/distribution/CHANGELOG.md
+++ b/vendor/src/github.com/docker/distribution/CHANGELOG.md
@@ -39,7 +39,7 @@
 - Changes the client Tags `All()` method to follow links
 - Allow registry clients to connect via HTTP2
 - Better handling of OAuth errors in client
-
+
 #### Spec
 - Manifest: clarify relationship between urls and foreign layers
diff --git a/vendor/src/github.com/docker/distribution/RELEASE-CHECKLIST.md b/vendor/src/github.com/docker/distribution/RELEASE-CHECKLIST.md
index d43ad013..49235cec 100644
--- a/vendor/src/github.com/docker/distribution/RELEASE-CHECKLIST.md
+++ b/vendor/src/github.com/docker/distribution/RELEASE-CHECKLIST.md
@@ -17,7 +17,7 @@ You will need PGP installed and a PGP key which has been added to your Github ac
 50. Push the signed tag
-60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox.
+60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox.
 70. Update the registry binary in [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request.
@@ -33,3 +33,4 @@ e.g. to release `2.3.1`
 `2.3 -> 2.3.1`
 90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images.
+
diff --git a/vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go
new file mode 100644
index 00000000..6d9bb4b6
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go
@@ -0,0 +1,267 @@
+package errcode
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// ErrorCoder is the base interface for ErrorCode and Error allowing
+// users of each to just call ErrorCode to get the real ID of each
+type ErrorCoder interface {
+	ErrorCode() ErrorCode
+}
+
+// ErrorCode represents the error type. The errors are serialized via strings
+// and the integer format may change and should *never* be exported.
+type ErrorCode int
+
+var _ error = ErrorCode(0)
+
+// ErrorCode just returns itself
+func (ec ErrorCode) ErrorCode() ErrorCode {
+	return ec
+}
+
+// Error returns the ID/Value
+func (ec ErrorCode) Error() string {
+	// NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
+	return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
+}
+
+// Descriptor returns the descriptor for the error code.
+func (ec ErrorCode) Descriptor() ErrorDescriptor {
+	d, ok := errorCodeToDescriptors[ec]
+
+	if !ok {
+		return ErrorCodeUnknown.Descriptor()
+	}
+
+	return d
+}
+
+// String returns the canonical identifier for this error code.
+func (ec ErrorCode) String() string {
+	return ec.Descriptor().Value
+}
+
+// Message returns the human-readable error message for this error code.
+func (ec ErrorCode) Message() string {
+	return ec.Descriptor().Message
+}
+
+// MarshalText encodes the receiver into UTF-8-encoded text and returns the
+// result.
+func (ec ErrorCode) MarshalText() (text []byte, err error) {
+	return []byte(ec.String()), nil
+}
+
+// UnmarshalText decodes the form generated by MarshalText.
+func (ec *ErrorCode) UnmarshalText(text []byte) error {
+	desc, ok := idToDescriptors[string(text)]
+
+	if !ok {
+		desc = ErrorCodeUnknown.Descriptor()
+	}
+
+	*ec = desc.Code
+
+	return nil
+}
+
+// WithMessage creates a new Error struct based on the passed-in info and
+// overrides the Message property.
+func (ec ErrorCode) WithMessage(message string) Error {
+	return Error{
+		Code:    ec,
+		Message: message,
+	}
+}
+
+// WithDetail creates a new Error struct based on the passed-in info and
+// sets the Detail property appropriately
+func (ec ErrorCode) WithDetail(detail interface{}) Error {
+	return Error{
+		Code:    ec,
+		Message: ec.Message(),
+	}.WithDetail(detail)
+}
+
+// WithArgs creates a new Error struct and sets the Args slice
+func (ec ErrorCode) WithArgs(args ...interface{}) Error {
+	return Error{
+		Code:    ec,
+		Message: ec.Message(),
+	}.WithArgs(args...)
+}
+
+// Error provides a wrapper around ErrorCode with extra Details provided.
+type Error struct {
+	Code    ErrorCode   `json:"code"`
+	Message string      `json:"message"`
+	Detail  interface{} `json:"detail,omitempty"`
+
+	// TODO(duglin): See if we need an "args" property so we can do the
+	// variable substitution right before showing the message to the user
+}
+
+var _ error = Error{}
+
+// ErrorCode returns the ID/Value of this Error
+func (e Error) ErrorCode() ErrorCode {
+	return e.Code
+}
+
+// Error returns a human-readable representation of the error.
+func (e Error) Error() string {
+	return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
+}
+
+// WithDetail will return a new Error, based on the current one, but with
+// some Detail info added
+func (e Error) WithDetail(detail interface{}) Error {
+	return Error{
+		Code:    e.Code,
+		Message: e.Message,
+		Detail:  detail,
+	}
+}
+
+// WithArgs uses the passed-in list of interface{} as the substitution
+// variables in the Error's Message string, but returns a new Error
+func (e Error) WithArgs(args ...interface{}) Error {
+	return Error{
+		Code:    e.Code,
+		Message: fmt.Sprintf(e.Code.Message(), args...),
+		Detail:  e.Detail,
+	}
+}
+
+// ErrorDescriptor provides relevant information about a given error code.
+type ErrorDescriptor struct {
+	// Code is the error code that this descriptor describes.
+	Code ErrorCode
+
+	// Value provides a unique, string key, often capitalized with
+	// underscores, to identify the error code. This value is used as the
+	// keyed value when serializing api errors.
+	Value string
+
+	// Message is a short, human-readable description of the error condition
+	// included in API responses.
+	Message string
+
+	// Description provides a complete account of the error's purpose, suitable
+	// for use in documentation.
+	Description string
+
+	// HTTPStatusCode provides the http status code that is associated with
+	// this error condition.
+	HTTPStatusCode int
+}
+
+// ParseErrorCode returns the ErrorCode registered for the given string value.
+// `ErrorCodeUnknown` will be returned if the error is not known.
+func ParseErrorCode(value string) ErrorCode {
+	ed, ok := idToDescriptors[value]
+	if ok {
+		return ed.Code
+	}
+
+	return ErrorCodeUnknown
+}
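To make the relationship between ErrorCode, Error, and the With* helpers above concrete, here is a hedged sketch of how a client package might register and raise a code. The group name "demo", the descriptor values, and pausedError are invented for illustration; Register and ServeJSON are added later in this patch, in register.go and handler.go.

    // Hypothetical error code; Register assigns the integer value at init time.
    var ErrorCodePaused = errcode.Register("demo", errcode.ErrorDescriptor{
    	Value:          "PAUSED",
    	Message:        "operation paused for %s",
    	Description:    "Returned when the demo service has paused an operation.",
    	HTTPStatusCode: http.StatusServiceUnavailable,
    })

    func pausedError(name string) errcode.Error {
    	// WithArgs substitutes into Message; WithDetail attaches free-form data.
    	return ErrorCodePaused.WithArgs(name).WithDetail(map[string]string{"name": name})
    }

An HTTP handler would typically hand the result to errcode.ServeJSON, which picks the status code from the descriptor and wraps the error in an Errors envelope.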
+// Errors provides the envelope for multiple errors and a few sugar methods
+// for use within the application.
+type Errors []error
+
+var _ error = Errors{}
+
+func (errs Errors) Error() string {
+	switch len(errs) {
+	case 0:
+		return "<nil>"
+	case 1:
+		return errs[0].Error()
+	default:
+		msg := "errors:\n"
+		for _, err := range errs {
+			msg += err.Error() + "\n"
+		}
+		return msg
+	}
+}
+
+// Len returns the current number of errors.
+func (errs Errors) Len() int {
+	return len(errs)
+}
+
+// MarshalJSON converts a slice of error, ErrorCode or Error into a
+// slice of Error, then serializes it
+func (errs Errors) MarshalJSON() ([]byte, error) {
+	var tmpErrs struct {
+		Errors []Error `json:"errors,omitempty"`
+	}
+
+	for _, daErr := range errs {
+		var err Error
+
+		switch daErr.(type) {
+		case ErrorCode:
+			err = daErr.(ErrorCode).WithDetail(nil)
+		case Error:
+			err = daErr.(Error)
+		default:
+			err = ErrorCodeUnknown.WithDetail(daErr)
+
+		}
+
+		// If the Error struct was set up and they forgot to set the
+		// Message field (meaning it's "") then grab it from the ErrCode
+		msg := err.Message
+		if msg == "" {
+			msg = err.Code.Message()
+		}
+
+		tmpErrs.Errors = append(tmpErrs.Errors, Error{
+			Code:    err.Code,
+			Message: msg,
+			Detail:  err.Detail,
+		})
+	}
+
+	return json.Marshal(tmpErrs)
+}
+
+// UnmarshalJSON deserializes []Error and then converts it into a slice of
+// Error or ErrorCode
+func (errs *Errors) UnmarshalJSON(data []byte) error {
+	var tmpErrs struct {
+		Errors []Error
+	}
+
+	if err := json.Unmarshal(data, &tmpErrs); err != nil {
+		return err
+	}
+
+	var newErrs Errors
+	for _, daErr := range tmpErrs.Errors {
+		// If Message is empty or exactly matches the Code's message string
+		// then just use the Code, no need for a full Error struct
+		if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
+			// Errors without details get converted to ErrorCode
+			newErrs = append(newErrs, daErr.Code)
+		} else {
+			// Errors with details are untouched
+			newErrs = append(newErrs, Error{
+				Code:    daErr.Code,
+				Message: daErr.Message,
+				Detail:  daErr.Detail,
+			})
+		}
+	}
+
+	*errs = newErrs
+	return nil
+}
diff --git a/vendor/src/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/src/github.com/docker/distribution/registry/api/errcode/handler.go
new file mode 100644
index 00000000..49a64a86
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/registry/api/errcode/handler.go
@@ -0,0 +1,44 @@
+package errcode
+
+import (
+	"encoding/json"
+	"net/http"
+)
+
+// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
+// and sets the content-type header to 'application/json'. It will handle
+// ErrorCoder and Errors, and if necessary will create an envelope.
+func ServeJSON(w http.ResponseWriter, err error) error {
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	var sc int
+
+	switch errs := err.(type) {
+	case Errors:
+		if len(errs) < 1 {
+			break
+		}
+
+		if err, ok := errs[0].(ErrorCoder); ok {
+			sc = err.ErrorCode().Descriptor().HTTPStatusCode
+		}
+	case ErrorCoder:
+		sc = errs.ErrorCode().Descriptor().HTTPStatusCode
+		err = Errors{err} // create an envelope.
+	default:
+		// We just have an unhandled error type, so just place in an envelope
+		// and move along.
+ err = Errors{err} + } + + if sc == 0 { + sc = http.StatusInternalServerError + } + + w.WriteHeader(sc) + + if err := json.NewEncoder(w).Encode(err); err != nil { + return err + } + + return nil +} diff --git a/vendor/src/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/src/github.com/docker/distribution/registry/api/errcode/register.go new file mode 100644 index 00000000..d1e8826c --- /dev/null +++ b/vendor/src/github.com/docker/distribution/registry/api/errcode/register.go @@ -0,0 +1,138 @@ +package errcode + +import ( + "fmt" + "net/http" + "sort" + "sync" +) + +var ( + errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} + idToDescriptors = map[string]ErrorDescriptor{} + groupToDescriptors = map[string][]ErrorDescriptor{} +) + +var ( + // ErrorCodeUnknown is a generic error that can be used as a last + // resort if there is no situation-specific error message that can be used + ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + HTTPStatusCode: http.StatusInternalServerError, + }) + + // ErrorCodeUnsupported is returned when an operation is not supported. + ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + HTTPStatusCode: http.StatusMethodNotAllowed, + }) + + // ErrorCodeUnauthorized is returned if a request requires + // authentication. + ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ + Value: "UNAUTHORIZED", + Message: "authentication required", + Description: `The access controller was unable to authenticate + the client. Often this will be accompanied by a + Www-Authenticate HTTP response header indicating how to + authenticate.`, + HTTPStatusCode: http.StatusUnauthorized, + }) + + // ErrorCodeDenied is returned if a client does not have sufficient + // permission to perform an action. + ErrorCodeDenied = Register("errcode", ErrorDescriptor{ + Value: "DENIED", + Message: "requested access to the resource is denied", + Description: `The access controller denied access for the + operation on a resource.`, + HTTPStatusCode: http.StatusForbidden, + }) + + // ErrorCodeUnavailable provides a common error to report unavailability + // of a service or endpoint. + ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ + Value: "UNAVAILABLE", + Message: "service unavailable", + Description: "Returned when a service is not available", + HTTPStatusCode: http.StatusServiceUnavailable, + }) + + // ErrorCodeTooManyRequests is returned if a client attempts too many + // times to contact a service endpoint. 
+ ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ + Value: "TOOMANYREQUESTS", + Message: "too many requests", + Description: `Returned when a client attempts to contact a + service too many times`, + HTTPStatusCode: http.StatusTooManyRequests, + }) +) + +var nextCode = 1000 +var registerLock sync.Mutex + +// Register will make the passed-in error known to the environment and +// return a new ErrorCode +func Register(group string, descriptor ErrorDescriptor) ErrorCode { + registerLock.Lock() + defer registerLock.Unlock() + + descriptor.Code = ErrorCode(nextCode) + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) + } + + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return descriptor.Code +} + +type byValue []ErrorDescriptor + +func (a byValue) Len() int { return len(a) } +func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +// GetGroupNames returns the list of Error group names that are registered +func GetGroupNames() []string { + keys := []string{} + + for k := range groupToDescriptors { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// GetErrorCodeGroup returns the named group of error descriptors +func GetErrorCodeGroup(name string) []ErrorDescriptor { + desc := groupToDescriptors[name] + sort.Sort(byValue(desc)) + return desc +} + +// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are +// registered, irrespective of what group they're in +func GetErrorAllDescriptors() []ErrorDescriptor { + result := []ErrorDescriptor{} + + for _, group := range GetGroupNames() { + result = append(result, GetErrorCodeGroup(group)...) + } + sort.Sort(byValue(result)) + return result +} diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go new file mode 100644 index 00000000..9979abae --- /dev/null +++ b/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go @@ -0,0 +1,1596 @@ +package v2 + +import ( + "net/http" + "regexp" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" +) + +var ( + nameParameterDescriptor = ParameterDescriptor{ + Name: "name", + Type: "string", + Format: reference.NameRegexp.String(), + Required: true, + Description: `Name of the target repository.`, + } + + referenceParameterDescriptor = ParameterDescriptor{ + Name: "reference", + Type: "string", + Format: reference.TagRegexp.String(), + Required: true, + Description: `Tag or digest of the target manifest.`, + } + + uuidParameterDescriptor = ParameterDescriptor{ + Name: "uuid", + Type: "opaque", + Required: true, + Description: "A uuid identifying the upload. 
This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", + } + + digestPathParameter = ParameterDescriptor{ + Name: "digest", + Type: "path", + Required: true, + Format: digest.DigestRegexp.String(), + Description: `Digest of desired blob.`, + } + + hostHeader = ParameterDescriptor{ + Name: "Host", + Type: "string", + Description: "Standard HTTP Host Header. Should be set to the registry host.", + Format: "", + Examples: []string{"registry-1.docker.io"}, + } + + authHeader = ParameterDescriptor{ + Name: "Authorization", + Type: "string", + Description: "An RFC7235 compliant authorization header.", + Format: " ", + Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, + } + + authChallengeHeader = ParameterDescriptor{ + Name: "WWW-Authenticate", + Type: "string", + Description: "An RFC7235 compliant authentication challenge header.", + Format: ` realm="", ..."`, + Examples: []string{ + `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, + }, + } + + contentLengthZeroHeader = ParameterDescriptor{ + Name: "Content-Length", + Description: "The `Content-Length` header must be zero and the body must be empty.", + Type: "integer", + Format: "0", + } + + dockerUploadUUIDHeader = ParameterDescriptor{ + Name: "Docker-Upload-UUID", + Description: "Identifies the docker upload uuid for the current request.", + Type: "uuid", + Format: "", + } + + digestHeader = ParameterDescriptor{ + Name: "Docker-Content-Digest", + Description: "Digest of the targeted content for the request.", + Type: "digest", + Format: "", + } + + linkHeader = ParameterDescriptor{ + Name: "Link", + Type: "link", + Description: "RFC5988 compliant rel='next' with URL to next result set, if available", + Format: `<?n=&last=>; rel="next"`, + } + + paginationParameters = []ParameterDescriptor{ + { + Name: "n", + Type: "integer", + Description: "Limit the number of entries in each response. 
If not present, all entries will be returned.",
+			Format:      "<integer>",
+			Required:    false,
+		},
+		{
+			Name:        "last",
+			Type:        "string",
+			Description: "Result set will include values lexically after last.",
+			Format:      "",
+			Required:    false,
+		},
+	}
+
+	unauthorizedResponseDescriptor = ResponseDescriptor{
+		Name:        "Authentication Required",
+		StatusCode:  http.StatusUnauthorized,
+		Description: "The client is not authenticated.",
+		Headers: []ParameterDescriptor{
+			authChallengeHeader,
+			{
+				Name:        "Content-Length",
+				Type:        "integer",
+				Description: "Length of the JSON response body.",
+				Format:      "<length>",
+			},
+		},
+		Body: BodyDescriptor{
+			ContentType: "application/json; charset=utf-8",
+			Format:      errorsBody,
+		},
+		ErrorCodes: []errcode.ErrorCode{
+			errcode.ErrorCodeUnauthorized,
+		},
+	}
+
+	repositoryNotFoundResponseDescriptor = ResponseDescriptor{
+		Name:        "No Such Repository Error",
+		StatusCode:  http.StatusNotFound,
+		Description: "The repository is not known to the registry.",
+		Headers: []ParameterDescriptor{
+			{
+				Name:        "Content-Length",
+				Type:        "integer",
+				Description: "Length of the JSON response body.",
+				Format:      "<length>",
+			},
+		},
+		Body: BodyDescriptor{
+			ContentType: "application/json; charset=utf-8",
+			Format:      errorsBody,
+		},
+		ErrorCodes: []errcode.ErrorCode{
+			ErrorCodeNameUnknown,
+		},
+	}
+
+	deniedResponseDescriptor = ResponseDescriptor{
+		Name:        "Access Denied",
+		StatusCode:  http.StatusForbidden,
+		Description: "The client does not have required access to the repository.",
+		Headers: []ParameterDescriptor{
+			{
+				Name:        "Content-Length",
+				Type:        "integer",
+				Description: "Length of the JSON response body.",
+				Format:      "<length>",
+			},
+		},
+		Body: BodyDescriptor{
+			ContentType: "application/json; charset=utf-8",
+			Format:      errorsBody,
+		},
+		ErrorCodes: []errcode.ErrorCode{
+			errcode.ErrorCodeDenied,
+		},
+	}
+
+	tooManyRequestsDescriptor = ResponseDescriptor{
+		Name:        "Too Many Requests",
+		StatusCode:  http.StatusTooManyRequests,
+		Description: "The client made too many requests within a time interval.",
+		Headers: []ParameterDescriptor{
+			{
+				Name:        "Content-Length",
+				Type:        "integer",
+				Description: "Length of the JSON response body.",
+				Format:      "<length>",
+			},
+		},
+		Body: BodyDescriptor{
+			ContentType: "application/json; charset=utf-8",
+			Format:      errorsBody,
+		},
+		ErrorCodes: []errcode.ErrorCode{
+			errcode.ErrorCodeTooManyRequests,
+		},
+	}
+)
+
+const (
+	manifestBody = `{
+   "name": <name>,
+   "tag": <tag>,
+   "fsLayers": [
+      {
+         "blobSum": "<tarsum>"
+      },
+      ...
+   ]
+   ],
+   "history": <v1 images>,
+   "signature": <JWS>
+}`
+
+	errorsBody = `{
+	"errors": [
+	    {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+	    },
+	    ...
+	]
+}`
+)
+
+// APIDescriptor exports descriptions of the layout of the v2 registry API.
+var APIDescriptor = struct {
+	// RouteDescriptors provides a list of the routes available in the API.
+	RouteDescriptors []RouteDescriptor
+}{
+	RouteDescriptors: routeDescriptors,
+}
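Since APIDescriptor is plain data, one hedged illustration of consuming it (say, to dump a route listing while generating documentation; printRoutes is invented for this sketch):

    // Hypothetical helper that walks the exported route table.
    func printRoutes() {
    	for _, route := range v2.APIDescriptor.RouteDescriptors {
    		fmt.Printf("%s  %s\n", route.Name, route.Path)
    		for _, method := range route.Methods {
    			fmt.Printf("    %s: %s\n", method.Method, method.Description)
    		}
    	}
    }

The RouteDescriptor and related types that make this table self-describing follow.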
+// RouteDescriptor describes a route specified by name.
+type RouteDescriptor struct {
+	// Name is the name of the route, as specified in RouteNameXXX exports.
+	// These names should be considered a unique reference for a route. If
+	// the route is registered with gorilla, this is the name that will be
+	// used.
+	Name string
+
+	// Path is a gorilla/mux-compatible regexp that can be used to match the
+	// route. For any incoming method and path, only one route descriptor
+	// should match.
+	Path string
+
+	// Entity should be a short, human-readable description of the object
+	// targeted by the endpoint.
+	Entity string
+
+	// Description should provide an accurate overview of the functionality
+	// provided by the route.
+	Description string
+
+	// Methods should describe the various HTTP methods that may be used on
+	// this route, including request and response formats.
+	Methods []MethodDescriptor
+}
+
+// MethodDescriptor provides a description of the requests that may be
+// conducted with the target method.
+type MethodDescriptor struct {
+
+	// Method is an HTTP method, such as GET, PUT or POST.
+	Method string
+
+	// Description should provide an overview of the functionality provided by
+	// the covered method, suitable for use in documentation. Use of markdown
+	// here is encouraged.
+	Description string
+
+	// Requests is a slice of request descriptors enumerating how this
+	// endpoint may be used.
+	Requests []RequestDescriptor
+}
+
+// RequestDescriptor covers a particular set of headers and parameters that
+// can be used with the parent method. It's most helpful to have one
+// RequestDescriptor per API use case.
+type RequestDescriptor struct {
+	// Name provides a short identifier for the request, usable as a title or
+	// to provide quick context for the particular request.
+	Name string
+
+	// Description should cover the request's purpose, covering any details for
+	// this particular use case.
+	Description string
+
+	// Headers describes headers that must be used with the HTTP request.
+	Headers []ParameterDescriptor
+
+	// PathParameters enumerate the parameterized path components for the
+	// given request, as defined in the route's regular expression.
+	PathParameters []ParameterDescriptor
+
+	// QueryParameters provides a list of query parameters for the given
+	// request.
+	QueryParameters []ParameterDescriptor
+
+	// Body describes the format of the request body.
+	Body BodyDescriptor
+
+	// Successes enumerates the possible responses that are considered to be
+	// the result of a successful request.
+	Successes []ResponseDescriptor
+
+	// Failures covers the possible failures from this particular request.
+	Failures []ResponseDescriptor
+}
+
+// ResponseDescriptor describes the components of an API response.
+type ResponseDescriptor struct {
+	// Name provides a short identifier for the response, usable as a title or
+	// to provide quick context for the particular response.
+	Name string
+
+	// Description should provide a brief overview of the role of the
+	// response.
+	Description string
+
+	// StatusCode specifies the status received by this particular response.
+	StatusCode int
+
+	// Headers covers any headers that may be returned from the response.
+	Headers []ParameterDescriptor
+
+	// Fields describes any fields that may be present in the response.
+	Fields []ParameterDescriptor
+
+	// ErrorCodes enumerates the error codes that may be returned along with
+	// the response.
+	ErrorCodes []errcode.ErrorCode
+
+	// Body describes the body of the response, if any.
+	Body BodyDescriptor
+}
+
+// BodyDescriptor describes a request body and its expected content type. For
+// the most part, it should be example json or some placeholder for body
+// data in documentation.
+type BodyDescriptor struct {
+	ContentType string
+	Format      string
+}
+
+// ParameterDescriptor describes the format of a request parameter, which may
+// be a header, path parameter or query parameter.
+type ParameterDescriptor struct {
+	// Name is the name of the parameter, either of the path component or
+	// query parameter.
+// ParameterDescriptor describes the format of a request parameter, which may
+// be a header, path parameter or query parameter.
+type ParameterDescriptor struct {
+	// Name is the name of the parameter, either of the path component or
+	// query parameter.
+	Name string
+
+	// Type specifies the type of the parameter, such as string, integer, etc.
+	Type string
+
+	// Description provides a human-readable description of the parameter.
+	Description string
+
+	// Required indicates whether the parameter is required.
+	Required bool
+
+	// Format specifies the string format accepted by this parameter.
+	Format string
+
+	// Regexp is a compiled regular expression that can be used to validate
+	// the contents of the parameter.
+	Regexp *regexp.Regexp
+
+	// Examples provides multiple examples for the values that might be valid
+	// for this parameter.
+	Examples []string
+}
+
+var routeDescriptors = []RouteDescriptor{
+	{
+		Name:        RouteNameBase,
+		Path:        "/v2/",
+		Entity:      "Base",
+		Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`,
+		Methods: []MethodDescriptor{
+			{
+				Method:      "GET",
+				Description: "Check that the endpoint implements Docker Registry API V2.",
+				Requests: []RequestDescriptor{
+					{
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The API implements V2 protocol and is accessible.",
+								StatusCode:  http.StatusOK,
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "The registry does not implement the V2 API.",
+								StatusCode:  http.StatusNotFound,
+							},
+							unauthorizedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+				},
+			},
+		},
+	},
+	{
+		Name:        RouteNameTags,
+		Path:        "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list",
+		Entity:      "Tags",
+		Description: "Retrieve information about tags.",
+		Methods: []MethodDescriptor{
+			{
+				Method:      "GET",
+				Description: "Fetch the tags under the repository identified by `name`.",
+				Requests: []RequestDescriptor{
+					{
+						Name:        "Tags",
+						Description: "Return all tags for the repository.",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								StatusCode:  http.StatusOK,
+								Description: "A list of tags for the named repository.",
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "Length of the JSON response body.",
+										Format:      "",
+									},
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format: `{
+    "name": <name>,
+    "tags": [
+        <tag>,
+        ...
+    ]
+}`,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+					{
+						Name:            "Tags Paginated",
+						Description:     "Return a portion of the tags for the specified repository.",
+						PathParameters:  []ParameterDescriptor{nameParameterDescriptor},
+						QueryParameters: paginationParameters,
+						Successes: []ResponseDescriptor{
+							{
+								StatusCode:  http.StatusOK,
+								Description: "A list of tags for the named repository.",
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "Length of the JSON response body.",
+										Format:      "",
+									},
+									linkHeader,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format: `{
+    "name": <name>,
+    "tags": [
+        <tag>,
+        ...
+ ], +}`, + }, + }, + }, + Failures: []ResponseDescriptor{ + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameManifest, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", + Entity: "Manifest", + Description: "Create, update, delete and retrieve manifests.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + digestHeader, + }, + Body: BodyDescriptor{ + ContentType: "", + Format: manifestBody, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The name or reference was invalid.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "PUT", + Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + referenceParameterDescriptor, + }, + Body: BodyDescriptor{ + ContentType: "", + Format: manifestBody, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The canonical location url of the uploaded manifest.", + Format: "", + }, + contentLengthZeroHeader, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Manifest", + Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", + StatusCode: http.StatusBadRequest, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + ErrorCodeManifestInvalid, + ErrorCodeManifestUnverified, + ErrorCodeBlobUnknown, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + { + Name: "Missing Layer(s)", + Description: "One or more layers may be missing during a manifest upload. 
If so, the missing layers will be enumerated in the error response.",
+									StatusCode: http.StatusBadRequest,
+									ErrorCodes: []errcode.ErrorCode{
+										ErrorCodeBlobUnknown,
+									},
+									Body: BodyDescriptor{
+										ContentType: "application/json; charset=utf-8",
+										Format: `{
+    "errors": [{
+            "code":    "BLOB_UNKNOWN",
+            "message": "blob unknown to registry",
+            "detail": {
+                "digest": "<some digest>"
+            }
+        },
+        ...
+    ]
+}`,
+									},
+								},
+								{
+									Name:        "Not allowed",
+									Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason.",
+									StatusCode:  http.StatusMethodNotAllowed,
+									ErrorCodes: []errcode.ErrorCode{
+										errcode.ErrorCodeUnsupported,
+									},
+								},
+							},
+						},
+					},
+				},
+				{
+					Method:      "DELETE",
+					Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.",
+					Requests: []RequestDescriptor{
+						{
+							Headers: []ParameterDescriptor{
+								hostHeader,
+								authHeader,
+							},
+							PathParameters: []ParameterDescriptor{
+								nameParameterDescriptor,
+								referenceParameterDescriptor,
+							},
+							Successes: []ResponseDescriptor{
+								{
+									StatusCode: http.StatusAccepted,
+								},
+							},
+							Failures: []ResponseDescriptor{
+								{
+									Name:        "Invalid Name or Reference",
+									Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.",
+									StatusCode:  http.StatusBadRequest,
+									ErrorCodes: []errcode.ErrorCode{
+										ErrorCodeNameInvalid,
+										ErrorCodeTagInvalid,
+									},
+									Body: BodyDescriptor{
+										ContentType: "application/json; charset=utf-8",
+										Format:      errorsBody,
+									},
+								},
+								unauthorizedResponseDescriptor,
+								repositoryNotFoundResponseDescriptor,
+								deniedResponseDescriptor,
+								tooManyRequestsDescriptor,
+								{
+									Name:        "Unknown Manifest",
+									Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.",
+									StatusCode:  http.StatusNotFound,
+									ErrorCodes: []errcode.ErrorCode{
+										ErrorCodeNameUnknown,
+										ErrorCodeManifestUnknown,
+									},
+									Body: BodyDescriptor{
+										ContentType: "application/json; charset=utf-8",
+										Format:      errorsBody,
+									},
+								},
+								{
+									Name:        "Not allowed",
+									Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.",
+									StatusCode:  http.StatusMethodNotAllowed,
+									ErrorCodes: []errcode.ErrorCode{
+										errcode.ErrorCodeUnsupported,
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+
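Following the manifest GET descriptor above, a client fetches a manifest by tag and can read the Docker-Content-Digest header to learn its canonical digest. A minimal sketch, assuming a hypothetical host, repository, and tag; this is illustration, not code from the patch:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

// fetchManifest retrieves a manifest by tag and reports the
// Docker-Content-Digest header alongside the raw body.
func fetchManifest(host, name, tag string) ([]byte, string, error) {
	resp, err := http.Get(fmt.Sprintf("https://%s/v2/%s/manifests/%s", host, name, tag))
	if err != nil {
		return nil, "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, "", fmt.Errorf("unexpected status %s", resp.Status)
	}
	body, err := ioutil.ReadAll(resp.Body)
	return body, resp.Header.Get("Docker-Content-Digest"), err
}

func main() {
	body, dgst, err := fetchManifest("registry.example.com", "library/ubuntu", "latest")
	if err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	fmt.Printf("got %d bytes, digest %s\n", len(body), dgst)
}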
+	{
+		Name:        RouteNameBlob,
+		Path:        "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}",
+		Entity:      "Blob",
+		Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.",
+		Methods: []MethodDescriptor{
+			{
+				Method:      "GET",
+				Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.",
+				Requests: []RequestDescriptor{
+					{
+						Name: "Fetch Blob",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							digestPathParameter,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The blob identified by `digest` is available. The blob content will be present in the body of the response.",
+								StatusCode:  http.StatusOK,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "The length of the requested blob content.",
+										Format:      "",
+									},
+									digestHeader,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/octet-stream",
+									Format:      "",
+								},
+							},
+							{
+								Description: "The blob identified by `digest` is available at the provided location.",
+								StatusCode:  http.StatusTemporaryRedirect,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Location",
+										Type:        "url",
+										Description: "The location where the layer should be accessible.",
+										Format:      "",
+									},
+									digestHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeNameInvalid,
+									ErrorCodeDigestInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								Description: "The blob, identified by `name` and `digest`, is unknown to the registry.",
+								StatusCode:  http.StatusNotFound,
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeNameUnknown,
+									ErrorCodeBlobUnknown,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+					{
+						Name:        "Fetch Blob Part",
+						Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Ranges: bytes` is returned, range requests can be used to fetch partial content.",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							{
+								Name:        "Range",
+								Type:        "string",
+								Description: "HTTP Range header specifying blob chunk.",
+								Format:      "bytes=<start>-<end>",
+							},
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							digestPathParameter,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the response.",
+								StatusCode:  http.StatusPartialContent,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "The length of the requested blob chunk.",
+										Format:      "",
+									},
+									{
+										Name:        "Content-Range",
+										Type:        "byte range",
+										Description: "Content range of blob chunk.",
+										Format:      "bytes <start>-<end>/<size>",
+									},
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/octet-stream",
+									Format:      "",
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeNameInvalid,
+									ErrorCodeDigestInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								StatusCode: http.StatusNotFound,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeNameUnknown,
+									ErrorCodeBlobUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								Description: "The range specification cannot be satisfied for the requested content.
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Delete the blob identified by `name` and `digest`", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "0", + Format: "0", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + { + Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", + StatusCode: http.StatusMethodNotAllowed, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + + // TODO(stevvooe): We may want to add a PUT request here to + // kickoff an upload of a blob, integrated with the blob upload + // API. + }, + }, + + { + Name: RouteNameBlobUpload, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", + Entity: "Initiate Blob Upload", + Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", + Methods: []MethodDescriptor{ + { + Method: "POST", + Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", + Requests: []RequestDescriptor{ + { + Name: "Initiate Monolithic Blob Upload", + Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Length", + Type: "integer", + Format: "", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`,
+							},
+						},
+						Body: BodyDescriptor{
+							ContentType: "application/octet-stream",
+							Format:      "",
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The blob has been created in the registry and is available at the provided location.",
+								StatusCode:  http.StatusCreated,
+								Headers: []ParameterDescriptor{
+									{
+										Name:   "Location",
+										Type:   "url",
+										Format: "",
+									},
+									contentLengthZeroHeader,
+									dockerUploadUUIDHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Name:       "Invalid Name or Digest",
+								StatusCode: http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeDigestInvalid,
+									ErrorCodeNameInvalid,
+								},
+							},
+							{
+								Name:        "Not allowed",
+								Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason.",
+								StatusCode:  http.StatusMethodNotAllowed,
+								ErrorCodes: []errcode.ErrorCode{
+									errcode.ErrorCodeUnsupported,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+					{
+						Name:        "Initiate Resumable Blob Upload",
+						Description: "Initiate a resumable blob upload with an empty request body.",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							contentLengthZeroHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.",
+								StatusCode:  http.StatusAccepted,
+								Headers: []ParameterDescriptor{
+									contentLengthZeroHeader,
+									{
+										Name:        "Location",
+										Type:        "url",
+										Format:      "/v2/<name>/blobs/uploads/<uuid>",
+										Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+									},
+									{
+										Name:        "Range",
+										Format:      "0-0",
+										Description: "Range header indicating the progress of the upload.
When starting an upload, it will return an empty range, since no content has been received.", + }, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Mount Blob", + Description: "Mount a blob identified by the `mount` parameter from another repository.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "mount", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of blob to mount from the source repository.`, + }, + { + Name: "from", + Type: "query", + Format: "", + Regexp: reference.NameRegexp, + Description: `Name of the source repository.`, + }, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been mounted in the repository and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Name: "Not allowed", + Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlobUploadChunk, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", + Entity: "Blob Upload", + Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", + Requests: []RequestDescriptor{ + { + Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Progress", + Description: "The upload is known and in progress. 
The last received offset is available in the `Range` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "PATCH", + Description: "Upload a chunk of data for the specified upload.", + Requests: []RequestDescriptor{ + { + Name: "Stream upload", + Description: "Upload a stream of data to upload without completing the upload.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Data Accepted", + Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Chunked upload", + Description: "Upload a chunk of data to specified upload without completing the upload. 
The data will be uploaded to the specified Content Range.",
+							PathParameters: []ParameterDescriptor{
+								nameParameterDescriptor,
+								uuidParameterDescriptor,
+							},
+							Headers: []ParameterDescriptor{
+								hostHeader,
+								authHeader,
+								{
+									Name:        "Content-Range",
+									Type:        "header",
+									Format:      "<start of range>-<end of range, inclusive>",
+									Required:    true,
+									Description: "Range of bytes identifying the desired block of content represented by the body. Start must be the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.",
+								},
+								{
+									Name:        "Content-Length",
+									Type:        "integer",
+									Format:      "",
+									Description: "Length of the chunk being uploaded, corresponding to the length of the request body.",
+								},
+							},
+							Body: BodyDescriptor{
+								ContentType: "application/octet-stream",
+								Format:      "",
+							},
+							Successes: []ResponseDescriptor{
+								{
+									Name:        "Chunk Accepted",
+									Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.",
+									StatusCode:  http.StatusNoContent,
+									Headers: []ParameterDescriptor{
+										{
+											Name:        "Location",
+											Type:        "url",
+											Format:      "/v2/<name>/blobs/uploads/<uuid>",
+											Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+										},
+										{
+											Name:        "Range",
+											Type:        "header",
+											Format:      "0-<offset>",
+											Description: "Range indicating the current progress of the upload.",
+										},
+										contentLengthZeroHeader,
+										dockerUploadUUIDHeader,
+									},
+								},
+							},
+							Failures: []ResponseDescriptor{
+								{
+									Description: "There was an error processing the upload and it must be restarted.",
+									StatusCode:  http.StatusBadRequest,
+									ErrorCodes: []errcode.ErrorCode{
+										ErrorCodeDigestInvalid,
+										ErrorCodeNameInvalid,
+										ErrorCodeBlobUploadInvalid,
+									},
+									Body: BodyDescriptor{
+										ContentType: "application/json; charset=utf-8",
+										Format:      errorsBody,
+									},
+								},
+								{
+									Description: "The upload is unknown to the registry. The upload must be restarted.",
+									StatusCode:  http.StatusNotFound,
+									ErrorCodes: []errcode.ErrorCode{
+										ErrorCodeBlobUploadUnknown,
+									},
+									Body: BodyDescriptor{
+										ContentType: "application/json; charset=utf-8",
+										Format:      errorsBody,
+									},
+								},
+								{
+									Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.",
+									StatusCode:  http.StatusRequestedRangeNotSatisfiable,
+								},
+								unauthorizedResponseDescriptor,
+								repositoryNotFoundResponseDescriptor,
+								deniedResponseDescriptor,
+								tooManyRequestsDescriptor,
+							},
+						},
+					},
+				},
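A client drives the chunked-upload PATCH descriptor above by sending each chunk with the non-standard Content-Range header and tracking the returned Range header. A hedged sketch under those assumptions; uploadURL is the Location value previously handed back by the registry, and the package name is hypothetical:

package uploadsketch

import (
	"bytes"
	"fmt"
	"net/http"
)

// patchChunk sends one chunk of an open upload. offset is the end offset
// from the last Range header plus one. net/http derives Content-Length from
// the bytes.Reader automatically.
func patchChunk(client *http.Client, uploadURL string, offset int64, chunk []byte) (*http.Response, error) {
	req, err := http.NewRequest("PATCH", uploadURL, bytes.NewReader(chunk))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	// Non-standard use of Content-Range, as the descriptor notes.
	req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", offset, offset+int64(len(chunk))-1))
	return client.Do(req)
}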
+				{
+					Method:      "PUT",
+					Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.",
+					Requests: []RequestDescriptor{
+						{
+							Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.",
+							Headers: []ParameterDescriptor{
+								hostHeader,
+								authHeader,
+								{
+									Name:        "Content-Length",
+									Type:        "integer",
+									Format:      "",
+									Description: "Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.",
+								},
+							},
+							PathParameters: []ParameterDescriptor{
+								nameParameterDescriptor,
+								uuidParameterDescriptor,
+							},
+							QueryParameters: []ParameterDescriptor{
+								{
+									Name:        "digest",
+									Type:        "string",
+									Format:      "",
+									Regexp:      digest.DigestRegexp,
+									Required:    true,
+									Description: `Digest of uploaded blob.`,
+								},
+							},
+							Body: BodyDescriptor{
+								ContentType: "application/octet-stream",
+								Format:      "",
+							},
+							Successes: []ResponseDescriptor{
+								{
+									Name:        "Upload Complete",
+									Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.",
+									StatusCode:  http.StatusNoContent,
+									Headers: []ParameterDescriptor{
+										{
+											Name:        "Location",
+											Type:        "url",
+											Format:      "",
+											Description: "The canonical location of the blob for retrieval.",
+										},
+										{
+											Name:        "Content-Range",
+											Type:        "header",
+											Format:      "<start of range>-<end of range, inclusive>",
+											Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.",
+										},
+										contentLengthZeroHeader,
+										digestHeader,
+									},
+								},
+							},
+							Failures: []ResponseDescriptor{
+								{
+									Description: "There was an error processing the upload and it must be restarted.",
+									StatusCode:  http.StatusBadRequest,
+									ErrorCodes: []errcode.ErrorCode{
+										ErrorCodeDigestInvalid,
+										ErrorCodeNameInvalid,
+										ErrorCodeBlobUploadInvalid,
+										errcode.ErrorCodeUnsupported,
+									},
+									Body: BodyDescriptor{
+										ContentType: "application/json; charset=utf-8",
+										Format:      errorsBody,
+									},
+								},
+								{
+									Description: "The upload is unknown to the registry. The upload must be restarted.",
+									StatusCode:  http.StatusNotFound,
+									ErrorCodes: []errcode.ErrorCode{
+										ErrorCodeBlobUploadUnknown,
+									},
+									Body: BodyDescriptor{
+										ContentType: "application/json; charset=utf-8",
+										Format:      errorsBody,
+									},
+								},
+								unauthorizedResponseDescriptor,
+								repositoryNotFoundResponseDescriptor,
+								deniedResponseDescriptor,
+								tooManyRequestsDescriptor,
+							},
+						},
+					},
+				},
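Completing an upload, per the PUT descriptor above, means appending the required digest query parameter to the upload URL; a 204 No Content response indicates the blob was accepted. A hedged sketch with assumed inputs (uploadURL, dgst) and a hypothetical package name:

package uploadsketch

import (
	"fmt"
	"net/http"
	"net/url"
)

// completeUpload finishes an open upload by PUTting to the upload URL with
// the digest query parameter. dgst is the client-computed digest of the
// complete blob.
func completeUpload(client *http.Client, uploadURL, dgst string) error {
	u, err := url.Parse(uploadURL)
	if err != nil {
		return err
	}
	q := u.Query()
	q.Set("digest", dgst)
	u.RawQuery = q.Encode()

	req, err := http.NewRequest("PUT", u.String(), nil)
	if err != nil {
		return err
	}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("upload not accepted: %s", resp.Status)
	}
	return nil
}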
+				{
+					Method:      "DELETE",
+					Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually time out.",
+					Requests: []RequestDescriptor{
+						{
+							Description: "Cancel the upload specified by `uuid`.",
+							PathParameters: []ParameterDescriptor{
+								nameParameterDescriptor,
+								uuidParameterDescriptor,
+							},
+							Headers: []ParameterDescriptor{
+								hostHeader,
+								authHeader,
+								contentLengthZeroHeader,
+							},
+							Successes: []ResponseDescriptor{
+								{
+									Name:        "Upload Deleted",
+									Description: "The upload has been successfully deleted.",
+									StatusCode:  http.StatusNoContent,
+									Headers: []ParameterDescriptor{
+										contentLengthZeroHeader,
+									},
+								},
+							},
+							Failures: []ResponseDescriptor{
+								{
+									Description: "An error was encountered processing the delete. The client may ignore this error.",
+									StatusCode:  http.StatusBadRequest,
+									ErrorCodes: []errcode.ErrorCode{
+										ErrorCodeNameInvalid,
+										ErrorCodeBlobUploadInvalid,
+									},
+									Body: BodyDescriptor{
+										ContentType: "application/json; charset=utf-8",
+										Format:      errorsBody,
+									},
+								},
+								{
+									Description: "The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted.",
+									StatusCode:  http.StatusNotFound,
+									ErrorCodes: []errcode.ErrorCode{
+										ErrorCodeBlobUploadUnknown,
+									},
+									Body: BodyDescriptor{
+										ContentType: "application/json; charset=utf-8",
+										Format:      errorsBody,
+									},
+								},
+								unauthorizedResponseDescriptor,
+								repositoryNotFoundResponseDescriptor,
+								deniedResponseDescriptor,
+								tooManyRequestsDescriptor,
+							},
+						},
+					},
+				},
+			},
+		},
+	},
+	{
+		Name:        RouteNameCatalog,
+		Path:        "/v2/_catalog",
+		Entity:      "Catalog",
+		Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can determine that a listed repository is available, but the absence of a repository from the list carries no meaning.",
+		Methods: []MethodDescriptor{
+			{
+				Method:      "GET",
+				Description: "Retrieve a sorted, JSON list of repositories available in the registry.",
+				Requests: []RequestDescriptor{
+					{
+						Name:        "Catalog Fetch",
+						Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.",
+						Successes: []ResponseDescriptor{
+							{
+								Description: "Returns the unabridged list of repositories as a JSON response.",
+								StatusCode:  http.StatusOK,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "Length of the JSON response body.",
+										Format:      "",
+									},
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format: `{
+    "repositories": [
+        <name>,
+        ...
+    ]
+}`,
+								},
+							},
+						},
+					},
+					{
+						Name:            "Catalog Fetch Paginated",
+						Description:     "Return the specified portion of repositories.",
+						QueryParameters: paginationParameters,
+						Successes: []ResponseDescriptor{
+							{
+								StatusCode: http.StatusOK,
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format: `{
+    "repositories": [
+        <name>,
+        ...
+    ],
+    "next": "<url>?last=<name>&n=<last n value>"
+}`,
+								},
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "Length of the JSON response body.",
+										Format:      "",
+									},
+									linkHeader,
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	},
+}
+
+var routeDescriptorsMap map[string]RouteDescriptor
+
+func init() {
+	routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors))
+
+	for _, descriptor := range routeDescriptors {
+		routeDescriptorsMap[descriptor.Name] = descriptor
+	}
+}
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/doc.go b/vendor/src/github.com/docker/distribution/registry/api/v2/doc.go
new file mode 100644
index 00000000..cde01195
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/registry/api/v2/doc.go
@@ -0,0 +1,9 @@
+// Package v2 describes routes, urls and the error codes used in the Docker
+// Registry JSON HTTP API V2. In addition to declarations, descriptors are
+// provided for routes and error codes that can be used for implementation and
+// automatically generating documentation.
+//
+// Definitions here are considered to be locked down for the V2 registry API.
+// Any changes must be considered carefully and should not proceed without a
+// change proposal in docker core.
+package v2
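The pagination scheme described for the catalog (and tags) endpoints is driven by the n and last query parameters. A sketch of a paging client, assuming a hypothetical registry host; stopping when a page comes back short is a heuristic, and a production client should follow the RFC5988 Link header instead:

package catalogsketch

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// listRepositories pages through /v2/_catalog, collecting repository names.
func listRepositories(host string, pageSize int) ([]string, error) {
	var all []string
	last := ""
	for {
		u := fmt.Sprintf("https://%s/v2/_catalog?n=%d", host, pageSize)
		if last != "" {
			u += "&last=" + url.QueryEscape(last)
		}
		resp, err := http.Get(u)
		if err != nil {
			return nil, err
		}
		var page struct {
			Repositories []string `json:"repositories"`
		}
		err = json.NewDecoder(resp.Body).Decode(&page)
		resp.Body.Close()
		if err != nil {
			return nil, err
		}
		all = append(all, page.Repositories...)
		if len(page.Repositories) < pageSize {
			return all, nil
		}
		last = page.Repositories[len(page.Repositories)-1]
	}
}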
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/src/github.com/docker/distribution/registry/api/v2/errors.go
new file mode 100644
index 00000000..97d6923a
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/registry/api/v2/errors.go
@@ -0,0 +1,136 @@
+package v2
+
+import (
+	"net/http"
+
+	"github.com/docker/distribution/registry/api/errcode"
+)
+
+const errGroup = "registry.api.v2"
+
+var (
+	// ErrorCodeDigestInvalid is returned when uploading a blob if the
+	// provided digest does not match the blob contents.
+	ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "DIGEST_INVALID",
+		Message: "provided digest did not match uploaded content",
+		Description: `When a blob is uploaded, the registry will check that
+		the content matches the digest provided by the client. The error may
+		include a detail structure with the key "digest", including the
+		invalid digest string. This error may also be returned when a manifest
+		includes an invalid layer digest.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeSizeInvalid is returned when uploading a blob if the provided
+	// size does not match the content length.
+	ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "SIZE_INVALID",
+		Message: "provided length did not match content length",
+		Description: `When a layer is uploaded, the provided size will be
+		checked against the uploaded content. If they do not match, this error
+		will be returned.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeNameInvalid is returned when the name in the manifest does not
+	// match the provided name.
+	ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "NAME_INVALID",
+		Message: "invalid repository name",
+		Description: `Invalid repository name encountered either during
+		manifest validation or any API operation.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeTagInvalid is returned when the tag in the manifest does not
+	// match the provided tag.
+	ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "TAG_INVALID",
+		Message: "manifest tag did not match URI",
+		Description: `During a manifest upload, if the tag in the manifest
+		does not match the uri tag, this error will be returned.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeNameUnknown is returned when the repository name is not known.
+	ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "NAME_UNKNOWN",
+		Message: "repository name not known to registry",
+		Description: `This is returned if the name used during an operation is
+		unknown to the registry.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeManifestUnknown is returned when an image manifest is unknown.
+	ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "MANIFEST_UNKNOWN",
+		Message: "manifest unknown",
+		Description: `This error is returned when the manifest, identified by
+		name and tag, is unknown to the repository.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeManifestInvalid is returned when an image manifest is invalid,
+	// typically during a PUT operation. This error encompasses all errors
+	// encountered during manifest validation that aren't signature errors.
+	ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "MANIFEST_INVALID",
+		Message: "manifest invalid",
+		Description: `During upload, manifests undergo several checks ensuring
+		validity. If those checks fail, this error may be returned, unless a
+		more specific error is included. The detail will contain information
+		about the failed validation.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeManifestUnverified is returned when the manifest fails
+	// signature verification.
+	ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "MANIFEST_UNVERIFIED",
+		Message: "manifest failed signature verification",
+		Description: `During manifest upload, if the manifest fails signature
+		verification, this error will be returned.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeManifestBlobUnknown is returned when a manifest blob is
+	// unknown to the registry.
+	ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "MANIFEST_BLOB_UNKNOWN",
+		Message: "blob unknown to registry",
+		Description: `This error may be returned when a manifest blob is
+		unknown to the registry.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeBlobUnknown is returned when a blob is unknown to the
+	// registry. This can happen when the manifest references a nonexistent
+	// layer or the result is not found by a blob fetch.
+	ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "BLOB_UNKNOWN",
+		Message: "blob unknown to registry",
+		Description: `This error may be returned when a blob is unknown to the
+		registry in a specified repository. This can be returned with a
+		standard get or if a manifest references an unknown layer during
+		upload.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeBlobUploadUnknown is returned when an upload is unknown.
+	ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "BLOB_UPLOAD_UNKNOWN",
+		Message: "blob upload unknown to registry",
+		Description: `If a blob upload has been cancelled or was never
+		started, this error code may be returned.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeBlobUploadInvalid is returned when an upload is invalid.
+	ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "BLOB_UPLOAD_INVALID",
+		Message: "blob upload invalid",
+		Description: `The blob upload encountered an error and can no
+		longer proceed.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+)
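These registered codes plug into the vendored errcode package, which serializes them into the JSON error envelope shown earlier. A hedged sketch of a server handler surfacing NAME_UNKNOWN; the package name is hypothetical and repositoryExists stands in for a real storage lookup:

package handlers

import (
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
	v2 "github.com/docker/distribution/registry/api/v2"
	"github.com/gorilla/mux"
)

// repositoryExists is a placeholder for a real storage lookup.
func repositoryExists(name string) bool { return false }

// tagsHandler rejects requests for unknown repositories with the registered
// NAME_UNKNOWN code; errcode.ServeJSON writes the JSON error body and the
// 404 status carried by the descriptor.
func tagsHandler(w http.ResponseWriter, r *http.Request) {
	name := mux.Vars(r)["name"]
	if !repositoryExists(name) {
		errcode.ServeJSON(w, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": name}))
		return
	}
	// ... write the tag list ...
}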
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/headerparser.go b/vendor/src/github.com/docker/distribution/registry/api/v2/headerparser.go
new file mode 100644
index 00000000..9bc41a3a
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/registry/api/v2/headerparser.go
@@ -0,0 +1,161 @@
+package v2
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+	"unicode"
+)
+
+var (
+	// according to rfc7230
+	reToken            = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`)
+	reQuotedValue      = regexp.MustCompile(`^[^\\"]+`)
+	reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`)
+)
+
+// parseForwardedHeader is a benevolent parser of the Forwarded header defined in rfc7239. The header contains
+// a comma-separated list of forwarding key-value pairs. Each list element is set by a single proxy. The
+// function parses only the first element of the list, which is set by the very first proxy. It returns a map
+// of corresponding key-value pairs and the unparsed remainder of the input string.
+//
+// Examples of Forwarded header values:
+//
+// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown
+// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80"
+//
+// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into
+// {"for": "192.0.2.43:443", "host": "registry.example.org"}.
+func parseForwardedHeader(forwarded string) (map[string]string, string, error) {
+	// Following are states of forwarded header parser. Any state could transition to a failure.
+	const (
+		// terminating state; can transition to Parameter
+		stateElement = iota
+		// terminating state; can transition to KeyValueDelimiter
+		stateParameter
+		// can transition to Value
+		stateKeyValueDelimiter
+		// can transition to one of { QuotedValue, PairEnd }
+		stateValue
+		// can transition to one of { EscapedCharacter, PairEnd }
+		stateQuotedValue
+		// can transition to one of { QuotedValue }
+		stateEscapedCharacter
+		// terminating state; can transition to one of { Parameter, Element }
+		statePairEnd
+	)
+
+	var (
+		parameter string
+		value     string
+		parse     = forwarded[:]
+		res       = map[string]string{}
+		state     = stateElement
+	)
+
+Loop:
+	for {
+		// skip spaces unless in quoted value
+		if state != stateQuotedValue && state != stateEscapedCharacter {
+			parse = strings.TrimLeftFunc(parse, unicode.IsSpace)
+		}
+
+		if len(parse) == 0 {
+			if state != stateElement && state != statePairEnd && state != stateParameter {
+				return nil, parse, fmt.Errorf("unexpected end of input")
+			}
+			// terminating
+			break
+		}
+
+		switch state {
+		// terminate at list element delimiter
+		case stateElement:
+			if parse[0] == ',' {
+				parse = parse[1:]
+				break Loop
+			}
+			state = stateParameter
+
+		// parse parameter (the key of key-value pair)
+		case stateParameter:
+			match := reToken.FindString(parse)
+			if len(match) == 0 {
+				return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse))
+			}
+			parameter = strings.ToLower(match)
+			parse = parse[len(match):]
+			state = stateKeyValueDelimiter
+
+		// parse '='
+		case stateKeyValueDelimiter:
+			if parse[0] != '=' {
+				return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse))
+			}
+			parse = parse[1:]
+			state = stateValue
+
+		// parse value or quoted value
+		case stateValue:
+			if parse[0] == '"' {
+				parse = parse[1:]
+				state = stateQuotedValue
+			} else {
+				value = reToken.FindString(parse)
+				if len(value) == 0 {
+					return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse))
+				}
+				if _, exists := res[parameter]; exists {
+					return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse))
+				}
+				res[parameter] = value
+				parse = parse[len(value):]
+				value = ""
+				state = statePairEnd
+			}
+
+		// parse a part of quoted value until the first backslash
+		case stateQuotedValue:
+			match := reQuotedValue.FindString(parse)
+			value += match
+			parse = parse[len(match):]
+			switch {
+			case len(parse) == 0:
+				return nil, parse, fmt.Errorf("unterminated quoted string")
+			case parse[0] == '"':
+				res[parameter] = value
+				value = ""
+				parse = parse[1:]
+				state = statePairEnd
+			case parse[0] == '\\':
+				parse = parse[1:]
+				state = stateEscapedCharacter
+			}
+		// parse escaped character in a quoted string, ignore the backslash
+		// transition back to QuotedValue state
+		case stateEscapedCharacter:
+			c := reEscapedCharacter.FindString(parse)
+			if len(c) == 0 {
+				return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1)
+			}
+			value += c
+			parse = parse[1:]
+			state = stateQuotedValue
+
+		// expect either a new key-value pair, new list or end of input
+		case statePairEnd:
+			switch parse[0] {
+			case ';':
+				parse = parse[1:]
+				state = stateParameter
+			case ',':
+				state = stateElement
+			default:
+				return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse))
+			}
+		}
+	}
+
+	return res, parse, nil
+}
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/routes.go b/vendor/src/github.com/docker/distribution/registry/api/v2/routes.go
new file mode 100644
index 00000000..5b80d5be
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/registry/api/v2/routes.go
@@ -0,0 +1,49 @@
+package v2
+
+import "github.com/gorilla/mux"
+
+// The following are definitions of the name under which all V2 routes are
+// registered. These symbols can be used to look up a route based on the name.
+const (
+	RouteNameBase            = "base"
+	RouteNameManifest        = "manifest"
+	RouteNameTags            = "tags"
+	RouteNameBlob            = "blob"
+	RouteNameBlobUpload      = "blob-upload"
+	RouteNameBlobUploadChunk = "blob-upload-chunk"
+	RouteNameCatalog         = "catalog"
+)
+
+var allEndpoints = []string{
+	RouteNameManifest,
+	RouteNameCatalog,
+	RouteNameTags,
+	RouteNameBlob,
+	RouteNameBlobUpload,
+	RouteNameBlobUploadChunk,
+}
+
+// Router builds a gorilla router with named routes for the various API
+// methods. This can be used directly by both server implementations and
+// clients.
+func Router() *mux.Router {
+	return RouterWithPrefix("")
+}
+
+// RouterWithPrefix builds a gorilla router with a configured prefix
+// on all routes.
+func RouterWithPrefix(prefix string) *mux.Router {
+	rootRouter := mux.NewRouter()
+	router := rootRouter
+	if prefix != "" {
+		router = router.PathPrefix(prefix).Subrouter()
+	}
+
+	router.StrictSlash(true)
+
+	for _, descriptor := range routeDescriptors {
+		router.Path(descriptor.Path).Name(descriptor.Name)
+	}
+
+	return rootRouter
+}
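Router and RouterWithPrefix only name the routes; they attach no handlers. A client or test can still use the returned router to classify a request path, as in this sketch (that a handler-less route still matches is an assumption about gorilla/mux behavior):

package main

import (
	"fmt"
	"net/http"

	v2 "github.com/docker/distribution/registry/api/v2"
	"github.com/gorilla/mux"
)

func main() {
	router := v2.Router()
	req, _ := http.NewRequest("GET", "/v2/library/ubuntu/tags/list", nil)

	var match mux.RouteMatch
	if router.Match(req, &match) {
		// Expected to print: tags library/ubuntu
		fmt.Println(match.Route.GetName(), match.Vars["name"])
	}
}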
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/src/github.com/docker/distribution/registry/api/v2/urls.go
new file mode 100644
index 00000000..e2e242ea
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/registry/api/v2/urls.go
@@ -0,0 +1,314 @@
+package v2
+
+import (
+	"net"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/docker/distribution/reference"
+	"github.com/gorilla/mux"
+)
+
+// URLBuilder creates registry API urls from a single base endpoint. It can be
+// used to create urls for use in a registry client or server.
+//
+// All urls will be created from the given base, including the api version.
+// For example, if a root of "/foo/" is provided, urls generated will fall
+// under "/foo/v2/...". Most applications will only provide a scheme, host and
+// port, such as "https://localhost:5000/".
+type URLBuilder struct {
+	root     *url.URL // url root (ie http://localhost/)
+	router   *mux.Router
+	relative bool
+}
+
+// NewURLBuilder creates a URLBuilder with the provided root url object.
+func NewURLBuilder(root *url.URL, relative bool) *URLBuilder {
+	return &URLBuilder{
+		root:     root,
+		router:   Router(),
+		relative: relative,
+	}
+}
+
+// NewURLBuilderFromString works identically to NewURLBuilder except it takes
+// a string argument for the root, returning an error if it is not a valid
+// url.
+func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) {
+	u, err := url.Parse(root)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewURLBuilder(u, relative), nil
+}
+
+// NewURLBuilderFromRequest uses information from an *http.Request to
+// construct the root url.
+func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
+	var scheme string
+
+	forwardedProto := r.Header.Get("X-Forwarded-Proto")
+	// TODO: log the error
+	forwardedHeader, _, _ := parseForwardedHeader(r.Header.Get("Forwarded"))
+
+	switch {
+	case len(forwardedProto) > 0:
+		scheme = forwardedProto
+	case len(forwardedHeader["proto"]) > 0:
+		scheme = forwardedHeader["proto"]
+	case r.TLS != nil:
+		scheme = "https"
+	case len(r.URL.Scheme) > 0:
+		scheme = r.URL.Scheme
+	default:
+		scheme = "http"
+	}
+
+	host := r.Host
+
+	if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 {
+		// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
+		// comma-separated list of hosts, to which each proxy appends the
+		// requested host. We want to grab the first from this comma-separated
+		// list.
+		hosts := strings.SplitN(forwardedHost, ",", 2)
+		host = strings.TrimSpace(hosts[0])
+	} else if addr, exists := forwardedHeader["for"]; exists {
+		host = addr
+	} else if h, exists := forwardedHeader["host"]; exists {
+		host = h
+	}
+
+	portLessHost, port := host, ""
+	if !isIPv6Address(portLessHost) {
+		// with go 1.6, this would treat the last part of an IPv6 address as a port
+		portLessHost, port, _ = net.SplitHostPort(host)
+	}
+	if forwardedPort := r.Header.Get("X-Forwarded-Port"); len(port) == 0 && len(forwardedPort) > 0 {
+		ports := strings.SplitN(forwardedPort, ",", 2)
+		forwardedPort = strings.TrimSpace(ports[0])
+		if _, err := strconv.ParseInt(forwardedPort, 10, 32); err == nil {
+			port = forwardedPort
+		}
+	}
+
+	if len(portLessHost) > 0 {
+		host = portLessHost
+	}
+	if len(port) > 0 {
+		// remove enclosing brackets of an ipv6 address, otherwise they will be duplicated
+		if len(host) > 1 && host[0] == '[' && host[len(host)-1] == ']' {
+			host = host[1 : len(host)-1]
+		}
+		// JoinHostPort properly encloses ipv6 addresses in square brackets
+		host = net.JoinHostPort(host, port)
+	} else if isIPv6Address(host) && host[0] != '[' {
+		// ipv6 needs to be enclosed in square brackets in urls
+		host = "[" + host + "]"
+	}
+
+	basePath := routeDescriptorsMap[RouteNameBase].Path
+
+	requestPath := r.URL.Path
+	index := strings.Index(requestPath, basePath)
+
+	u := &url.URL{
+		Scheme: scheme,
+		Host:   host,
+	}
+
+	if index > 0 {
+		// N.B. index+1 is important because we want to include the trailing /
+		u.Path = requestPath[0 : index+1]
+	}
+
+	return NewURLBuilder(u, relative)
+}
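Putting the constructors together with the Build* methods defined below, a registry client can derive endpoint URLs from a single root. A minimal sketch, assuming the example host and repository name; reference.WithName comes from the vendored reference package:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
	v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
	ub, err := v2.NewURLBuilderFromString("https://registry.example.com", false)
	if err != nil {
		panic(err)
	}
	named, err := reference.WithName("library/ubuntu")
	if err != nil {
		panic(err)
	}
	tagsURL, err := ub.BuildTagsURL(named)
	if err != nil {
		panic(err)
	}
	// Expected: https://registry.example.com/v2/library/ubuntu/tags/list
	fmt.Println(tagsURL)
}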
+// BuildBaseURL constructs a base url for the API, typically just "/v2/".
+func (ub *URLBuilder) BuildBaseURL() (string, error) {
+	route := ub.cloneRoute(RouteNameBase)
+
+	baseURL, err := route.URL()
+	if err != nil {
+		return "", err
+	}
+
+	return baseURL.String(), nil
+}
+
+// BuildCatalogURL constructs a url to get a catalog of repositories.
+func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameCatalog)
+
+	catalogURL, err := route.URL()
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(catalogURL, values...).String(), nil
+}
+
+// BuildTagsURL constructs a url to list the tags in the named repository.
+func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) {
+	route := ub.cloneRoute(RouteNameTags)
+
+	tagsURL, err := route.URL("name", name.Name())
+	if err != nil {
+		return "", err
+	}
+
+	return tagsURL.String(), nil
+}
+
+// BuildManifestURL constructs a url for the manifest identified by name and
+// reference. The argument reference may be either a tag or digest.
+func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) {
+	route := ub.cloneRoute(RouteNameManifest)
+
+	tagOrDigest := ""
+	switch v := ref.(type) {
+	case reference.Tagged:
+		tagOrDigest = v.Tag()
+	case reference.Digested:
+		tagOrDigest = v.Digest().String()
+	}
+
+	manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest)
+	if err != nil {
+		return "", err
+	}
+
+	return manifestURL.String(), nil
+}
+
+// BuildBlobURL constructs the url for the blob identified by name and dgst.
+func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) {
+	route := ub.cloneRoute(RouteNameBlob)
+
+	layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String())
+	if err != nil {
+		return "", err
+	}
+
+	return layerURL.String(), nil
+}
+
+// BuildBlobUploadURL constructs a url to begin a blob upload in the
+// repository identified by name.
+func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameBlobUpload)
+
+	uploadURL, err := route.URL("name", name.Name())
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid,
+// including any url values. This should generally not be used by clients, as
+// this url is provided by server implementations during the blob upload
+// process.
+func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameBlobUploadChunk)
+
+	uploadURL, err := route.URL("name", name.Name(), "uuid", uuid)
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// cloneRoute returns a clone of the named route from the router. Routes
+// must be cloned to avoid modifying them during url generation.
+func (ub *URLBuilder) cloneRoute(name string) clonedRoute {
+	route := new(mux.Route)
+	root := new(url.URL)
+
+	*route = *ub.router.GetRoute(name) // clone the route
+	*root = *ub.root
+
+	return clonedRoute{Route: route, root: root, relative: ub.relative}
+}
+
+type clonedRoute struct {
+	*mux.Route
+	root     *url.URL
+	relative bool
+}
+
+func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) {
+	routeURL, err := cr.Route.URL(pairs...)
+ if err != nil { + return nil, err + } + + if cr.relative { + return routeURL, nil + } + + if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { + routeURL.Path = routeURL.Path[1:] + } + + url := cr.root.ResolveReference(routeURL) + url.Scheme = cr.root.Scheme + return url, nil +} + +// appendValuesURL appends the parameters to the url. +func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { + merged := u.Query() + + for _, v := range values { + for k, vv := range v { + merged[k] = append(merged[k], vv...) + } + } + + u.RawQuery = merged.Encode() + return u +} + +// appendValues appends the parameters to the url. Panics if the string is not +// a url. +func appendValues(u string, values ...url.Values) string { + up, err := url.Parse(u) + + if err != nil { + panic(err) // should never happen + } + + return appendValuesURL(up, values...).String() +} + +// isIPv6Address returns true if given string is a valid IPv6 address. No port is allowed. The address may be +// enclosed in square brackets. +func isIPv6Address(host string) bool { + if len(host) > 1 && host[0] == '[' && host[len(host)-1] == ']' { + host = host[1 : len(host)-1] + } + // The IPv6 scoped addressing zone identifier starts after the last percent sign. + if i := strings.LastIndexByte(host, '%'); i > 0 { + host = host[:i] + } + ip := net.ParseIP(host) + if ip == nil { + return false + } + if ip.To16() == nil { + return false + } + if ip.To4() == nil { + return true + } + // dot can be present in ipv4-mapped address, it needs to come after a colon though + i := strings.IndexAny(host, ":.") + return i >= 0 && host[i] == ':' +} diff --git a/vendor/src/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/src/github.com/docker/distribution/registry/client/auth/challenge/addr.go new file mode 100644 index 00000000..2c3ebe16 --- /dev/null +++ b/vendor/src/github.com/docker/distribution/registry/client/auth/challenge/addr.go @@ -0,0 +1,27 @@ +package challenge + +import ( + "net/url" + "strings" +) + +// FROM: https://golang.org/src/net/http/http.go +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. +func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +// FROM: http://golang.org/src/net/http/transport.go +var portMap = map[string]string{ + "http": "80", + "https": "443", +} + +// canonicalAddr returns url.Host but always with a ":port" suffix +// FROM: http://golang.org/src/net/http/transport.go +func canonicalAddr(url *url.URL) string { + addr := url.Host + if !hasPort(addr) { + return addr + ":" + portMap[url.Scheme] + } + return addr +} diff --git a/vendor/src/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/src/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go new file mode 100644 index 00000000..c9bdfc35 --- /dev/null +++ b/vendor/src/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go @@ -0,0 +1,237 @@ +package challenge + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "sync" +) + +// Challenge carries information from a WWW-Authenticate response header. +// See RFC 2617. +type Challenge struct { + // Scheme is the auth-scheme according to RFC 2617 + Scheme string + + // Parameters are the auth-params according to RFC 2617 + Parameters map[string]string +} + +// Manager manages the challenges for endpoints. 
+// The challenges are pulled out of HTTP responses. Only
+// responses which expect challenges should be added to
+// the manager, since a non-unauthorized request will be
+// viewed as not requiring challenges.
+type Manager interface {
+	// GetChallenges returns the challenges for the given
+	// endpoint URL.
+	GetChallenges(endpoint url.URL) ([]Challenge, error)
+
+	// AddResponse adds the response to the challenge
+	// manager. The challenges will be parsed out of
+	// the WWW-Authenticate headers and added to the
+	// URL which produced the response. If the
+	// response was authorized, any challenges for the
+	// endpoint will be cleared.
+	AddResponse(resp *http.Response) error
+}
+
+// NewSimpleManager returns an instance of
+// Manager which only maps endpoints to challenges
+// based on the responses which have been added to the
+// manager. The simple manager will make no attempt to
+// perform requests on the endpoints or cache the responses
+// to a backend.
+func NewSimpleManager() Manager {
+	return &simpleManager{
+		Challenges: make(map[string][]Challenge),
+	}
+}
+
+type simpleManager struct {
+	sync.RWMutex
+	Challenges map[string][]Challenge
+}
+
+func normalizeURL(endpoint *url.URL) {
+	endpoint.Host = strings.ToLower(endpoint.Host)
+	endpoint.Host = canonicalAddr(endpoint)
+}
+
+func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
+	normalizeURL(&endpoint)
+
+	m.RLock()
+	defer m.RUnlock()
+	challenges := m.Challenges[endpoint.String()]
+	return challenges, nil
+}
+
+func (m *simpleManager) AddResponse(resp *http.Response) error {
+	challenges := ResponseChallenges(resp)
+	if resp.Request == nil {
+		return fmt.Errorf("missing request reference")
+	}
+	urlCopy := url.URL{
+		Path:   resp.Request.URL.Path,
+		Host:   resp.Request.URL.Host,
+		Scheme: resp.Request.URL.Scheme,
+	}
+	normalizeURL(&urlCopy)
+
+	m.Lock()
+	defer m.Unlock()
+	m.Challenges[urlCopy.String()] = challenges
+	return nil
+}
+
+// Octet types from RFC 2616.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+// ResponseChallenges returns a list of authorization challenges
+// for the given http Response. Challenges are only checked if
+// the response status code was a 401.
+func ResponseChallenges(resp *http.Response) []Challenge {
+	if resp.StatusCode == http.StatusUnauthorized {
+		// Parse the WWW-Authenticate Header and store the challenges
+		// on this endpoint object.
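+		// A typical header has the form (an illustrative value, not
+		// taken from this patch):
+		//
+		//	WWW-Authenticate: Bearer realm="https://auth.example.com/token",service="registry.example.com"
+		//
+		// which parseAuthHeader maps to Challenge{Scheme: "bearer",
+		// Parameters: {"realm": ..., "service": ...}}.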
+ return parseAuthHeader(resp.Header) + } + + return nil +} + +func parseAuthHeader(header http.Header) []Challenge { + challenges := []Challenge{} + for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { + v, p := parseValueAndParams(h) + if v != "" { + challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) + } + } + return challenges +} + +func parseValueAndParams(header string) (value string, params map[string]string) { + params = make(map[string]string) + value, s := expectToken(header) + if value == "" { + return + } + value = strings.ToLower(value) + s = "," + skipSpace(s) + for strings.HasPrefix(s, ",") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/vendor/src/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/src/github.com/docker/distribution/registry/client/blob_writer.go new file mode 100644 index 00000000..e3ffcb00 --- /dev/null +++ b/vendor/src/github.com/docker/distribution/registry/client/blob_writer.go @@ -0,0 +1,162 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +type httpBlobUpload struct { + statter distribution.BlobStatter + client *http.Client + + uuid string + startedAt time.Time + + location string // always the last value of the location header. 
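+	// offset is the number of bytes written so far; Size reports it
+	// and Write uses it to build the Content-Range header.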
+ offset int64 + closed bool +} + +func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { + panic("Not implemented") +} + +func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { + if resp.StatusCode == http.StatusNotFound { + return distribution.ErrBlobUploadUnknown + } + return HandleErrorResponse(resp) +} + +func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { + req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) + if err != nil { + return 0, err + } + defer req.Body.Close() + + resp, err := hbu.client.Do(req) + if err != nil { + return 0, err + } + + if !SuccessStatus(resp.StatusCode) { + return 0, hbu.handleErrorResponse(resp) + } + + hbu.uuid = resp.Header.Get("Docker-Upload-UUID") + hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int64 + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + +} + +func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { + req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) + if err != nil { + return 0, err + } + req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) + req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err := hbu.client.Do(req) + if err != nil { + return 0, err + } + + if !SuccessStatus(resp.StatusCode) { + return 0, hbu.handleErrorResponse(resp) + } + + hbu.uuid = resp.Header.Get("Docker-Upload-UUID") + hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + +} + +func (hbu *httpBlobUpload) Size() int64 { + return hbu.offset +} + +func (hbu *httpBlobUpload) ID() string { + return hbu.uuid +} + +func (hbu *httpBlobUpload) StartedAt() time.Time { + return hbu.startedAt +} + +func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + // TODO(dmcgowan): Check if already finished, if so just fetch + req, err := http.NewRequest("PUT", hbu.location, nil) + if err != nil { + return distribution.Descriptor{}, err + } + + values := req.URL.Query() + values.Set("digest", desc.Digest.String()) + req.URL.RawQuery = values.Encode() + + resp, err := hbu.client.Do(req) + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + if !SuccessStatus(resp.StatusCode) { + return distribution.Descriptor{}, hbu.handleErrorResponse(resp) + } + + return hbu.statter.Stat(ctx, desc.Digest) +} + +func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { + req, err := http.NewRequest("DELETE", hbu.location, nil) + if err != nil { + return err + } + resp, err := hbu.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { + return nil + } + return hbu.handleErrorResponse(resp) +} + +func (hbu *httpBlobUpload) Close() error { + hbu.closed = true + return nil +} diff 
--git a/vendor/src/github.com/docker/distribution/registry/client/errors.go b/vendor/src/github.com/docker/distribution/registry/client/errors.go
new file mode 100644
index 00000000..52d49d5d
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/registry/client/errors.go
@@ -0,0 +1,139 @@
+package client
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/docker/distribution/registry/api/errcode"
+	"github.com/docker/distribution/registry/client/auth/challenge"
+)
+
+// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
+// errcode.Errors slice.
+var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body")
+
+// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
+// returned when making a registry api call.
+type UnexpectedHTTPStatusError struct {
+	Status string
+}
+
+func (e *UnexpectedHTTPStatusError) Error() string {
+	return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
+}
+
+// UnexpectedHTTPResponseError is returned when an expected HTTP status code
+// is returned, but the content was unexpected and failed to be parsed.
+type UnexpectedHTTPResponseError struct {
+	ParseErr   error
+	StatusCode int
+	Response   []byte
+}
+
+func (e *UnexpectedHTTPResponseError) Error() string {
+	return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
+}
+
+func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
+	var errors errcode.Errors
+	body, err := ioutil.ReadAll(r)
+	if err != nil {
+		return err
+	}
+
+	// For backward compatibility, handle irregularly formatted
+	// messages that contain a "details" field.
+	var detailsErr struct {
+		Details string `json:"details"`
+	}
+	err = json.Unmarshal(body, &detailsErr)
+	if err == nil && detailsErr.Details != "" {
+		switch statusCode {
+		case http.StatusUnauthorized:
+			return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
+		case http.StatusTooManyRequests:
+			return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
+		default:
+			return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
+		}
+	}
+
+	if err := json.Unmarshal(body, &errors); err != nil {
+		return &UnexpectedHTTPResponseError{
+			ParseErr:   err,
+			StatusCode: statusCode,
+			Response:   body,
+		}
+	}
+
+	if len(errors) == 0 {
+		// If there was no error specified in the body, return
+		// UnexpectedHTTPResponseError.
+		return &UnexpectedHTTPResponseError{
+			ParseErr:   ErrNoErrorsInBody,
+			StatusCode: statusCode,
+			Response:   body,
+		}
+	}
+
+	return errors
+}
+
+func makeErrorList(err error) []error {
+	if errL, ok := err.(errcode.Errors); ok {
+		return []error(errL)
+	}
+	return []error{err}
+}
+
+func mergeErrors(err1, err2 error) error {
+	return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
+}
+
+// HandleErrorResponse returns the error parsed from an HTTP response for an
+// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
+// UnexpectedHTTPStatusError is returned for a response code outside of the
+// expected range.
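+//
+// A typical call site checks the status first, as the other files in
+// this package do:
+//
+//	if !SuccessStatus(resp.StatusCode) {
+//		return HandleErrorResponse(resp)
+//	}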
+func HandleErrorResponse(resp *http.Response) error { + if resp.StatusCode >= 400 && resp.StatusCode < 500 { + // Check for OAuth errors within the `WWW-Authenticate` header first + // See https://tools.ietf.org/html/rfc6750#section-3 + for _, c := range challenge.ResponseChallenges(resp) { + if c.Scheme == "bearer" { + var err errcode.Error + // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 + switch c.Parameters["error"] { + case "invalid_token": + err.Code = errcode.ErrorCodeUnauthorized + case "insufficient_scope": + err.Code = errcode.ErrorCodeDenied + default: + continue + } + if description := c.Parameters["error_description"]; description != "" { + err.Message = description + } else { + err.Message = err.Code.Message() + } + + return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body)) + } + } + err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) + if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 { + return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) + } + return err + } + return &UnexpectedHTTPStatusError{Status: resp.Status} +} + +// SuccessStatus returns true if the argument is a successful HTTP response +// code (in the range 200 - 399 inclusive). +func SuccessStatus(status int) bool { + return status >= 200 && status <= 399 +} diff --git a/vendor/src/github.com/docker/distribution/registry/client/repository.go b/vendor/src/github.com/docker/distribution/registry/client/repository.go new file mode 100644 index 00000000..1ebd0b18 --- /dev/null +++ b/vendor/src/github.com/docker/distribution/registry/client/repository.go @@ -0,0 +1,853 @@ +package client + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/distribution/registry/storage/cache" + "github.com/docker/distribution/registry/storage/cache/memory" +) + +// Registry provides an interface for calling Repositories, which returns a catalog of repositories. +type Registry interface { + Repositories(ctx context.Context, repos []string, last string) (n int, err error) +} + +// checkHTTPRedirect is a callback that can manipulate redirected HTTP +// requests. It is used to preserve Accept and Range headers. +func checkHTTPRedirect(req *http.Request, via []*http.Request) error { + if len(via) >= 10 { + return errors.New("stopped after 10 redirects") + } + + if len(via) > 0 { + for headerName, headerVals := range via[0].Header { + if headerName != "Accept" && headerName != "Range" { + continue + } + for _, val := range headerVals { + // Don't add to redirected request if redirected + // request already has a header with the same + // name and value. 
+				hasValue := false
+				for _, existingVal := range req.Header[headerName] {
+					if existingVal == val {
+						hasValue = true
+						break
+					}
+				}
+				if !hasValue {
+					req.Header.Add(headerName, val)
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+// NewRegistry creates a registry namespace which can be used to get a listing of repositories
+func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) {
+	ub, err := v2.NewURLBuilderFromString(baseURL, false)
+	if err != nil {
+		return nil, err
+	}
+
+	client := &http.Client{
+		Transport:     transport,
+		Timeout:       1 * time.Minute,
+		CheckRedirect: checkHTTPRedirect,
+	}
+
+	return &registry{
+		client:  client,
+		ub:      ub,
+		context: ctx,
+	}, nil
+}
+
+type registry struct {
+	client  *http.Client
+	ub      *v2.URLBuilder
+	context context.Context
+}
+
+// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
+// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there
+// are no more entries
+func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) {
+	var numFilled int
+	var returnErr error
+
+	values := buildCatalogValues(len(entries), last)
+	u, err := r.ub.BuildCatalogURL(values)
+	if err != nil {
+		return 0, err
+	}
+
+	resp, err := r.client.Get(u)
+	if err != nil {
+		return 0, err
+	}
+	defer resp.Body.Close()
+
+	if SuccessStatus(resp.StatusCode) {
+		var ctlg struct {
+			Repositories []string `json:"repositories"`
+		}
+		decoder := json.NewDecoder(resp.Body)
+
+		if err := decoder.Decode(&ctlg); err != nil {
+			return 0, err
+		}
+
+		for cnt := range ctlg.Repositories {
+			entries[cnt] = ctlg.Repositories[cnt]
+		}
+		numFilled = len(ctlg.Repositories)
+
+		link := resp.Header.Get("Link")
+		if link == "" {
+			returnErr = io.EOF
+		}
+	} else {
+		return 0, HandleErrorResponse(resp)
+	}
+
+	return numFilled, returnErr
+}
+
+// NewRepository creates a new Repository for the given repository name and base URL.
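+// A typical call site (the names here are illustrative) looks like:
+//
+//	named, _ := reference.ParseNamed("example.com/library/hello")
+//	repo, err := NewRepository(ctx, named, "https://example.com", tr)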
+func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { + ub, err := v2.NewURLBuilderFromString(baseURL, false) + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: transport, + CheckRedirect: checkHTTPRedirect, + // TODO(dmcgowan): create cookie jar + } + + return &repository{ + client: client, + ub: ub, + name: name, + context: ctx, + }, nil +} + +type repository struct { + client *http.Client + ub *v2.URLBuilder + context context.Context + name reference.Named +} + +func (r *repository) Named() reference.Named { + return r.name +} + +func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { + statter := &blobStatter{ + name: r.name, + ub: r.ub, + client: r.client, + } + return &blobs{ + name: r.name, + ub: r.ub, + client: r.client, + statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), + } +} + +func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + // todo(richardscothern): options should be sent over the wire + return &manifests{ + name: r.name, + ub: r.ub, + client: r.client, + etags: make(map[string]string), + }, nil +} + +func (r *repository) Tags(ctx context.Context) distribution.TagService { + return &tags{ + client: r.client, + ub: r.ub, + context: r.context, + name: r.Named(), + } +} + +// tags implements remote tagging operations. +type tags struct { + client *http.Client + ub *v2.URLBuilder + context context.Context + name reference.Named +} + +// All returns all tags +func (t *tags) All(ctx context.Context) ([]string, error) { + var tags []string + + u, err := t.ub.BuildTagsURL(t.name) + if err != nil { + return tags, err + } + + for { + resp, err := t.client.Get(u) + if err != nil { + return tags, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return tags, err + } + + tagsResponse := struct { + Tags []string `json:"tags"` + }{} + if err := json.Unmarshal(b, &tagsResponse); err != nil { + return tags, err + } + tags = append(tags, tagsResponse.Tags...) 
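+			// The registry may paginate tag listings; when it does,
+			// the next page URL arrives in an RFC 5988 Link header,
+			// which is followed below until none remains.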
+ if link := resp.Header.Get("Link"); link != "" { + u = strings.Trim(strings.Split(link, ";")[0], "<>") + } else { + return tags, nil + } + } else { + return tags, HandleErrorResponse(resp) + } + } +} + +func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { + desc := distribution.Descriptor{} + headers := response.Header + + ctHeader := headers.Get("Content-Type") + if ctHeader == "" { + return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") + } + desc.MediaType = ctHeader + + digestHeader := headers.Get("Docker-Content-Digest") + if digestHeader == "" { + bytes, err := ioutil.ReadAll(response.Body) + if err != nil { + return distribution.Descriptor{}, err + } + _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil + } + + dgst, err := digest.ParseDigest(digestHeader) + if err != nil { + return distribution.Descriptor{}, err + } + desc.Digest = dgst + + lengthHeader := headers.Get("Content-Length") + if lengthHeader == "" { + return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") + } + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return distribution.Descriptor{}, err + } + desc.Size = length + + return desc, nil + +} + +// Get issues a HEAD request for a Manifest against its named endpoint in order +// to construct a descriptor for the tag. If the registry doesn't support HEADing +// a manifest, fallback to GET. +func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + ref, err := reference.WithTag(t.name, tag) + if err != nil { + return distribution.Descriptor{}, err + } + u, err := t.ub.BuildManifestURL(ref) + if err != nil { + return distribution.Descriptor{}, err + } + + newRequest := func(method string) (*http.Response, error) { + req, err := http.NewRequest(method, u, nil) + if err != nil { + return nil, err + } + + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) + } + resp, err := t.client.Do(req) + return resp, err + } + + resp, err := newRequest("HEAD") + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + switch { + case resp.StatusCode >= 200 && resp.StatusCode < 400: + return descriptorFromResponse(resp) + default: + // if the response is an error - there will be no body to decode. 
+		// Issue a GET request:
+		//   - for data from a server that does not handle HEAD
+		//   - to get error details in case of a failure
+		resp, err = newRequest("GET")
+		if err != nil {
+			return distribution.Descriptor{}, err
+		}
+		defer resp.Body.Close()
+
+		if resp.StatusCode >= 200 && resp.StatusCode < 400 {
+			return descriptorFromResponse(resp)
+		}
+		return distribution.Descriptor{}, HandleErrorResponse(resp)
+	}
+}
+
+func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
+	panic("not implemented")
+}
+
+func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
+	panic("not implemented")
+}
+
+func (t *tags) Untag(ctx context.Context, tag string) error {
+	panic("not implemented")
+}
+
+type manifests struct {
+	name   reference.Named
+	ub     *v2.URLBuilder
+	client *http.Client
+	etags  map[string]string
+}
+
+func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
+	ref, err := reference.WithDigest(ms.name, dgst)
+	if err != nil {
+		return false, err
+	}
+	u, err := ms.ub.BuildManifestURL(ref)
+	if err != nil {
+		return false, err
+	}
+
+	resp, err := ms.client.Head(u)
+	if err != nil {
+		return false, err
+	}
+
+	if SuccessStatus(resp.StatusCode) {
+		return true, nil
+	} else if resp.StatusCode == http.StatusNotFound {
+		return false, nil
+	}
+	return false, HandleErrorResponse(resp)
+}
+
+// AddEtagToTag allows a client to supply an eTag to Get which will be
+// used for a conditional HTTP request. If the eTag matches, a nil manifest
+// and ErrManifestNotModified error will be returned. etag is automatically
+// quoted when added to this map.
+func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
+	return etagOption{tag, etag}
+}
+
+type etagOption struct{ tag, etag string }
+
+func (o etagOption) Apply(ms distribution.ManifestService) error {
+	if ms, ok := ms.(*manifests); ok {
+		ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
+		return nil
+	}
+	return fmt.Errorf("etag option is a client-only option")
+}
+
+// ReturnContentDigest allows a client to set the content digest on
+// a successful request from the 'Docker-Content-Digest' header. The
+// returned digest represents the digest which the registry uses
+// to refer to the content and can be used to delete the content.
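+//
+// A sketch of its use (assuming distribution.WithTag is in scope):
+//
+//	var dgst digest.Digest
+//	m, err := manifestService.Get(ctx, "", distribution.WithTag("latest"), ReturnContentDigest(&dgst))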
+func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption { + return contentDigestOption{dgst} +} + +type contentDigestOption struct{ digest *digest.Digest } + +func (o contentDigestOption) Apply(ms distribution.ManifestService) error { + return nil +} + +func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + var ( + digestOrTag string + ref reference.Named + err error + contentDgst *digest.Digest + ) + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + digestOrTag = opt.Tag + ref, err = reference.WithTag(ms.name, opt.Tag) + if err != nil { + return nil, err + } + } else if opt, ok := option.(contentDigestOption); ok { + contentDgst = opt.digest + } else { + err := option.Apply(ms) + if err != nil { + return nil, err + } + } + } + + if digestOrTag == "" { + digestOrTag = dgst.String() + ref, err = reference.WithDigest(ms.name, dgst) + if err != nil { + return nil, err + } + } + + u, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, err + } + + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) + } + + if _, ok := ms.etags[digestOrTag]; ok { + req.Header.Set("If-None-Match", ms.etags[digestOrTag]) + } + + resp, err := ms.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusNotModified { + return nil, distribution.ErrManifestNotModified + } else if SuccessStatus(resp.StatusCode) { + if contentDgst != nil { + dgst, err := digest.ParseDigest(resp.Header.Get("Docker-Content-Digest")) + if err == nil { + *contentDgst = dgst + } + } + mt := resp.Header.Get("Content-Type") + body, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + m, _, err := distribution.UnmarshalManifest(mt, body) + if err != nil { + return nil, err + } + return m, nil + } + return nil, HandleErrorResponse(resp) +} + +// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the +// tag name in order to build the correct upload URL. 
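+//
+// For example, pushing under a tag (a sketch, again assuming
+// distribution.WithTag):
+//
+//	dgst, err := manifestService.Put(ctx, m, distribution.WithTag("latest"))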
+func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + ref := ms.name + var tagged bool + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + var err error + ref, err = reference.WithTag(ref, opt.Tag) + if err != nil { + return "", err + } + tagged = true + } else { + err := option.Apply(ms) + if err != nil { + return "", err + } + } + } + mediaType, p, err := m.Payload() + if err != nil { + return "", err + } + + if !tagged { + // generate a canonical digest and Put by digest + _, d, err := distribution.UnmarshalManifest(mediaType, p) + if err != nil { + return "", err + } + ref, err = reference.WithDigest(ref, d.Digest) + if err != nil { + return "", err + } + } + + manifestURL, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return "", err + } + + putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) + if err != nil { + return "", err + } + + putRequest.Header.Set("Content-Type", mediaType) + + resp, err := ms.client.Do(putRequest) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + dgstHeader := resp.Header.Get("Docker-Content-Digest") + dgst, err := digest.ParseDigest(dgstHeader) + if err != nil { + return "", err + } + + return dgst, nil + } + + return "", HandleErrorResponse(resp) +} + +func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { + ref, err := reference.WithDigest(ms.name, dgst) + if err != nil { + return err + } + u, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return err + } + req, err := http.NewRequest("DELETE", u, nil) + if err != nil { + return err + } + + resp, err := ms.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + return nil + } + return HandleErrorResponse(resp) +} + +// todo(richardscothern): Restore interface and implementation with merge of #1050 +/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { + panic("not supported") +}*/ + +type blobs struct { + name reference.Named + ub *v2.URLBuilder + client *http.Client + + statter distribution.BlobDescriptorService + distribution.BlobDeleter +} + +func sanitizeLocation(location, base string) (string, error) { + baseURL, err := url.Parse(base) + if err != nil { + return "", err + } + + locationURL, err := url.Parse(location) + if err != nil { + return "", err + } + + return baseURL.ResolveReference(locationURL).String(), nil +} + +func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return bs.statter.Stat(ctx, dgst) + +} + +func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + reader, err := bs.Open(ctx, dgst) + if err != nil { + return nil, err + } + defer reader.Close() + + return ioutil.ReadAll(reader) +} + +func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return nil, err + } + blobURL, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return nil, err + } + + return transport.NewHTTPReadSeeker(bs.client, blobURL, + func(resp *http.Response) error { + if resp.StatusCode == http.StatusNotFound { + return distribution.ErrBlobUnknown + } + return HandleErrorResponse(resp) + }), nil +} + +func (bs *blobs) ServeBlob(ctx 
context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + panic("not implemented") +} + +func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + writer, err := bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, err + } + dgstr := digest.Canonical.New() + n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) + if err != nil { + return distribution.Descriptor{}, err + } + if n < int64(len(p)) { + return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) + } + + desc := distribution.Descriptor{ + MediaType: mediaType, + Size: int64(len(p)), + Digest: dgstr.Digest(), + } + + return writer.Commit(ctx, desc) +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithMountFrom returns a BlobCreateOption which designates that the blob should be +// mounted from the given canonical reference. +func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*distribution.CreateOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + + opts.Mount.ShouldMount = true + opts.Mount.From = ref + + return nil + }) +} + +func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + var opts distribution.CreateOptions + + for _, option := range options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } + } + + var values []url.Values + + if opts.Mount.ShouldMount { + values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) + } + + u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) 
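+	// The POST below either mounts the blob from another repository
+	// (201 Created) or opens a resumable upload session (202 Accepted);
+	// both outcomes are handled by the switch that follows.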
+ if err != nil { + return nil, err + } + + resp, err := bs.client.Post(u, "", nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusCreated: + desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) + if err != nil { + return nil, err + } + return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} + case http.StatusAccepted: + // TODO(dmcgowan): Check for invalid UUID + uuid := resp.Header.Get("Docker-Upload-UUID") + location, err := sanitizeLocation(resp.Header.Get("Location"), u) + if err != nil { + return nil, err + } + + return &httpBlobUpload{ + statter: bs.statter, + client: bs.client, + uuid: uuid, + startedAt: time.Now(), + location: location, + }, nil + default: + return nil, HandleErrorResponse(resp) + } +} + +func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { + return bs.statter.Clear(ctx, dgst) +} + +type blobStatter struct { + name reference.Named + ub *v2.URLBuilder + client *http.Client +} + +func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + u, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return distribution.Descriptor{}, err + } + + resp, err := bs.client.Head(u) + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + lengthHeader := resp.Header.Get("Content-Length") + if lengthHeader == "" { + return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) + } + + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) + } + + return distribution.Descriptor{ + MediaType: resp.Header.Get("Content-Type"), + Size: length, + Digest: dgst, + }, nil + } else if resp.StatusCode == http.StatusNotFound { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + return distribution.Descriptor{}, HandleErrorResponse(resp) +} + +func buildCatalogValues(maxEntries int, last string) url.Values { + values := url.Values{} + + if maxEntries > 0 { + values.Add("n", strconv.Itoa(maxEntries)) + } + + if last != "" { + values.Add("last", last) + } + + return values +} + +func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return err + } + blobURL, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return err + } + + req, err := http.NewRequest("DELETE", blobURL, nil) + if err != nil { + return err + } + + resp, err := bs.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + return nil + } + return HandleErrorResponse(resp) +} + +func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + return nil +} diff --git a/vendor/src/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/src/github.com/docker/distribution/registry/client/transport/http_reader.go new file mode 100644 index 00000000..e5ff09d7 --- /dev/null +++ b/vendor/src/github.com/docker/distribution/registry/client/transport/http_reader.go @@ -0,0 +1,251 @@ +package 
transport
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"regexp"
+	"strconv"
+)
+
+var (
+	contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)
+
+	// ErrWrongCodeForByteRange is returned if the client sends a request
+	// with a Range header but the server returns a 2xx or 3xx code other
+	// than 206 Partial Content.
+	ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request")
+)
+
+// ReadSeekCloser combines io.ReadSeeker with io.Closer.
+type ReadSeekCloser interface {
+	io.ReadSeeker
+	io.Closer
+}
+
+// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
+// request. When seeking and starting a read from a non-zero offset,
+// a "Range" header will be added which sets the offset.
+// TODO(dmcgowan): Move this into a separate utility package
+func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser {
+	return &httpReadSeeker{
+		client:       client,
+		url:          url,
+		errorHandler: errorHandler,
+	}
+}
+
+type httpReadSeeker struct {
+	client *http.Client
+	url    string
+
+	// errorHandler creates an error from an unsuccessful HTTP response.
+	// This allows the error to be created with the HTTP response body
+	// without leaking the body through a returned error.
+	errorHandler func(*http.Response) error
+
+	size int64
+
+	// rc is the remote read closer.
+	rc io.ReadCloser
+	// readerOffset tracks the offset as of the last read.
+	readerOffset int64
+	// seekOffset allows Seek to override the offset. Seek changes
+	// seekOffset instead of changing readerOffset directly so that
+	// connection resets can be delayed and possibly avoided if the
+	// seek is undone (i.e. seeking to the end and then back to the
+	// beginning).
+	seekOffset int64
+	err        error
+}
+
+func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
+	if hrs.err != nil {
+		return 0, hrs.err
+	}
+
+	// If we sought to a different position, we need to reset the
+	// connection. This logic is here instead of Seek so that if
+	// a seek is undone before the next read, the connection doesn't
+	// need to be closed and reopened. A common example of this is
+	// seeking to the end to determine the length, and then seeking
+	// back to the original position.
+	if hrs.readerOffset != hrs.seekOffset {
+		hrs.reset()
+	}
+
+	hrs.readerOffset = hrs.seekOffset
+
+	rd, err := hrs.reader()
+	if err != nil {
+		return 0, err
+	}
+
+	n, err = rd.Read(p)
+	hrs.seekOffset += int64(n)
+	hrs.readerOffset += int64(n)
+
+	return n, err
+}
+
+func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
+	if hrs.err != nil {
+		return 0, hrs.err
+	}
+
+	lastReaderOffset := hrs.readerOffset
+
+	if whence == os.SEEK_SET && hrs.rc == nil {
+		// If no request has been made yet, and we are seeking to an
+		// absolute position, set the read offset as well to avoid an
+		// unnecessary request.
+ hrs.readerOffset = offset + } + + _, err := hrs.reader() + if err != nil { + hrs.readerOffset = lastReaderOffset + return 0, err + } + + newOffset := hrs.seekOffset + + switch whence { + case os.SEEK_CUR: + newOffset += offset + case os.SEEK_END: + if hrs.size < 0 { + return 0, errors.New("content length not known") + } + newOffset = hrs.size + offset + case os.SEEK_SET: + newOffset = offset + } + + if newOffset < 0 { + err = errors.New("cannot seek to negative position") + } else { + hrs.seekOffset = newOffset + } + + return hrs.seekOffset, err +} + +func (hrs *httpReadSeeker) Close() error { + if hrs.err != nil { + return hrs.err + } + + // close and release reader chain + if hrs.rc != nil { + hrs.rc.Close() + } + + hrs.rc = nil + + hrs.err = errors.New("httpLayer: closed") + + return nil +} + +func (hrs *httpReadSeeker) reset() { + if hrs.err != nil { + return + } + if hrs.rc != nil { + hrs.rc.Close() + hrs.rc = nil + } +} + +func (hrs *httpReadSeeker) reader() (io.Reader, error) { + if hrs.err != nil { + return nil, hrs.err + } + + if hrs.rc != nil { + return hrs.rc, nil + } + + req, err := http.NewRequest("GET", hrs.url, nil) + if err != nil { + return nil, err + } + + if hrs.readerOffset > 0 { + // If we are at different offset, issue a range request from there. + req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset)) + // TODO: get context in here + // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) + } + + req.Header.Add("Accept-Encoding", "identity") + resp, err := hrs.client.Do(req) + if err != nil { + return nil, err + } + + // Normally would use client.SuccessStatus, but that would be a cyclic + // import + if resp.StatusCode >= 200 && resp.StatusCode <= 399 { + if hrs.readerOffset > 0 { + if resp.StatusCode != http.StatusPartialContent { + return nil, ErrWrongCodeForByteRange + } + + contentRange := resp.Header.Get("Content-Range") + if contentRange == "" { + return nil, errors.New("no Content-Range header found in HTTP 206 response") + } + + submatches := contentRangeRegexp.FindStringSubmatch(contentRange) + if len(submatches) < 4 { + return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange) + } + + startByte, err := strconv.ParseUint(submatches[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange) + } + + if startByte != uint64(hrs.readerOffset) { + return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset) + } + + endByte, err := strconv.ParseUint(submatches[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange) + } + + if submatches[3] == "*" { + hrs.size = -1 + } else { + size, err := strconv.ParseUint(submatches[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange) + } + + if endByte+1 != size { + return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) + } + + hrs.size = int64(size) + } + } else if resp.StatusCode == http.StatusOK { + hrs.size = resp.ContentLength + } else { + hrs.size = -1 + } + hrs.rc = resp.Body + } else { + defer resp.Body.Close() + if hrs.errorHandler != nil { + return nil, hrs.errorHandler(resp) + } + return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) + } + + return hrs.rc, nil +} diff --git 
a/vendor/src/github.com/docker/distribution/registry/client/transport/transport.go b/vendor/src/github.com/docker/distribution/registry/client/transport/transport.go new file mode 100644 index 00000000..30e45fab --- /dev/null +++ b/vendor/src/github.com/docker/distribution/registry/client/transport/transport.go @@ -0,0 +1,147 @@ +package transport + +import ( + "io" + "net/http" + "sync" +) + +// RequestModifier represents an object which will do an inplace +// modification of an HTTP request. +type RequestModifier interface { + ModifyRequest(*http.Request) error +} + +type headerModifier http.Header + +// NewHeaderRequestModifier returns a new RequestModifier which will +// add the given headers to a request. +func NewHeaderRequestModifier(header http.Header) RequestModifier { + return headerModifier(header) +} + +func (h headerModifier) ModifyRequest(req *http.Request) error { + for k, s := range http.Header(h) { + req.Header[k] = append(req.Header[k], s...) + } + + return nil +} + +// NewTransport creates a new transport which will apply modifiers to +// the request on a RoundTrip call. +func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper { + return &transport{ + Modifiers: modifiers, + Base: base, + } +} + +// transport is an http.RoundTripper that makes HTTP requests after +// copying and modifying the request +type transport struct { + Modifiers []RequestModifier + Base http.RoundTripper + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// RoundTrip authorizes and authenticates the request with an +// access token. If no token exists or token is expired, +// tries to refresh/fetch a new token. +func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { + req2 := cloneRequest(req) + for _, modifier := range t.Modifiers { + if err := modifier.ModifyRequest(req2); err != nil { + return nil, err + } + } + + t.setModReq(req, req2) + res, err := t.base().RoundTrip(req2) + if err != nil { + t.setModReq(req, nil) + return nil, err + } + res.Body = &onEOFReader{ + rc: res.Body, + fn: func() { t.setModReq(req, nil) }, + } + return res, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) 
+ } + + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/vendor/src/github.com/docker/distribution/registry/storage/cache/cache.go b/vendor/src/github.com/docker/distribution/registry/storage/cache/cache.go new file mode 100644 index 00000000..10a39091 --- /dev/null +++ b/vendor/src/github.com/docker/distribution/registry/storage/cache/cache.go @@ -0,0 +1,35 @@ +// Package cache provides facilities to speed up access to the storage +// backend. +package cache + +import ( + "fmt" + + "github.com/docker/distribution" +) + +// BlobDescriptorCacheProvider provides repository scoped +// BlobDescriptorService cache instances and a global descriptor cache. +type BlobDescriptorCacheProvider interface { + distribution.BlobDescriptorService + + RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) +} + +// ValidateDescriptor provides a helper function to ensure that caches have +// common criteria for admitting descriptors. +func ValidateDescriptor(desc distribution.Descriptor) error { + if err := desc.Digest.Validate(); err != nil { + return err + } + + if desc.Size < 0 { + return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size) + } + + if desc.MediaType == "" { + return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc) + } + + return nil +} diff --git a/vendor/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go new file mode 100644 index 00000000..94ca8a90 --- /dev/null +++ b/vendor/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go @@ -0,0 +1,101 @@ +package cache + +import ( + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + + "github.com/docker/distribution" +) + +// Metrics is used to hold metric counters +// related to the number of times a cache was +// hit or missed. +type Metrics struct { + Requests uint64 + Hits uint64 + Misses uint64 +} + +// MetricsTracker represents a metric tracker +// which simply counts the number of hits and misses. +type MetricsTracker interface { + Hit() + Miss() + Metrics() Metrics +} + +type cachedBlobStatter struct { + cache distribution.BlobDescriptorService + backend distribution.BlobDescriptorService + tracker MetricsTracker +} + +// NewCachedBlobStatter creates a new statter which prefers a cache and +// falls back to a backend. +func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService { + return &cachedBlobStatter{ + cache: cache, + backend: backend, + } +} + +// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and +// falls back to a backend. Hits and misses will send to the tracker. 
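+//
+// A sketch of wiring it up (tracker is any MetricsTracker
+// implementation supplied by the caller):
+//
+//	statter := NewCachedBlobStatterWithMetrics(cacheSvc, backendSvc, tracker)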
+func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter { + return &cachedBlobStatter{ + cache: cache, + backend: backend, + tracker: tracker, + } +} + +func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + desc, err := cbds.cache.Stat(ctx, dgst) + if err != nil { + if err != distribution.ErrBlobUnknown { + context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err) + } + + goto fallback + } + + if cbds.tracker != nil { + cbds.tracker.Hit() + } + return desc, nil +fallback: + if cbds.tracker != nil { + cbds.tracker.Miss() + } + desc, err = cbds.backend.Stat(ctx, dgst) + if err != nil { + return desc, err + } + + if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { + context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) + } + + return desc, err + +} + +func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + err := cbds.cache.Clear(ctx, dgst) + if err != nil { + return err + } + + err = cbds.backend.Clear(ctx, dgst) + if err != nil { + return err + } + return nil +} + +func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { + context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) + } + return nil +} diff --git a/vendor/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go new file mode 100644 index 00000000..cf125e18 --- /dev/null +++ b/vendor/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go @@ -0,0 +1,179 @@ +package memory + +import ( + "sync" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache" +) + +type inMemoryBlobDescriptorCacheProvider struct { + global *mapBlobDescriptorCache + repositories map[string]*mapBlobDescriptorCache + mu sync.RWMutex +} + +// NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for +// storing blob descriptor data. 
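+//
+// This is the provider the repository client wires in by default:
+//
+//	statter := cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter)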
+func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { + return &inMemoryBlobDescriptorCacheProvider{ + global: newMapBlobDescriptorCache(), + repositories: make(map[string]*mapBlobDescriptorCache), + } +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { + if _, err := reference.ParseNamed(repo); err != nil { + return nil, err + } + + imbdcp.mu.RLock() + defer imbdcp.mu.RUnlock() + + return &repositoryScopedInMemoryBlobDescriptorCache{ + repo: repo, + parent: imbdcp, + repository: imbdcp.repositories[repo], + }, nil +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return imbdcp.global.Stat(ctx, dgst) +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { + return imbdcp.global.Clear(ctx, dgst) +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + _, err := imbdcp.Stat(ctx, dgst) + if err == distribution.ErrBlobUnknown { + + if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { + // if the digests differ, set the other canonical mapping + if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { + return err + } + } + + // unknown, just set it + return imbdcp.global.SetDescriptor(ctx, dgst, desc) + } + + // we already know it, do nothing + return err +} + +// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped +// repository cache. Instances are not thread-safe but the delegated +// operations are. +type repositoryScopedInMemoryBlobDescriptorCache struct { + repo string + parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map + repository *mapBlobDescriptorCache +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + rsimbdcp.parent.mu.Unlock() + + if repo == nil { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return repo.Stat(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + rsimbdcp.parent.mu.Unlock() + + if repo == nil { + return distribution.ErrBlobUnknown + } + + return repo.Clear(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + if repo == nil { + // allocate map since we are setting it now. + var ok bool + // have to read back value since we may have allocated elsewhere. + repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] + if !ok { + repo = newMapBlobDescriptorCache() + rsimbdcp.parent.repositories[rsimbdcp.repo] = repo + } + rsimbdcp.repository = repo + } + rsimbdcp.parent.mu.Unlock() + + if err := repo.SetDescriptor(ctx, dgst, desc); err != nil { + return err + } + + return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) +} + +// mapBlobDescriptorCache provides a simple map-based implementation of the +// descriptor cache. 
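+// Reads take the RWMutex read lock, while Clear and SetDescriptor take
+// the write lock, so a single cache is safe for concurrent use.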
+type mapBlobDescriptorCache struct { + descriptors map[digest.Digest]distribution.Descriptor + mu sync.RWMutex +} + +var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} + +func newMapBlobDescriptorCache() *mapBlobDescriptorCache { + return &mapBlobDescriptorCache{ + descriptors: make(map[digest.Digest]distribution.Descriptor), + } +} + +func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := dgst.Validate(); err != nil { + return distribution.Descriptor{}, err + } + + mbdc.mu.RLock() + defer mbdc.mu.RUnlock() + + desc, ok := mbdc.descriptors[dgst] + if !ok { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return desc, nil +} + +func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + delete(mbdc.descriptors, dgst) + return nil +} + +func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := dgst.Validate(); err != nil { + return err + } + + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } + + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + mbdc.descriptors[dgst] = desc + return nil +} diff --git a/vendor/src/github.com/docker/engine-api/LICENSE b/vendor/src/github.com/docker/engine-api/LICENSE new file mode 100644 index 00000000..c157bff9 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
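A brief usage sketch of the in-memory blob descriptor cache vendored above (illustrative only, not part of the vendored code; the provider package is assumed to be imported as memory, and ctx, dgst, and a distribution.Descriptor named desc are assumed to exist). A repository-scoped view is derived first; writes through it populate both the per-repository map and the global map:

	provider := memory.NewInMemoryBlobDescriptorCacheProvider()
	repoCache, err := provider.RepositoryScoped("library/busybox")
	if err != nil {
		panic(err)
	}
	// SetDescriptor fills the repository map, then the global map.
	_ = repoCache.SetDescriptor(ctx, dgst, desc)
	// The descriptor is now visible through the unscoped provider as well.
	d, _ := provider.Stat(ctx, dgst)
	_ = d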
diff --git a/vendor/src/github.com/docker/engine-api/client/checkpoint_create.go b/vendor/src/github.com/docker/engine-api/client/checkpoint_create.go
new file mode 100644
index 00000000..23883cc0
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/checkpoint_create.go
@@ -0,0 +1,13 @@
+package client
+
+import (
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// CheckpointCreate creates a checkpoint from the given container with the given name.
+func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error {
+	resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/checkpoint_delete.go b/vendor/src/github.com/docker/engine-api/client/checkpoint_delete.go
new file mode 100644
index 00000000..a4e9ed0c
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/checkpoint_delete.go
@@ -0,0 +1,12 @@
+package client
+
+import (
+	"golang.org/x/net/context"
+)
+
+// CheckpointDelete deletes the checkpoint with the given name from the given container.
+func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, checkpointID string) error {
+	resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+checkpointID, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/checkpoint_list.go b/vendor/src/github.com/docker/engine-api/client/checkpoint_list.go
new file mode 100644
index 00000000..ef5ec261
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/checkpoint_list.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+	"encoding/json"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// CheckpointList returns the checkpoints of the given container in the docker host.
+func (cli *Client) CheckpointList(ctx context.Context, container string) ([]types.Checkpoint, error) {
+	var checkpoints []types.Checkpoint
+
+	resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", nil, nil)
+	if err != nil {
+		return checkpoints, err
+	}
+
+	err = json.NewDecoder(resp.body).Decode(&checkpoints)
+	ensureReaderClosed(resp)
+	return checkpoints, err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/client.go b/vendor/src/github.com/docker/engine-api/client/client.go
new file mode 100644
index 00000000..f3ad2cf3
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/client.go
@@ -0,0 +1,153 @@
+package client
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/engine-api/client/transport"
+	"github.com/docker/go-connections/tlsconfig"
+)
+
+// DefaultVersion is the version of the current stable API
+const DefaultVersion string = "1.23"
+
+// Client is the API client that performs all operations
+// against a docker server.
+type Client struct {
+	// proto holds the client protocol i.e. unix.
+	proto string
+	// addr holds the client address.
+	addr string
+	// basePath holds the path to prepend to the requests.
+	basePath string
+	// transport is the interface to send request with, it implements transport.Client.
+	transport transport.Client
+	// version of the server to talk to.
+	version string
+	// custom http headers configured by users.
+	customHTTPHeaders map[string]string
+}
+
+// NewEnvClient initializes a new API client based on environment variables.
+// Use DOCKER_HOST to set the URL to the docker server.
+// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest.
+// Use DOCKER_CERT_PATH to load the tls certificates from.
+// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
+func NewEnvClient() (*Client, error) {
+	var client *http.Client
+	if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" {
+		options := tlsconfig.Options{
+			CAFile: filepath.Join(dockerCertPath, "ca.pem"),
+			CertFile: filepath.Join(dockerCertPath, "cert.pem"),
+			KeyFile: filepath.Join(dockerCertPath, "key.pem"),
+			InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
+		}
+		tlsc, err := tlsconfig.Client(options)
+		if err != nil {
+			return nil, err
+		}
+
+		client = &http.Client{
+			Transport: &http.Transport{
+				TLSClientConfig: tlsc,
+			},
+		}
+	}
+
+	host := os.Getenv("DOCKER_HOST")
+	if host == "" {
+		host = DefaultDockerHost
+	}
+
+	version := os.Getenv("DOCKER_API_VERSION")
+	if version == "" {
+		version = DefaultVersion
+	}
+
+	return NewClient(host, version, client, nil)
+}
+
+// NewClient initializes a new API client for the given host and API version.
+// It uses the given http client as transport.
+// It also initializes the custom http headers to add to each request.
+//
+// It won't send any version information if the version number is empty. It is
+// highly recommended that you set a version or your client may break if the
+// server is upgraded.
+func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
+	proto, addr, basePath, err := ParseHost(host)
+	if err != nil {
+		return nil, err
+	}
+
+	transport, err := transport.NewTransportWithHTTP(proto, addr, client)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Client{
+		proto: proto,
+		addr: addr,
+		basePath: basePath,
+		transport: transport,
+		version: version,
+		customHTTPHeaders: httpHeaders,
+	}, nil
+}
+
+// getAPIPath returns the versioned request path to call the api.
+// It appends the query parameters to the path if they are not empty.
+func (cli *Client) getAPIPath(p string, query url.Values) string {
+	var apiPath string
+	if cli.version != "" {
+		v := strings.TrimPrefix(cli.version, "v")
+		apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p)
+	} else {
+		apiPath = fmt.Sprintf("%s%s", cli.basePath, p)
+	}
+
+	u := &url.URL{
+		Path: apiPath,
+	}
+	if len(query) > 0 {
+		u.RawQuery = query.Encode()
+	}
+	return u.String()
+}
+
+// ClientVersion returns the version string associated with this
+// instance of the Client. Note that this value can be changed
+// via the DOCKER_API_VERSION env var.
+func (cli *Client) ClientVersion() string {
+	return cli.version
+}
+
+// UpdateClientVersion updates the version string associated with this
+// instance of the Client.
+func (cli *Client) UpdateClientVersion(v string) {
+	cli.version = v
+}
+
+// ParseHost verifies that the given host string is valid.
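+// For illustration only (a hedged sketch derived from the function below, not
+// upstream documentation): ParseHost splits a host string into protocol,
+// address, and base path, e.g.
+//
+//	proto, addr, basePath, err := client.ParseHost("tcp://127.0.0.1:2376/v1.23")
+//	// proto == "tcp", addr == "127.0.0.1:2376", basePath == "/v1.23", err == nil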
+func ParseHost(host string) (string, string, string, error) {
+	protoAddrParts := strings.SplitN(host, "://", 2)
+	if len(protoAddrParts) == 1 {
+		return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host)
+	}
+
+	var basePath string
+	proto, addr := protoAddrParts[0], protoAddrParts[1]
+	if proto == "tcp" {
+		parsed, err := url.Parse("tcp://" + addr)
+		if err != nil {
+			return "", "", "", err
+		}
+		addr = parsed.Host
+		basePath = parsed.Path
+	}
+	return proto, addr, basePath, nil
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/client_darwin.go b/vendor/src/github.com/docker/engine-api/client/client_darwin.go
new file mode 100644
index 00000000..4b47a178
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/client_darwin.go
@@ -0,0 +1,4 @@
+package client
+
+// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
+const DefaultDockerHost = "tcp://127.0.0.1:2375"
diff --git a/vendor/src/github.com/docker/engine-api/client/client_unix.go b/vendor/src/github.com/docker/engine-api/client/client_unix.go
new file mode 100644
index 00000000..572c5f87
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/client_unix.go
@@ -0,0 +1,6 @@
+// +build linux freebsd solaris openbsd
+
+package client
+
+// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
+const DefaultDockerHost = "unix:///var/run/docker.sock"
diff --git a/vendor/src/github.com/docker/engine-api/client/client_windows.go b/vendor/src/github.com/docker/engine-api/client/client_windows.go
new file mode 100644
index 00000000..07c0c7a7
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/client_windows.go
@@ -0,0 +1,4 @@
+package client
+
+// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
+const DefaultDockerHost = "npipe:////./pipe/docker_engine"
diff --git a/vendor/src/github.com/docker/engine-api/client/container_attach.go b/vendor/src/github.com/docker/engine-api/client/container_attach.go
new file mode 100644
index 00000000..1b616bf0
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/container_attach.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerAttach attaches a connection to a container in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
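+//
+// An illustrative sketch (a configured *Client named cli, a ctx, and the
+// container name "mycontainer" are assumptions, as are the io/os imports):
+//
+//	resp, err := cli.ContainerAttach(ctx, "mycontainer", types.ContainerAttachOptions{
+//		Stream: true,
+//		Stdout: true,
+//		Stderr: true,
+//	})
+//	if err == nil {
+//		defer resp.Close()
+//		io.Copy(os.Stdout, resp.Reader) // drain output from the hijacked connection
+//	}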
+func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) {
+	query := url.Values{}
+	if options.Stream {
+		query.Set("stream", "1")
+	}
+	if options.Stdin {
+		query.Set("stdin", "1")
+	}
+	if options.Stdout {
+		query.Set("stdout", "1")
+	}
+	if options.Stderr {
+		query.Set("stderr", "1")
+	}
+	if options.DetachKeys != "" {
+		query.Set("detachKeys", options.DetachKeys)
+	}
+
+	headers := map[string][]string{"Content-Type": {"text/plain"}}
+	return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers)
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/container_commit.go b/vendor/src/github.com/docker/engine-api/client/container_commit.go
new file mode 100644
index 00000000..d5c47499
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/container_commit.go
@@ -0,0 +1,53 @@
+package client
+
+import (
+	"encoding/json"
+	"errors"
+	"net/url"
+
+	distreference "github.com/docker/distribution/reference"
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/reference"
+	"golang.org/x/net/context"
+)
+
+// ContainerCommit applies changes to a container and creates a new tagged image.
+func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) {
+	var repository, tag string
+	if options.Reference != "" {
+		distributionRef, err := distreference.ParseNamed(options.Reference)
+		if err != nil {
+			return types.ContainerCommitResponse{}, err
+		}
+
+		if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical {
+			return types.ContainerCommitResponse{}, errors.New("refusing to create a tag with a digest reference")
+		}
+
+		tag = reference.GetTagFromNamedRef(distributionRef)
+		repository = distributionRef.Name()
+	}
+
+	query := url.Values{}
+	query.Set("container", container)
+	query.Set("repo", repository)
+	query.Set("tag", tag)
+	query.Set("comment", options.Comment)
+	query.Set("author", options.Author)
+	for _, change := range options.Changes {
+		query.Add("changes", change)
+	}
+	if !options.Pause {
+		query.Set("pause", "0")
+	}
+
+	var response types.ContainerCommitResponse
+	resp, err := cli.post(ctx, "/commit", query, options.Config, nil)
+	if err != nil {
+		return response, err
+	}
+
+	err = json.NewDecoder(resp.body).Decode(&response)
+	ensureReaderClosed(resp)
+	return response, err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/container_copy.go b/vendor/src/github.com/docker/engine-api/client/container_copy.go
new file mode 100644
index 00000000..d3dd0b11
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/container_copy.go
@@ -0,0 +1,97 @@
+package client
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"path/filepath"
+	"strings"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+)
+
+// ContainerStatPath returns Stat information about a path inside the container filesystem.
+func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) {
+	query := url.Values{}
+	query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
+ + urlStr := fmt.Sprintf("/containers/%s/archive", containerID) + response, err := cli.head(ctx, urlStr, query, nil) + if err != nil { + return types.ContainerPathStat{}, err + } + defer ensureReaderClosed(response) + return getContainerPathStatFromHeader(response.header) +} + +// CopyToContainer copies content into the container filesystem. +func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error { + query := url.Values{} + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. + // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. + if !options.AllowOverwriteDirWithFile { + query.Set("noOverwriteDirNonDir", "true") + } + + apiPath := fmt.Sprintf("/containers/%s/archive", container) + + response, err := cli.putRaw(ctx, apiPath, query, content, nil) + if err != nil { + return err + } + defer ensureReaderClosed(response) + + if response.statusCode != http.StatusOK { + return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + return nil +} + +// CopyFromContainer gets the content from the container and returns it as a Reader +// to manipulate it in the host. It's up to the caller to close the reader. +func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. + + apiPath := fmt.Sprintf("/containers/%s/archive", container) + response, err := cli.get(ctx, apiPath, query, nil) + if err != nil { + return nil, types.ContainerPathStat{}, err + } + + if response.statusCode != http.StatusOK { + return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + // In order to get the copy behavior right, we need to know information + // about both the source and the destination. The response headers include + // stat info about the source that we can use in deciding exactly how to + // copy it locally. Along with the stat info about the local destination, + // we have everything we need to handle the multiple possibilities there + // can be when copying a file/dir from one location to another file/dir. 
+	stat, err := getContainerPathStatFromHeader(response.header)
+	if err != nil {
+		return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err)
+	}
+	return response.body, stat, err
+}
+
+func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) {
+	var stat types.ContainerPathStat
+
+	encodedStat := header.Get("X-Docker-Container-Path-Stat")
+	statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat))
+
+	err := json.NewDecoder(statDecoder).Decode(&stat)
+	if err != nil {
+		err = fmt.Errorf("unable to decode container path stat header: %s", err)
+	}
+
+	return stat, err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/container_create.go b/vendor/src/github.com/docker/engine-api/client/container_create.go
new file mode 100644
index 00000000..98935794
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/container_create.go
@@ -0,0 +1,46 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+	"strings"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/container"
+	"github.com/docker/engine-api/types/network"
+	"golang.org/x/net/context"
+)
+
+type configWrapper struct {
+	*container.Config
+	HostConfig *container.HostConfig
+	NetworkingConfig *network.NetworkingConfig
+}
+
+// ContainerCreate creates a new container based on the given configuration.
+// It can be associated with a name, but it's not mandatory.
+func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) {
+	var response types.ContainerCreateResponse
+	query := url.Values{}
+	if containerName != "" {
+		query.Set("name", containerName)
+	}
+
+	body := configWrapper{
+		Config: config,
+		HostConfig: hostConfig,
+		NetworkingConfig: networkingConfig,
+	}
+
+	serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
+	if err != nil {
+		if serverResp != nil && serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
+			return response, imageNotFoundError{config.Image}
+		}
+		return response, err
+	}
+
+	err = json.NewDecoder(serverResp.body).Decode(&response)
+	ensureReaderClosed(serverResp)
+	return response, err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/container_diff.go b/vendor/src/github.com/docker/engine-api/client/container_diff.go
new file mode 100644
index 00000000..f4bb3a46
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/container_diff.go
@@ -0,0 +1,23 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerDiff shows differences in a container filesystem since it was started.
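+//
+// A hedged usage sketch (cli, ctx, the container name, and the fmt import are
+// assumptions):
+//
+//	changes, err := cli.ContainerDiff(ctx, "mycontainer")
+//	if err == nil {
+//		for _, change := range changes {
+//			fmt.Println(change.Kind, change.Path) // fields of types.ContainerChange
+//		}
+//	}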
+func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]types.ContainerChange, error) {
+	var changes []types.ContainerChange
+
+	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil)
+	if err != nil {
+		return changes, err
+	}
+
+	err = json.NewDecoder(serverResp.body).Decode(&changes)
+	ensureReaderClosed(serverResp)
+	return changes, err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/container_exec.go b/vendor/src/github.com/docker/engine-api/client/container_exec.go
new file mode 100644
index 00000000..ff7e1a9d
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/container_exec.go
@@ -0,0 +1,49 @@
+package client
+
+import (
+	"encoding/json"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerExecCreate creates a new exec configuration to run an exec process.
+func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error) {
+	var response types.ContainerExecCreateResponse
+	resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil)
+	if err != nil {
+		return response, err
+	}
+	err = json.NewDecoder(resp.body).Decode(&response)
+	ensureReaderClosed(resp)
+	return response, err
+}
+
+// ContainerExecStart starts an exec process already created in the docker host.
+func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
+	resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil)
+	ensureReaderClosed(resp)
+	return err
+}
+
+// ContainerExecAttach attaches a connection to an exec process in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) {
+	headers := map[string][]string{"Content-Type": {"application/json"}}
+	return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
+}
+
+// ContainerExecInspect returns information about a specific exec process on the docker host.
+func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) {
+	var response types.ContainerExecInspect
+	resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil)
+	if err != nil {
+		return response, err
+	}
+
+	err = json.NewDecoder(resp.body).Decode(&response)
+	ensureReaderClosed(resp)
+	return response, err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/container_export.go b/vendor/src/github.com/docker/engine-api/client/container_export.go
new file mode 100644
index 00000000..52194f3d
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/container_export.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+)
+
+// ContainerExport retrieves the raw contents of a container
+// and returns them as an io.ReadCloser. It's up to the caller
+// to close the stream.
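+//
+// An illustrative sketch (cli, ctx, the container name, the output file name,
+// and the io/os imports are assumptions):
+//
+//	body, err := cli.ContainerExport(ctx, "mycontainer")
+//	if err == nil {
+//		defer body.Close()
+//		f, _ := os.Create("rootfs.tar")
+//		defer f.Close()
+//		io.Copy(f, body) // the export is a tar stream of the container filesystem
+//	}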
+func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) + if err != nil { + return nil, err + } + + return serverResp.body, nil +} diff --git a/vendor/src/github.com/docker/engine-api/client/container_inspect.go b/vendor/src/github.com/docker/engine-api/client/container_inspect.go new file mode 100644 index 00000000..0fa096d3 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/container_inspect.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// ContainerInspect returns the container information. +func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ContainerJSON{}, containerNotFoundError{containerID} + } + return types.ContainerJSON{}, err + } + + var response types.ContainerJSON + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} + +// ContainerInspectWithRaw returns the container information and its raw representation. +func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { + query := url.Values{} + if getSize { + query.Set("size", "1") + } + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ContainerJSON{}, nil, containerNotFoundError{containerID} + } + return types.ContainerJSON{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return types.ContainerJSON{}, nil, err + } + + var response types.ContainerJSON + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/container_kill.go b/vendor/src/github.com/docker/engine-api/client/container_kill.go new file mode 100644 index 00000000..29f80c73 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/container_kill.go @@ -0,0 +1,17 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// ContainerKill terminates the container process but does not remove the container from the docker host. +func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { + query := url.Values{} + query.Set("signal", signal) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/src/github.com/docker/engine-api/client/container_list.go b/vendor/src/github.com/docker/engine-api/client/container_list.go new file mode 100644 index 00000000..87f7333d --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/container_list.go @@ -0,0 +1,56 @@ +package client + +import ( + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" + "golang.org/x/net/context" +) + +// ContainerList returns the list of containers in the docker host. 
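+//
+// For illustration (a hedged sketch; cli, ctx, and the fmt import are
+// assumptions):
+//
+//	containers, err := cli.ContainerList(ctx, types.ContainerListOptions{All: true})
+//	if err == nil {
+//		for _, c := range containers {
+//			fmt.Println(c.ID, c.Image)
+//		}
+//	}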
+func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + query := url.Values{} + + if options.All { + query.Set("all", "1") + } + + if options.Limit != -1 { + query.Set("limit", strconv.Itoa(options.Limit)) + } + + if options.Since != "" { + query.Set("since", options.Since) + } + + if options.Before != "" { + query.Set("before", options.Before) + } + + if options.Size { + query.Set("size", "1") + } + + if options.Filter.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filter) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/containers/json", query, nil) + if err != nil { + return nil, err + } + + var containers []types.Container + err = json.NewDecoder(resp.body).Decode(&containers) + ensureReaderClosed(resp) + return containers, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/container_logs.go b/vendor/src/github.com/docker/engine-api/client/container_logs.go new file mode 100644 index 00000000..08b9b918 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/container_logs.go @@ -0,0 +1,52 @@ +package client + +import ( + "io" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/engine-api/types" + timetypes "github.com/docker/engine-api/types/time" +) + +// ContainerLogs returns the logs generated by a container in an io.ReadCloser. +// It's up to the caller to close the stream. +func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/src/github.com/docker/engine-api/client/container_pause.go b/vendor/src/github.com/docker/engine-api/client/container_pause.go new file mode 100644 index 00000000..412067a7 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/container_pause.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ContainerPause pauses the main process of a given container without terminating it. +func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/src/github.com/docker/engine-api/client/container_remove.go b/vendor/src/github.com/docker/engine-api/client/container_remove.go new file mode 100644 index 00000000..cef4b812 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/container_remove.go @@ -0,0 +1,27 @@ +package client + +import ( + "net/url" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// ContainerRemove kills and removes a container from the docker host. 
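+//
+// A minimal sketch (cli, ctx, and the container name are assumptions):
+//
+//	err := cli.ContainerRemove(ctx, "mycontainer", types.ContainerRemoveOptions{
+//		Force: true,
+//		RemoveVolumes: true,
+//	})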
+func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error {
+	query := url.Values{}
+	if options.RemoveVolumes {
+		query.Set("v", "1")
+	}
+	if options.RemoveLinks {
+		query.Set("link", "1")
+	}
+
+	if options.Force {
+		query.Set("force", "1")
+	}
+
+	resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/container_rename.go b/vendor/src/github.com/docker/engine-api/client/container_rename.go
new file mode 100644
index 00000000..0e718da7
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/container_rename.go
@@ -0,0 +1,16 @@
+package client
+
+import (
+	"net/url"
+
+	"golang.org/x/net/context"
+)
+
+// ContainerRename changes the name of a given container.
+func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error {
+	query := url.Values{}
+	query.Set("name", newContainerName)
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/container_resize.go b/vendor/src/github.com/docker/engine-api/client/container_resize.go
new file mode 100644
index 00000000..b95d26b3
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/container_resize.go
@@ -0,0 +1,29 @@
+package client
+
+import (
+	"net/url"
+	"strconv"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerResize changes the size of the tty for a container.
+func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error {
+	return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width)
+}
+
+// ContainerExecResize changes the size of the tty for an exec process running inside a container.
+func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error {
+	return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width)
+}
+
+func (cli *Client) resize(ctx context.Context, basePath string, height, width int) error {
+	query := url.Values{}
+	query.Set("h", strconv.Itoa(height))
+	query.Set("w", strconv.Itoa(width))
+
+	resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/container_restart.go b/vendor/src/github.com/docker/engine-api/client/container_restart.go
new file mode 100644
index 00000000..93c042d0
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/container_restart.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+	"net/url"
+	"time"
+
+	timetypes "github.com/docker/engine-api/types/time"
+	"golang.org/x/net/context"
+)
+
+// ContainerRestart stops and starts a container again.
+// It makes the daemon wait for the container to be up again for
+// a specific amount of time, given the timeout.
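+//
+// An illustrative sketch (cli, ctx, and the container name are assumptions);
+// a nil timeout leaves the stop timeout to the daemon's default:
+//
+//	timeout := 10 * time.Second
+//	err := cli.ContainerRestart(ctx, "mycontainer", &timeout)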
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error {
+	query := url.Values{}
+	if timeout != nil {
+		query.Set("t", timetypes.DurationToSecondsString(*timeout))
+	}
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/container_start.go b/vendor/src/github.com/docker/engine-api/client/container_start.go
new file mode 100644
index 00000000..1e22eec6
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/container_start.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+)
+
+// ContainerStart sends a request to the docker daemon to start a container.
+func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error {
+	query := url.Values{}
+	if len(options.CheckpointID) != 0 {
+		query.Set("checkpoint", options.CheckpointID)
+	}
+
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/container_stats.go b/vendor/src/github.com/docker/engine-api/client/container_stats.go
new file mode 100644
index 00000000..2cc67c3a
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/container_stats.go
@@ -0,0 +1,24 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+)
+
+// ContainerStats returns near-realtime stats for a given container.
+// It's up to the caller to close the io.ReadCloser returned.
+func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error) {
+	query := url.Values{}
+	query.Set("stream", "0")
+	if stream {
+		query.Set("stream", "1")
+	}
+
+	resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil)
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/container_stop.go b/vendor/src/github.com/docker/engine-api/client/container_stop.go
new file mode 100644
index 00000000..1fc577f2
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/container_stop.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+	"net/url"
+	"time"
+
+	timetypes "github.com/docker/engine-api/types/time"
+	"golang.org/x/net/context"
+)
+
+// ContainerStop stops a container without forcibly killing its process
+// (see ContainerKill for that). The call blocks until the container stops
+// or the timeout expires.
+func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error {
+	query := url.Values{}
+	if timeout != nil {
+		query.Set("t", timetypes.DurationToSecondsString(*timeout))
+	}
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/container_top.go b/vendor/src/github.com/docker/engine-api/client/container_top.go
new file mode 100644
index 00000000..5ad926ae
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/container_top.go
@@ -0,0 +1,28 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+	"strings"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerTop shows process information from within a container.
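+//
+// A hedged sketch (cli, ctx, the container name, the ps arguments, and the
+// fmt import are assumptions; the arguments are passed through to the daemon):
+//
+//	procs, err := cli.ContainerTop(ctx, "mycontainer", []string{"aux"})
+//	if err == nil {
+//		fmt.Println(procs.Titles) // column titles; rows are in procs.Processes
+//	}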
+func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (types.ContainerProcessList, error) {
+	var response types.ContainerProcessList
+	query := url.Values{}
+	if len(arguments) > 0 {
+		query.Set("ps_args", strings.Join(arguments, " "))
+	}
+
+	resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil)
+	if err != nil {
+		return response, err
+	}
+
+	err = json.NewDecoder(resp.body).Decode(&response)
+	ensureReaderClosed(resp)
+	return response, err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/container_unpause.go b/vendor/src/github.com/docker/engine-api/client/container_unpause.go
new file mode 100644
index 00000000..5c762112
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/container_unpause.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ContainerUnpause resumes the process execution within a container.
+func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error {
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/container_update.go b/vendor/src/github.com/docker/engine-api/client/container_update.go
new file mode 100644
index 00000000..a5a1826d
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/container_update.go
@@ -0,0 +1,13 @@
+package client
+
+import (
+	"github.com/docker/engine-api/types/container"
+	"golang.org/x/net/context"
+)
+
+// ContainerUpdate updates the resources of a container.
+func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) error {
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/container_wait.go b/vendor/src/github.com/docker/engine-api/client/container_wait.go
new file mode 100644
index 00000000..c26ff3f3
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/container_wait.go
@@ -0,0 +1,26 @@
+package client
+
+import (
+	"encoding/json"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+)
+
+// ContainerWait pauses execution until a container exits.
+// It returns the container's exit status code.
+func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int, error) {
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil)
+	if err != nil {
+		return -1, err
+	}
+	defer ensureReaderClosed(resp)
+
+	var res types.ContainerWaitResponse
+	if err := json.NewDecoder(resp.body).Decode(&res); err != nil {
+		return -1, err
+	}
+
+	return res.StatusCode, nil
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/errors.go b/vendor/src/github.com/docker/engine-api/client/errors.go
new file mode 100644
index 00000000..e026320b
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/errors.go
@@ -0,0 +1,203 @@
+package client
+
+import (
+	"errors"
+	"fmt"
+)
+
+// ErrConnectionFailed is an error raised when the connection between the client and the server failed.
+var ErrConnectionFailed = errors.New("Cannot connect to the Docker daemon. Is the docker daemon running on this host?")
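+
+// For illustration (a hedged sketch, not upstream documentation): callers are
+// expected to compare against ErrConnectionFailed directly and to use the
+// IsErr*NotFound helpers defined below for typed not-found errors, e.g.
+//
+//	if _, err := cli.ContainerInspect(ctx, "no-such-id"); client.IsErrContainerNotFound(err) {
+//		// the container does not exist on the daemon
+//	}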
+
+type notFound interface {
+	error
+	NotFound() bool // Is the error a NotFound error
+}
+
+// IsErrNotFound returns true if the error is caused by an
+// object (image, container, network, volume, …) not being found in the docker host.
+func IsErrNotFound(err error) bool {
+	te, ok := err.(notFound)
+	return ok && te.NotFound()
+}
+
+// imageNotFoundError implements an error returned when an image is not in the docker host.
+type imageNotFoundError struct {
+	imageID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e imageNotFoundError) NotFound() bool {
+	return true
+}
+
+// Error returns a string representation of an imageNotFoundError
+func (e imageNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such image: %s", e.imageID)
+}
+
+// IsErrImageNotFound returns true if the error is caused
+// when an image is not found in the docker host.
+func IsErrImageNotFound(err error) bool {
+	return IsErrNotFound(err)
+}
+
+// containerNotFoundError implements an error returned when a container is not in the docker host.
+type containerNotFoundError struct {
+	containerID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e containerNotFoundError) NotFound() bool {
+	return true
+}
+
+// Error returns a string representation of a containerNotFoundError
+func (e containerNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such container: %s", e.containerID)
+}
+
+// IsErrContainerNotFound returns true if the error is caused
+// when a container is not found in the docker host.
+func IsErrContainerNotFound(err error) bool {
+	return IsErrNotFound(err)
+}
+
+// networkNotFoundError implements an error returned when a network is not in the docker host.
+type networkNotFoundError struct {
+	networkID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e networkNotFoundError) NotFound() bool {
+	return true
+}
+
+// Error returns a string representation of a networkNotFoundError
+func (e networkNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such network: %s", e.networkID)
+}
+
+// IsErrNetworkNotFound returns true if the error is caused
+// when a network is not found in the docker host.
+func IsErrNetworkNotFound(err error) bool {
+	return IsErrNotFound(err)
+}
+
+// volumeNotFoundError implements an error returned when a volume is not in the docker host.
+type volumeNotFoundError struct {
+	volumeID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e volumeNotFoundError) NotFound() bool {
+	return true
+}
+
+// Error returns a string representation of a volumeNotFoundError
+func (e volumeNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such volume: %s", e.volumeID)
+}
+
+// IsErrVolumeNotFound returns true if the error is caused
+// when a volume is not found in the docker host.
+func IsErrVolumeNotFound(err error) bool {
+	return IsErrNotFound(err)
+}
+
+// unauthorizedError represents an authorization error in a remote registry.
+type unauthorizedError struct {
+	cause error
+}
+
+// Error returns a string representation of an unauthorizedError
+func (u unauthorizedError) Error() string {
+	return u.cause.Error()
+}
+
+// IsErrUnauthorized returns true if the error is caused
+// when a remote registry authentication fails
+func IsErrUnauthorized(err error) bool {
+	_, ok := err.(unauthorizedError)
+	return ok
+}
+
+// nodeNotFoundError implements an error returned when a node is not found.
+type nodeNotFoundError struct {
+	nodeID string
+}
+
+// Error returns a string representation of a nodeNotFoundError
+func (e nodeNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such node: %s", e.nodeID)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e nodeNotFoundError) NotFound() bool {
+	return true
+}
+
+// IsErrNodeNotFound returns true if the error is caused
+// when a node is not found.
+func IsErrNodeNotFound(err error) bool {
+	_, ok := err.(nodeNotFoundError)
+	return ok
+}
+
+// serviceNotFoundError implements an error returned when a service is not found.
+type serviceNotFoundError struct {
+	serviceID string
+}
+
+// Error returns a string representation of a serviceNotFoundError
+func (e serviceNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such service: %s", e.serviceID)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e serviceNotFoundError) NotFound() bool {
+	return true
+}
+
+// IsErrServiceNotFound returns true if the error is caused
+// when a service is not found.
+func IsErrServiceNotFound(err error) bool {
+	_, ok := err.(serviceNotFoundError)
+	return ok
+}
+
+// taskNotFoundError implements an error returned when a task is not found.
+type taskNotFoundError struct {
+	taskID string
+}
+
+// Error returns a string representation of a taskNotFoundError
+func (e taskNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such task: %s", e.taskID)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e taskNotFoundError) NotFound() bool {
+	return true
+}
+
+// IsErrTaskNotFound returns true if the error is caused
+// when a task is not found.
+func IsErrTaskNotFound(err error) bool {
+	_, ok := err.(taskNotFoundError)
+	return ok
+}
+
+type pluginPermissionDenied struct {
+	name string
+}
+
+func (e pluginPermissionDenied) Error() string {
+	return "Permission denied while installing plugin " + e.name
+}
+
+// IsErrPluginPermissionDenied returns true if the error is caused
+// when a user denies a plugin's permissions
+func IsErrPluginPermissionDenied(err error) bool {
+	_, ok := err.(pluginPermissionDenied)
+	return ok
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/events.go b/vendor/src/github.com/docker/engine-api/client/events.go
new file mode 100644
index 00000000..f22a18e1
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/events.go
@@ -0,0 +1,48 @@
+package client
+
+import (
+	"io"
+	"net/url"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/filters"
+	timetypes "github.com/docker/engine-api/types/time"
+)
+
+// Events returns a stream of events in the daemon in a ReadCloser.
+// It's up to the caller to close the stream.
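+//
+// An illustrative sketch (cli, ctx, the relative "10m" timestamp, and the
+// io/os imports are assumptions); the body is a stream of JSON-encoded
+// event objects:
+//
+//	body, err := cli.Events(ctx, types.EventsOptions{Since: "10m"})
+//	if err == nil {
+//		defer body.Close()
+//		io.Copy(os.Stdout, body)
+//	}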
+func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error) { + query := url.Values{} + ref := time.Now() + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, ref) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, ref) + if err != nil { + return nil, err + } + query.Set("until", ts) + } + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + if err != nil { + return nil, err + } + query.Set("filters", filterJSON) + } + + serverResponse, err := cli.get(ctx, "/events", query, nil) + if err != nil { + return nil, err + } + return serverResponse.body, nil +} diff --git a/vendor/src/github.com/docker/engine-api/client/hijack.go b/vendor/src/github.com/docker/engine-api/client/hijack.go new file mode 100644 index 00000000..dbd91ef6 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/hijack.go @@ -0,0 +1,174 @@ +package client + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/http/httputil" + "net/url" + "strings" + "time" + + "github.com/docker/engine-api/types" + "github.com/docker/go-connections/sockets" + "golang.org/x/net/context" +) + +// tlsClientCon holds tls information and a dialed connection. +type tlsClientCon struct { + *tls.Conn + rawConn net.Conn +} + +func (c *tlsClientCon) CloseWrite() error { + // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it + // on its underlying connection. + if conn, ok := c.rawConn.(types.CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// postHijacked sends a POST request and hijacks the connection. +func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { + bodyEncoded, err := encodeData(body) + if err != nil { + return types.HijackedResponse{}, err + } + + req, err := cli.newRequest("POST", path, query, bodyEncoded, headers) + if err != nil { + return types.HijackedResponse{}, err + } + req.Host = cli.addr + + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", "tcp") + + conn, err := dial(cli.proto, cli.addr, cli.transport.TLSConfig()) + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") + } + return types.HijackedResponse{}, err + } + + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state. 
Setting TCP KeepAlive on the socket connection will prohibit
+	// ECONNTIMEOUT unless the socket connection truly is broken
+	if tcpConn, ok := conn.(*net.TCPConn); ok {
+		tcpConn.SetKeepAlive(true)
+		tcpConn.SetKeepAlivePeriod(30 * time.Second)
+	}
+
+	clientconn := httputil.NewClientConn(conn, nil)
+	defer clientconn.Close()
+
+	// Server hijacks the connection, error 'connection closed' expected
+	_, err = clientconn.Do(req)
+
+	rwc, br := clientconn.Hijack()
+
+	return types.HijackedResponse{Conn: rwc, Reader: br}, err
+}
+
+func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
+	return tlsDialWithDialer(new(net.Dialer), network, addr, config)
+}
+
+// We need to copy Go's implementation of tls.Dial (crypto/tls/tls.go) in
+// order to return our custom tlsClientCon struct which holds both the tls.Conn
+// object _and_ its underlying raw connection. The rationale for this is that
+// we need to be able to close the write end of the connection when attaching,
+// which tls.Conn does not provide.
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
+	// We want the Timeout and Deadline values from dialer to cover the
+	// whole process: TCP connection and TLS handshake. This means that we
+	// also need to start our own timers now.
+	timeout := dialer.Timeout
+
+	if !dialer.Deadline.IsZero() {
+		deadlineTimeout := dialer.Deadline.Sub(time.Now())
+		if timeout == 0 || deadlineTimeout < timeout {
+			timeout = deadlineTimeout
+		}
+	}
+
+	var errChannel chan error
+
+	if timeout != 0 {
+		errChannel = make(chan error, 2)
+		time.AfterFunc(timeout, func() {
+			errChannel <- errors.New("")
+		})
+	}
+
+	proxyDialer, err := sockets.DialerFromEnvironment(dialer)
+	if err != nil {
+		return nil, err
+	}
+
+	rawConn, err := proxyDialer.Dial(network, addr)
+	if err != nil {
+		return nil, err
+	}
+	// When we set up a TCP connection for hijack, there could be long periods
+	// of inactivity (a long running command with no output) that in certain
+	// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+	// state. Setting TCP KeepAlive on the socket connection will prohibit
+	// ECONNTIMEOUT unless the socket connection truly is broken
+	if tcpConn, ok := rawConn.(*net.TCPConn); ok {
+		tcpConn.SetKeepAlive(true)
+		tcpConn.SetKeepAlivePeriod(30 * time.Second)
+	}
+
+	colonPos := strings.LastIndex(addr, ":")
+	if colonPos == -1 {
+		colonPos = len(addr)
+	}
+	hostname := addr[:colonPos]
+
+	// If no ServerName is set, infer the ServerName
+	// from the hostname we're connecting to.
+	if config.ServerName == "" {
+		// Make a copy to avoid polluting argument or default.
+		c := *config
+		c.ServerName = hostname
+		config = &c
+	}
+
+	conn := tls.Client(rawConn, config)
+
+	if timeout == 0 {
+		err = conn.Handshake()
+	} else {
+		go func() {
+			errChannel <- conn.Handshake()
+		}()
+
+		err = <-errChannel
+	}
+
+	if err != nil {
+		rawConn.Close()
+		return nil, err
+	}
+
+	// This is where Docker differs from the standard crypto/tls package: we
+	// return a wrapper which holds both the TLS and raw connections.
+	return &tlsClientCon{conn, rawConn}, nil
+}
+
+func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) {
+	if tlsConfig != nil && proto != "unix" && proto != "npipe" {
+		// Note that this isn't the standard library's tls.Dial function
+		return tlsDial(proto, addr, tlsConfig)
+	}
+	if proto == "npipe" {
+		return sockets.DialPipe(addr, 32*time.Second)
+	}
+	return net.Dial(proto, addr)
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/image_build.go b/vendor/src/github.com/docker/engine-api/client/image_build.go
new file mode 100644
index 00000000..0ceb88cf
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/image_build.go
@@ -0,0 +1,119 @@
+package client
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"io"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/container"
+)
+
+var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`)
+
+// ImageBuild sends a request to the daemon to build images.
+// The Body in the response implements an io.ReadCloser and it's up to the caller to
+// close it.
+func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
+	query, err := imageBuildOptionsToQuery(options)
+	if err != nil {
+		return types.ImageBuildResponse{}, err
+	}
+
+	headers := http.Header(make(map[string][]string))
+	buf, err := json.Marshal(options.AuthConfigs)
+	if err != nil {
+		return types.ImageBuildResponse{}, err
+	}
+	headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
+	headers.Set("Content-Type", "application/tar")
+
+	serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers)
+	if err != nil {
+		return types.ImageBuildResponse{}, err
+	}
+
+	osType := getDockerOS(serverResp.header.Get("Server"))
+
+	return types.ImageBuildResponse{
+		Body:   serverResp.body,
+		OSType: osType,
+	}, nil
+}
+
+func imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) {
+	query := url.Values{
+		"t": options.Tags,
+	}
+	if options.SuppressOutput {
+		query.Set("q", "1")
+	}
+	if options.RemoteContext != "" {
+		query.Set("remote", options.RemoteContext)
+	}
+	if options.NoCache {
+		query.Set("nocache", "1")
+	}
+	if options.Remove {
+		query.Set("rm", "1")
+	} else {
+		query.Set("rm", "0")
+	}
+
+	if options.ForceRemove {
+		query.Set("forcerm", "1")
+	}
+
+	if options.PullParent {
+		query.Set("pull", "1")
+	}
+
+	if !container.Isolation.IsDefault(options.Isolation) {
+		query.Set("isolation", string(options.Isolation))
+	}
+
+	query.Set("cpusetcpus", options.CPUSetCPUs)
+	query.Set("cpusetmems", options.CPUSetMems)
+	query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10))
+	query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10))
+	query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10))
+	query.Set("memory", strconv.FormatInt(options.Memory, 10))
+	query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10))
+	query.Set("cgroupparent", options.CgroupParent)
+	query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10))
+	query.Set("dockerfile", options.Dockerfile)
+
+	ulimitsJSON, err := json.Marshal(options.Ulimits)
+	if err != nil {
+		return query, err
+	}
+	query.Set("ulimits", string(ulimitsJSON))
+
+	buildArgsJSON, err := json.Marshal(options.BuildArgs)
+	if err != nil {
+		return query, err
+	}
+	query.Set("buildargs", string(buildArgsJSON))
+
+	labelsJSON, err := json.Marshal(options.Labels)
+	if err != nil {
+		return query, err
+	}
+	query.Set("labels", string(labelsJSON))
+	return query, nil
+}
+
+func getDockerOS(serverHeader string) string {
+	var osType string
+	matches := headerRegexp.FindStringSubmatch(serverHeader)
+	if len(matches) > 0 {
+		osType = matches[1]
+	}
+	return osType
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/image_create.go b/vendor/src/github.com/docker/engine-api/client/image_create.go
new file mode 100644
index 00000000..6dfc0391
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/image_create.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/reference"
+)
+
+// ImageCreate creates a new image based on the parent options.
+// It returns the JSON content in the response body.
+func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) {
+	repository, tag, err := reference.Parse(parentReference)
+	if err != nil {
+		return nil, err
+	}
+
+	query := url.Values{}
+	query.Set("fromImage", repository)
+	query.Set("tag", tag)
+	resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
+
+func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) {
+	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+	return cli.post(ctx, "/images/create", query, nil, headers)
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/image_history.go b/vendor/src/github.com/docker/engine-api/client/image_history.go
new file mode 100644
index 00000000..b2840b5e
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/image_history.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ImageHistory returns the changes in an image in history format.
+func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]types.ImageHistory, error) {
+	var history []types.ImageHistory
+	serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil)
+	if err != nil {
+		return history, err
+	}
+
+	err = json.NewDecoder(serverResp.body).Decode(&history)
+	ensureReaderClosed(serverResp)
+	return history, err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/image_import.go b/vendor/src/github.com/docker/engine-api/client/image_import.go
new file mode 100644
index 00000000..4e8749a0
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/image_import.go
@@ -0,0 +1,37 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/distribution/reference"
+	"github.com/docker/engine-api/types"
+)
+
+// ImageImport creates a new image based on the source options.
+// It returns the JSON content in the response body.
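+//
+// A minimal usage sketch (assumes an initialized *Client cli, a context ctx,
+// and a tar stream r as an io.Reader; the image name "myrepo:imported" is
+// illustrative). SourceName "-" tells the daemon to read the image data from
+// the request body, i.e. from Source:
+//
+//	src := types.ImageImportSource{Source: r, SourceName: "-"}
+//	rc, err := cli.ImageImport(ctx, src, "myrepo:imported", types.ImageImportOptions{})
+//	if err == nil {
+//		defer rc.Close()
+//		io.Copy(os.Stdout, rc) // stream the daemon's JSON progress output
+//	}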
+func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + if ref != "" { + //Check if the given image name can be resolved + if _, err := reference.ParseNamed(ref); err != nil { + return nil, err + } + } + + query := url.Values{} + query.Set("fromSrc", source.SourceName) + query.Set("repo", ref) + query.Set("tag", options.Tag) + query.Set("message", options.Message) + for _, change := range options.Changes { + query.Add("changes", change) + } + + resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/src/github.com/docker/engine-api/client/image_inspect.go b/vendor/src/github.com/docker/engine-api/client/image_inspect.go new file mode 100644 index 00000000..859ba640 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/image_inspect.go @@ -0,0 +1,38 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// ImageInspectWithRaw returns the image information and its raw representation. +func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string, getSize bool) (types.ImageInspect, []byte, error) { + query := url.Values{} + if getSize { + query.Set("size", "1") + } + serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ImageInspect{}, nil, imageNotFoundError{imageID} + } + return types.ImageInspect{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return types.ImageInspect{}, nil, err + } + + var response types.ImageInspect + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/image_list.go b/vendor/src/github.com/docker/engine-api/client/image_list.go new file mode 100644 index 00000000..74082582 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/image_list.go @@ -0,0 +1,40 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" + "golang.org/x/net/context" +) + +// ImageList returns a list of images in the docker host. 
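+//
+// A minimal usage sketch (assumes an initialized *Client cli and a context ctx):
+//
+//	images, err := cli.ImageList(ctx, types.ImageListOptions{All: true})
+//	if err == nil {
+//		fmt.Println(len(images), "images found")
+//	}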
+func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error) { + var images []types.Image + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + if err != nil { + return images, err + } + query.Set("filters", filterJSON) + } + if options.MatchName != "" { + // FIXME rename this parameter, to not be confused with the filters flag + query.Set("filter", options.MatchName) + } + if options.All { + query.Set("all", "1") + } + + serverResp, err := cli.get(ctx, "/images/json", query, nil) + if err != nil { + return images, err + } + + err = json.NewDecoder(serverResp.body).Decode(&images) + ensureReaderClosed(serverResp) + return images, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/image_load.go b/vendor/src/github.com/docker/engine-api/client/image_load.go new file mode 100644 index 00000000..72f55fdc --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/image_load.go @@ -0,0 +1,30 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/engine-api/types" +) + +// ImageLoad loads an image in the docker host from the client host. +// It's up to the caller to close the io.ReadCloser in the +// ImageLoadResponse returned by this function. +func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + v := url.Values{} + v.Set("quiet", "0") + if quiet { + v.Set("quiet", "1") + } + headers := map[string][]string{"Content-Type": {"application/x-tar"}} + resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) + if err != nil { + return types.ImageLoadResponse{}, err + } + return types.ImageLoadResponse{ + Body: resp.body, + JSON: resp.header.Get("Content-Type") == "application/json", + }, nil +} diff --git a/vendor/src/github.com/docker/engine-api/client/image_pull.go b/vendor/src/github.com/docker/engine-api/client/image_pull.go new file mode 100644 index 00000000..e2c49ec5 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/image_pull.go @@ -0,0 +1,46 @@ +package client + +import ( + "io" + "net/http" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/reference" +) + +// ImagePull requests the docker host to pull an image from a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. 
+//
+// FIXME(vdemeester): this is currently used in a few ways in docker/docker:
+// - if not in trusted content, ref is used to pass the whole reference, and tag is empty
+// - if in trusted content, ref is used to pass the reference name, and tag for the digest
+func (cli *Client) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) {
+	repository, tag, err := reference.Parse(ref)
+	if err != nil {
+		return nil, err
+	}
+
+	query := url.Values{}
+	query.Set("fromImage", repository)
+	if tag != "" && !options.All {
+		query.Set("tag", tag)
+	}
+
+	resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
+	if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+		newAuthHeader, privilegeErr := options.PrivilegeFunc()
+		if privilegeErr != nil {
+			return nil, privilegeErr
+		}
+		resp, err = cli.tryImageCreate(ctx, query, newAuthHeader)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/image_push.go b/vendor/src/github.com/docker/engine-api/client/image_push.go
new file mode 100644
index 00000000..89191ee3
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/image_push.go
@@ -0,0 +1,54 @@
+package client
+
+import (
+	"errors"
+	"io"
+	"net/http"
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	distreference "github.com/docker/distribution/reference"
+	"github.com/docker/engine-api/types"
+)
+
+// ImagePush requests the docker host to push an image to a remote registry.
+// It executes the privileged function if the operation is unauthorized
+// and it tries one more time.
+// It's up to the caller to handle the io.ReadCloser and close it properly.
+func (cli *Client) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) {
+	distributionRef, err := distreference.ParseNamed(ref)
+	if err != nil {
+		return nil, err
+	}
+
+	if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical {
+		return nil, errors.New("cannot push a digest reference")
+	}
+
+	var tag = ""
+	if nameTaggedRef, isNamedTagged := distributionRef.(distreference.NamedTagged); isNamedTagged {
+		tag = nameTaggedRef.Tag()
+	}
+
+	query := url.Values{}
+	query.Set("tag", tag)
+
+	resp, err := cli.tryImagePush(ctx, distributionRef.Name(), query, options.RegistryAuth)
+	if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+		newAuthHeader, privilegeErr := options.PrivilegeFunc()
+		if privilegeErr != nil {
+			return nil, privilegeErr
+		}
+		resp, err = cli.tryImagePush(ctx, distributionRef.Name(), query, newAuthHeader)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
+
+func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (*serverResponse, error) {
+	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+	return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers)
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/image_remove.go b/vendor/src/github.com/docker/engine-api/client/image_remove.go
new file mode 100644
index 00000000..47224326
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/image_remove.go
@@ -0,0 +1,31 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ImageRemove removes an image from the docker host.
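+//
+// A minimal usage sketch (assumes an initialized *Client cli and a context ctx;
+// the image name is illustrative):
+//
+//	dels, err := cli.ImageRemove(ctx, "alpine:latest", types.ImageRemoveOptions{PruneChildren: true})
+//	if err == nil {
+//		fmt.Println(len(dels), "delete records returned")
+//	}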
+func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) {
+	query := url.Values{}
+
+	if options.Force {
+		query.Set("force", "1")
+	}
+	if !options.PruneChildren {
+		query.Set("noprune", "1")
+	}
+
+	resp, err := cli.delete(ctx, "/images/"+imageID, query, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	var dels []types.ImageDelete
+	err = json.NewDecoder(resp.body).Decode(&dels)
+	ensureReaderClosed(resp)
+	return dels, err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/image_save.go b/vendor/src/github.com/docker/engine-api/client/image_save.go
new file mode 100644
index 00000000..ecac880a
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/image_save.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+)
+
+// ImageSave retrieves one or more images from the docker host as an io.ReadCloser.
+// It's up to the caller to store the images and close the stream.
+func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) {
+	query := url.Values{
+		"names": imageIDs,
+	}
+
+	resp, err := cli.get(ctx, "/images/get", query, nil)
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/image_search.go b/vendor/src/github.com/docker/engine-api/client/image_search.go
new file mode 100644
index 00000000..3940dfd7
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/image_search.go
@@ -0,0 +1,51 @@
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/filters"
+	"github.com/docker/engine-api/types/registry"
+	"golang.org/x/net/context"
+)
+
+// ImageSearch makes the docker host search for a term in a remote registry.
+// The list of results is not sorted in any fashion.
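+//
+// A minimal usage sketch (assumes an initialized *Client cli and a context ctx;
+// the search term is illustrative):
+//
+//	results, err := cli.ImageSearch(ctx, "nginx", types.ImageSearchOptions{Limit: 10})
+//	if err == nil {
+//		for _, r := range results {
+//			fmt.Println(r.Name)
+//		}
+//	}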
+func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { + var results []registry.SearchResult + query := url.Values{} + query.Set("term", term) + query.Set("limit", fmt.Sprintf("%d", options.Limit)) + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return results, err + } + query.Set("filters", filterJSON) + } + + resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return results, privilegeErr + } + resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) + } + if err != nil { + return results, err + } + + err = json.NewDecoder(resp.body).Decode(&results) + ensureReaderClosed(resp) + return results, err +} + +func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/images/search", query, headers) +} diff --git a/vendor/src/github.com/docker/engine-api/client/image_tag.go b/vendor/src/github.com/docker/engine-api/client/image_tag.go new file mode 100644 index 00000000..71829136 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/image_tag.go @@ -0,0 +1,34 @@ +package client + +import ( + "errors" + "fmt" + "net/url" + + "golang.org/x/net/context" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/engine-api/types/reference" +) + +// ImageTag tags an image in the docker host +func (cli *Client) ImageTag(ctx context.Context, imageID, ref string) error { + distributionRef, err := distreference.ParseNamed(ref) + if err != nil { + return fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", ref) + } + + if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + + tag := reference.GetTagFromNamedRef(distributionRef) + + query := url.Values{} + query.Set("repo", distributionRef.Name()) + query.Set("tag", tag) + + resp, err := cli.post(ctx, "/images/"+imageID+"/tag", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/src/github.com/docker/engine-api/client/info.go b/vendor/src/github.com/docker/engine-api/client/info.go new file mode 100644 index 00000000..ff0958d6 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/info.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// Info returns information about the docker server. 
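+//
+// A minimal usage sketch (assumes an initialized *Client cli and a context ctx):
+//
+//	info, err := cli.Info(ctx)
+//	if err == nil {
+//		fmt.Printf("%+v\n", info)
+//	}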
+func (cli *Client) Info(ctx context.Context) (types.Info, error) { + var info types.Info + serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) + if err != nil { + return info, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { + return info, fmt.Errorf("Error reading remote info: %v", err) + } + + return info, nil +} diff --git a/vendor/src/github.com/docker/engine-api/client/interface.go b/vendor/src/github.com/docker/engine-api/client/interface.go new file mode 100644 index 00000000..2e0491ab --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/interface.go @@ -0,0 +1,135 @@ +package client + +import ( + "io" + "time" + + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/filters" + "github.com/docker/engine-api/types/network" + "github.com/docker/engine-api/types/registry" + "github.com/docker/engine-api/types/swarm" + "golang.org/x/net/context" +) + +// CommonAPIClient is the common methods between stable and experimental versions of APIClient. +type CommonAPIClient interface { + ContainerAPIClient + ImageAPIClient + NodeAPIClient + NetworkAPIClient + ServiceAPIClient + SwarmAPIClient + SystemAPIClient + VolumeAPIClient + ClientVersion() string + ServerVersion(ctx context.Context) (types.Version, error) + UpdateClientVersion(v string) +} + +// ContainerAPIClient defines API client methods for the containers +type ContainerAPIClient interface { + ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) + ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) + ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) + ContainerDiff(ctx context.Context, container string) ([]types.ContainerChange, error) + ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) + ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error) + ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) + ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error + ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error + ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) + ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) + ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) + ContainerKill(ctx context.Context, container, signal string) error + ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerPause(ctx context.Context, container string) error + ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error + ContainerRename(ctx context.Context, container, newContainerName string) error + ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error + ContainerRestart(ctx 
context.Context, container string, timeout *time.Duration) error + ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) + ContainerStats(ctx context.Context, container string, stream bool) (io.ReadCloser, error) + ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error + ContainerStop(ctx context.Context, container string, timeout *time.Duration) error + ContainerTop(ctx context.Context, container string, arguments []string) (types.ContainerProcessList, error) + ContainerUnpause(ctx context.Context, container string) error + ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) error + ContainerWait(ctx context.Context, container string) (int, error) + CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) + CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error +} + +// ImageAPIClient defines API client methods for the images +type ImageAPIClient interface { + ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) + ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) + ImageHistory(ctx context.Context, image string) ([]types.ImageHistory, error) + ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + ImageInspectWithRaw(ctx context.Context, image string, getSize bool) (types.ImageInspect, []byte, error) + ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error) + ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) + ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) + ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) + ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) + ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) + ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) + ImageTag(ctx context.Context, image, ref string) error +} + +// NetworkAPIClient defines API client methods for the networks +type NetworkAPIClient interface { + NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error + NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error + NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) + NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) + NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + NetworkRemove(ctx context.Context, networkID string) error +} + +// NodeAPIClient defines API client methods for the nodes +type NodeAPIClient interface { + NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) + NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) + NodeRemove(ctx context.Context, nodeID string) error + NodeUpdate(ctx context.Context, nodeID 
string, version swarm.Version, node swarm.NodeSpec) error +} + +// ServiceAPIClient defines API client methods for the services +type ServiceAPIClient interface { + ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) + ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) + ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) + ServiceRemove(ctx context.Context, serviceID string) error + ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) error + TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) + TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) +} + +// SwarmAPIClient defines API client methods for the swarm +type SwarmAPIClient interface { + SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) + SwarmJoin(ctx context.Context, req swarm.JoinRequest) error + SwarmLeave(ctx context.Context, force bool) error + SwarmInspect(ctx context.Context) (swarm.Swarm, error) + SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error +} + +// SystemAPIClient defines API client methods for the system +type SystemAPIClient interface { + Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error) + Info(ctx context.Context) (types.Info, error) + RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) +} + +// VolumeAPIClient defines API client methods for the volumes +type VolumeAPIClient interface { + VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error) + VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) + VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) + VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error) + VolumeRemove(ctx context.Context, volumeID string) error +} diff --git a/vendor/src/github.com/docker/engine-api/client/interface_experimental.go b/vendor/src/github.com/docker/engine-api/client/interface_experimental.go new file mode 100644 index 00000000..eb0cd7bf --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/interface_experimental.go @@ -0,0 +1,37 @@ +// +build experimental + +package client + +import ( + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// APIClient is an interface that clients that talk with a docker server must implement. 
+type APIClient interface { + CommonAPIClient + CheckpointAPIClient + PluginAPIClient +} + +// CheckpointAPIClient defines API client methods for the checkpoints +type CheckpointAPIClient interface { + CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error + CheckpointDelete(ctx context.Context, container string, checkpointID string) error + CheckpointList(ctx context.Context, container string) ([]types.Checkpoint, error) +} + +// PluginAPIClient defines API client methods for the plugins +type PluginAPIClient interface { + PluginList(ctx context.Context) (types.PluginsListResponse, error) + PluginRemove(ctx context.Context, name string) error + PluginEnable(ctx context.Context, name string) error + PluginDisable(ctx context.Context, name string) error + PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) error + PluginPush(ctx context.Context, name string, registryAuth string) error + PluginSet(ctx context.Context, name string, args []string) error + PluginInspect(ctx context.Context, name string) (*types.Plugin, error) +} + +// Ensure that Client always implements APIClient. +var _ APIClient = &Client{} diff --git a/vendor/src/github.com/docker/engine-api/client/interface_stable.go b/vendor/src/github.com/docker/engine-api/client/interface_stable.go new file mode 100644 index 00000000..496f522d --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/interface_stable.go @@ -0,0 +1,11 @@ +// +build !experimental + +package client + +// APIClient is an interface that clients that talk with a docker server must implement. +type APIClient interface { + CommonAPIClient +} + +// Ensure that Client always implements APIClient. +var _ APIClient = &Client{} diff --git a/vendor/src/github.com/docker/engine-api/client/login.go b/vendor/src/github.com/docker/engine-api/client/login.go new file mode 100644 index 00000000..482f9478 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/login.go @@ -0,0 +1,28 @@ +package client + +import ( + "encoding/json" + "net/http" + "net/url" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// RegistryLogin authenticates the docker server with a given docker registry. +// It returns UnauthorizerError when the authentication fails. +func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) { + resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + + if resp != nil && resp.statusCode == http.StatusUnauthorized { + return types.AuthResponse{}, unauthorizedError{err} + } + if err != nil { + return types.AuthResponse{}, err + } + + var response types.AuthResponse + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/network_connect.go b/vendor/src/github.com/docker/engine-api/client/network_connect.go new file mode 100644 index 00000000..9a402a3e --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/network_connect.go @@ -0,0 +1,18 @@ +package client + +import ( + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/network" + "golang.org/x/net/context" +) + +// NetworkConnect connects a container to an existent network in the docker host. 
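+//
+// A minimal usage sketch (assumes an initialized *Client cli and a context ctx;
+// the IDs are illustrative, and a nil *network.EndpointSettings leaves the
+// endpoint configuration to the daemon's defaults):
+//
+//	err := cli.NetworkConnect(ctx, "my-network-id", "my-container-id", nil)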
+func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error {
+	nc := types.NetworkConnect{
+		Container:      containerID,
+		EndpointConfig: config,
+	}
+	resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/network_create.go b/vendor/src/github.com/docker/engine-api/client/network_create.go
new file mode 100644
index 00000000..c9c0b9fd
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/network_create.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+	"encoding/json"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// NetworkCreate creates a new network in the docker host.
+func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) {
+	networkCreateRequest := types.NetworkCreateRequest{
+		NetworkCreate: options,
+		Name:          name,
+	}
+	var response types.NetworkCreateResponse
+	serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil)
+	if err != nil {
+		return response, err
+	}
+
+	err = json.NewDecoder(serverResp.body).Decode(&response)
+	ensureReaderClosed(serverResp)
+	return response, err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/network_disconnect.go b/vendor/src/github.com/docker/engine-api/client/network_disconnect.go
new file mode 100644
index 00000000..a3e33672
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/network_disconnect.go
@@ -0,0 +1,14 @@
+package client
+
+import (
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// NetworkDisconnect disconnects a container from an existent network in the docker host.
+func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error {
+	nd := types.NetworkDisconnect{Container: containerID, Force: force}
+	resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/network_inspect.go b/vendor/src/github.com/docker/engine-api/client/network_inspect.go
new file mode 100644
index 00000000..e22fcd67
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/network_inspect.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// NetworkInspect returns the information for a specific network configured in the docker host.
+func (cli *Client) NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) {
+	networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID)
+	return networkResource, err
+}
+
+// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation.
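+//
+// A minimal usage sketch (assumes an initialized *Client cli and a context ctx;
+// the network ID is illustrative):
+//
+//	res, raw, err := cli.NetworkInspectWithRaw(ctx, "my-network-id")
+//	if err == nil {
+//		fmt.Println(res.Name, "-", len(raw), "raw bytes")
+//	}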
+func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) { + var networkResource types.NetworkResource + resp, err := cli.get(ctx, "/networks/"+networkID, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return networkResource, nil, networkNotFoundError{networkID} + } + return networkResource, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return networkResource, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&networkResource) + return networkResource, body, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/network_list.go b/vendor/src/github.com/docker/engine-api/client/network_list.go new file mode 100644 index 00000000..05695524 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/network_list.go @@ -0,0 +1,31 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" + "golang.org/x/net/context" +) + +// NetworkList returns the list of networks configured in the docker host. +func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + query := url.Values{} + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + var networkResources []types.NetworkResource + resp, err := cli.get(ctx, "/networks", query, nil) + if err != nil { + return networkResources, err + } + err = json.NewDecoder(resp.body).Decode(&networkResources) + ensureReaderClosed(resp) + return networkResources, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/network_remove.go b/vendor/src/github.com/docker/engine-api/client/network_remove.go new file mode 100644 index 00000000..6bd67489 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/network_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// NetworkRemove removes an existent network from the docker host. +func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { + resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/src/github.com/docker/engine-api/client/node_inspect.go b/vendor/src/github.com/docker/engine-api/client/node_inspect.go new file mode 100644 index 00000000..5f555bb3 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/node_inspect.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/engine-api/types/swarm" + "golang.org/x/net/context" +) + +// NodeInspectWithRaw returns the node information. 
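+//
+// A minimal usage sketch (assumes an initialized *Client cli and a context ctx;
+// the node ID is illustrative):
+//
+//	node, raw, err := cli.NodeInspectWithRaw(ctx, "my-node-id")
+//	if err == nil {
+//		fmt.Println(node.ID, "-", len(raw), "raw bytes")
+//	}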
+func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { + serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Node{}, nil, nodeNotFoundError{nodeID} + } + return swarm.Node{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Node{}, nil, err + } + + var response swarm.Node + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/node_list.go b/vendor/src/github.com/docker/engine-api/client/node_list.go new file mode 100644 index 00000000..57cf1482 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/node_list.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" + "github.com/docker/engine-api/types/swarm" + "golang.org/x/net/context" +) + +// NodeList returns the list of nodes. +func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + query := url.Values{} + + if options.Filter.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filter) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/nodes", query, nil) + if err != nil { + return nil, err + } + + var nodes []swarm.Node + err = json.NewDecoder(resp.body).Decode(&nodes) + ensureReaderClosed(resp) + return nodes, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/node_remove.go b/vendor/src/github.com/docker/engine-api/client/node_remove.go new file mode 100644 index 00000000..a22ee93f --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/node_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// NodeRemove removes a Node. +func (cli *Client) NodeRemove(ctx context.Context, nodeID string) error { + resp, err := cli.delete(ctx, "/nodes/"+nodeID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/src/github.com/docker/engine-api/client/node_update.go b/vendor/src/github.com/docker/engine-api/client/node_update.go new file mode 100644 index 00000000..47222115 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/node_update.go @@ -0,0 +1,18 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/engine-api/types/swarm" + "golang.org/x/net/context" +) + +// NodeUpdate updates a Node. 
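+//
+// A minimal usage sketch (assumes an initialized *Client cli, a context ctx,
+// and the swarm.NodeAvailabilityDrain constant from the types/swarm package;
+// the node ID is illustrative). The version from a prior inspect guards
+// against concurrent updates:
+//
+//	node, _, err := cli.NodeInspectWithRaw(ctx, "my-node-id")
+//	if err == nil {
+//		spec := node.Spec
+//		spec.Availability = swarm.NodeAvailabilityDrain
+//		err = cli.NodeUpdate(ctx, node.ID, node.Version, spec)
+//	}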
+func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/src/github.com/docker/engine-api/client/plugin_disable.go b/vendor/src/github.com/docker/engine-api/client/plugin_disable.go new file mode 100644 index 00000000..893fc6e8 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/plugin_disable.go @@ -0,0 +1,14 @@ +// +build experimental + +package client + +import ( + "golang.org/x/net/context" +) + +// PluginDisable disables a plugin +func (cli *Client) PluginDisable(ctx context.Context, name string) error { + resp, err := cli.post(ctx, "/plugins/"+name+"/disable", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/src/github.com/docker/engine-api/client/plugin_enable.go b/vendor/src/github.com/docker/engine-api/client/plugin_enable.go new file mode 100644 index 00000000..84422abc --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/plugin_enable.go @@ -0,0 +1,14 @@ +// +build experimental + +package client + +import ( + "golang.org/x/net/context" +) + +// PluginEnable enables a plugin +func (cli *Client) PluginEnable(ctx context.Context, name string) error { + resp, err := cli.post(ctx, "/plugins/"+name+"/enable", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/src/github.com/docker/engine-api/client/plugin_inspect.go b/vendor/src/github.com/docker/engine-api/client/plugin_inspect.go new file mode 100644 index 00000000..b4bcc200 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/plugin_inspect.go @@ -0,0 +1,22 @@ +// +build experimental + +package client + +import ( + "encoding/json" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// PluginInspect inspects an existing plugin +func (cli *Client) PluginInspect(ctx context.Context, name string) (*types.Plugin, error) { + var p types.Plugin + resp, err := cli.get(ctx, "/plugins/"+name, nil, nil) + if err != nil { + return nil, err + } + err = json.NewDecoder(resp.body).Decode(&p) + ensureReaderClosed(resp) + return &p, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/plugin_install.go b/vendor/src/github.com/docker/engine-api/client/plugin_install.go new file mode 100644 index 00000000..3f5e59ff --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/plugin_install.go @@ -0,0 +1,59 @@ +// +build experimental + +package client + +import ( + "encoding/json" + "net/http" + "net/url" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// PluginInstall installs a plugin +func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) error { + // FIXME(vdemeester) name is a ref, we might want to parse/validate it here. 
+ query := url.Values{} + query.Set("name", name) + resp, err := cli.tryPluginPull(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + ensureReaderClosed(resp) + return privilegeErr + } + resp, err = cli.tryPluginPull(ctx, query, newAuthHeader) + } + if err != nil { + ensureReaderClosed(resp) + return err + } + var privileges types.PluginPrivileges + if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { + ensureReaderClosed(resp) + return err + } + ensureReaderClosed(resp) + + if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { + accept, err := options.AcceptPermissionsFunc(privileges) + if err != nil { + return err + } + if !accept { + resp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) + ensureReaderClosed(resp) + return pluginPermissionDenied{name} + } + } + if options.Disabled { + return nil + } + return cli.PluginEnable(ctx, name) +} + +func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/plugins/pull", query, nil, headers) +} diff --git a/vendor/src/github.com/docker/engine-api/client/plugin_list.go b/vendor/src/github.com/docker/engine-api/client/plugin_list.go new file mode 100644 index 00000000..7f2e2f21 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/plugin_list.go @@ -0,0 +1,23 @@ +// +build experimental + +package client + +import ( + "encoding/json" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// PluginList returns the installed plugins +func (cli *Client) PluginList(ctx context.Context) (types.PluginsListResponse, error) { + var plugins types.PluginsListResponse + resp, err := cli.get(ctx, "/plugins", nil, nil) + if err != nil { + return plugins, err + } + + err = json.NewDecoder(resp.body).Decode(&plugins) + ensureReaderClosed(resp) + return plugins, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/plugin_push.go b/vendor/src/github.com/docker/engine-api/client/plugin_push.go new file mode 100644 index 00000000..3afea5ed --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/plugin_push.go @@ -0,0 +1,15 @@ +// +build experimental + +package client + +import ( + "golang.org/x/net/context" +) + +// PluginPush pushes a plugin to a registry +func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) error { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/src/github.com/docker/engine-api/client/plugin_remove.go b/vendor/src/github.com/docker/engine-api/client/plugin_remove.go new file mode 100644 index 00000000..baf66655 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/plugin_remove.go @@ -0,0 +1,14 @@ +// +build experimental + +package client + +import ( + "golang.org/x/net/context" +) + +// PluginRemove removes a plugin +func (cli *Client) PluginRemove(ctx context.Context, name string) error { + resp, err := cli.delete(ctx, "/plugins/"+name, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/src/github.com/docker/engine-api/client/plugin_set.go b/vendor/src/github.com/docker/engine-api/client/plugin_set.go new 
file mode 100644
index 00000000..fb40f38b
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/plugin_set.go
@@ -0,0 +1,14 @@
+// +build experimental

+package client
+
+import (
+	"golang.org/x/net/context"
+)
+
+// PluginSet modifies settings for an existing plugin
+func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error {
+	resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/request.go b/vendor/src/github.com/docker/engine-api/client/request.go
new file mode 100644
index 00000000..85490155
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/request.go
@@ -0,0 +1,207 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/docker/engine-api/client/transport/cancellable"
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/versions"
+	"golang.org/x/net/context"
+)
+
+// serverResponse is a wrapper for http API responses.
+type serverResponse struct {
+	body       io.ReadCloser
+	header     http.Header
+	statusCode int
+}
+
+// head sends an http request to the docker API using the method HEAD.
+func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) {
+	return cli.sendRequest(ctx, "HEAD", path, query, nil, headers)
+}
+
+// get sends an http request to the docker API using the method GET with a specific go context.
+func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) {
+	return cli.sendRequest(ctx, "GET", path, query, nil, headers)
+}
+
+// post sends an http request to the docker API using the method POST with a specific go context.
+func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) {
+	return cli.sendRequest(ctx, "POST", path, query, obj, headers)
+}
+
+// postRaw sends an http request with a raw body to the docker API using the method POST.
+func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) {
+	return cli.sendClientRequest(ctx, "POST", path, query, body, headers)
+}
+
+// put sends an http request to the docker API using the method PUT.
+func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) {
+	return cli.sendRequest(ctx, "PUT", path, query, obj, headers)
+}
+
+// putRaw sends an http request with a raw body to the docker API using the method PUT.
+func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) {
+	return cli.sendClientRequest(ctx, "PUT", path, query, body, headers)
+}
+
+// delete sends an http request to the docker API using the method DELETE.
+func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) { + return cli.sendRequest(ctx, "DELETE", path, query, nil, headers) +} + +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) { + var body io.Reader + + if obj != nil { + var err error + body, err = encodeData(obj) + if err != nil { + return nil, err + } + if headers == nil { + headers = make(map[string][]string) + } + headers["Content-Type"] = []string{"application/json"} + } + + return cli.sendClientRequest(ctx, method, path, query, body, headers) +} + +func (cli *Client) sendClientRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) { + serverResp := &serverResponse{ + body: nil, + statusCode: -1, + } + + expectedPayload := (method == "POST" || method == "PUT") + if expectedPayload && body == nil { + body = bytes.NewReader([]byte{}) + } + + req, err := cli.newRequest(method, path, query, body, headers) + if err != nil { + return serverResp, err + } + + if cli.proto == "unix" || cli.proto == "npipe" { + // For local communications, it doesn't matter what the host is. We just + // need a valid and meaningful host name. (See #189) + req.Host = "docker" + } + req.URL.Host = cli.addr + req.URL.Scheme = cli.transport.Scheme() + + if expectedPayload && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "text/plain") + } + + resp, err := cancellable.Do(ctx, cli.transport, req) + if err != nil { + if isTimeout(err) || strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { + return serverResp, ErrConnectionFailed + } + + if !cli.transport.Secure() && strings.Contains(err.Error(), "malformed HTTP response") { + return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) + } + + if cli.transport.Secure() && strings.Contains(err.Error(), "bad certificate") { + return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. 
Please check your TLS client certification settings: %v", err) + } + + return serverResp, fmt.Errorf("An error occurred trying to connect: %v", err) + } + + if resp != nil { + serverResp.statusCode = resp.StatusCode + } + + if serverResp.statusCode < 200 || serverResp.statusCode >= 400 { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return serverResp, err + } + if len(body) == 0 { + return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL) + } + + var errorMessage string + if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && + resp.Header.Get("Content-Type") == "application/json" { + var errorResponse types.ErrorResponse + if err := json.Unmarshal(body, &errorResponse); err != nil { + return serverResp, fmt.Errorf("Error reading JSON: %v", err) + } + errorMessage = errorResponse.Message + } else { + errorMessage = string(body) + } + + return serverResp, fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage)) + } + + serverResp.body = resp.Body + serverResp.header = resp.Header + return serverResp, nil +} + +func (cli *Client) newRequest(method, path string, query url.Values, body io.Reader, headers map[string][]string) (*http.Request, error) { + apiPath := cli.getAPIPath(path, query) + req, err := http.NewRequest(method, apiPath, body) + if err != nil { + return nil, err + } + + // Add CLI Config's HTTP Headers BEFORE we set the Docker headers + // then the user can't change OUR headers + for k, v := range cli.customHTTPHeaders { + req.Header.Set(k, v) + } + + if headers != nil { + for k, v := range headers { + req.Header[k] = v + } + } + + return req, nil +} + +func encodeData(data interface{}) (*bytes.Buffer, error) { + params := bytes.NewBuffer(nil) + if data != nil { + if err := json.NewEncoder(params).Encode(data); err != nil { + return nil, err + } + } + return params, nil +} + +func ensureReaderClosed(response *serverResponse) { + if response != nil && response.body != nil { + // Drain up to 512 bytes and close the body to let the Transport reuse the connection + io.CopyN(ioutil.Discard, response.body, 512) + response.body.Close() + } +} + +func isTimeout(err error) bool { + type timeout interface { + Timeout() bool + } + e := err + switch urlErr := err.(type) { + case *url.Error: + e = urlErr.Err + } + t, ok := e.(timeout) + return ok && t.Timeout() +} diff --git a/vendor/src/github.com/docker/engine-api/client/service_create.go b/vendor/src/github.com/docker/engine-api/client/service_create.go new file mode 100644 index 00000000..7349a984 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/service_create.go @@ -0,0 +1,30 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceCreate creates a new Service. 
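+//
+// A minimal usage sketch (assumes an initialized *Client cli and a context ctx;
+// the service name is illustrative, and Name is set through the embedded
+// swarm.Annotations):
+//
+//	spec := swarm.ServiceSpec{}
+//	spec.Name = "web"
+//	resp, err := cli.ServiceCreate(ctx, spec, types.ServiceCreateOptions{})
+//	if err == nil {
+//		fmt.Println("created service", resp.ID)
+//	}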
+func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { + var headers map[string][]string + + if options.EncodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": []string{options.EncodedRegistryAuth}, + } + } + + var response types.ServiceCreateResponse + resp, err := cli.post(ctx, "/services/create", nil, service, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/service_inspect.go b/vendor/src/github.com/docker/engine-api/client/service_inspect.go new file mode 100644 index 00000000..958cd662 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/service_inspect.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/engine-api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceInspectWithRaw returns the service information and the raw data. +func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) { + serverResp, err := cli.get(ctx, "/services/"+serviceID, nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Service{}, nil, serviceNotFoundError{serviceID} + } + return swarm.Service{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Service{}, nil, err + } + + var response swarm.Service + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/service_list.go b/vendor/src/github.com/docker/engine-api/client/service_list.go new file mode 100644 index 00000000..b48964aa --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/service_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" + "github.com/docker/engine-api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceList returns the list of services. +func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + query := url.Values{} + + if options.Filter.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filter) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/services", query, nil) + if err != nil { + return nil, err + } + + var services []swarm.Service + err = json.NewDecoder(resp.body).Decode(&services) + ensureReaderClosed(resp) + return services, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/service_remove.go b/vendor/src/github.com/docker/engine-api/client/service_remove.go new file mode 100644 index 00000000..a9331f92 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/service_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ServiceRemove kills and removes a service. 
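+//
+// A minimal usage sketch (assumes an initialized *Client cli and a context ctx;
+// the service ID is illustrative):
+//
+//	if err := cli.ServiceRemove(ctx, "my-service-id"); err != nil {
+//		fmt.Println("remove failed:", err)
+//	}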
+func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { + resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/src/github.com/docker/engine-api/client/service_update.go b/vendor/src/github.com/docker/engine-api/client/service_update.go new file mode 100644 index 00000000..ee8b4612 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/service_update.go @@ -0,0 +1,30 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceUpdate updates a Service. +func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) error { + var ( + headers map[string][]string + query = url.Values{} + ) + + if options.EncodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": []string{options.EncodedRegistryAuth}, + } + } + + query.Set("version", strconv.FormatUint(version.Index, 10)) + + resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/src/github.com/docker/engine-api/client/swarm_init.go b/vendor/src/github.com/docker/engine-api/client/swarm_init.go new file mode 100644 index 00000000..68f0a744 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/swarm_init.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/engine-api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmInit initializes the Swarm. +func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { + serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + if err != nil { + return "", err + } + + var response string + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/swarm_inspect.go b/vendor/src/github.com/docker/engine-api/client/swarm_inspect.go new file mode 100644 index 00000000..d67c7c01 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/swarm_inspect.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/engine-api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmInspect inspects the Swarm. +func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { + serverResp, err := cli.get(ctx, "/swarm", nil, nil) + if err != nil { + return swarm.Swarm{}, err + } + + var response swarm.Swarm + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/swarm_join.go b/vendor/src/github.com/docker/engine-api/client/swarm_join.go new file mode 100644 index 00000000..a9b14e0c --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/swarm_join.go @@ -0,0 +1,13 @@ +package client + +import ( + "github.com/docker/engine-api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmJoin joins the Swarm. 
+func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error {
+ resp, err := cli.post(ctx, "/swarm/join", nil, req, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/swarm_leave.go b/vendor/src/github.com/docker/engine-api/client/swarm_leave.go
new file mode 100644
index 00000000..a4df7321
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/swarm_leave.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// SwarmLeave leaves the Swarm.
+func (cli *Client) SwarmLeave(ctx context.Context, force bool) error {
+ query := url.Values{}
+ if force {
+ query.Set("force", "1")
+ }
+ resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/swarm_update.go b/vendor/src/github.com/docker/engine-api/client/swarm_update.go
new file mode 100644
index 00000000..5adec81c
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/swarm_update.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "fmt"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SwarmUpdate updates the Swarm.
+func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error {
+ query := url.Values{}
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+ query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken))
+ query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken))
+ resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/task_inspect.go b/vendor/src/github.com/docker/engine-api/client/task_inspect.go
new file mode 100644
index 00000000..3cac8882
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/task_inspect.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/engine-api/types/swarm"
+
+ "golang.org/x/net/context"
+)
+
+// TaskInspectWithRaw returns the task information and its raw representation.
+func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) {
+ serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return swarm.Task{}, nil, taskNotFoundError{taskID}
+ }
+ return swarm.Task{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return swarm.Task{}, nil, err
+ }
+
+ var response swarm.Task
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/task_list.go b/vendor/src/github.com/docker/engine-api/client/task_list.go
new file mode 100644
index 00000000..4604513c
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/task_list.go
@@ -0,0 +1,35 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/filters"
+ "github.com/docker/engine-api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// TaskList returns the list of tasks.
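+//
+// Usage sketch (the filter below is hypothetical; it relies on the vendored
+// filters package):
+//
+//	f := filters.NewArgs()
+//	f.Add("desired-state", "running")
+//	tasks, err := cli.TaskList(ctx, types.TaskListOptions{Filter: f})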
+func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + query := url.Values{} + + if options.Filter.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filter) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/tasks", query, nil) + if err != nil { + return nil, err + } + + var tasks []swarm.Task + err = json.NewDecoder(resp.body).Decode(&tasks) + ensureReaderClosed(resp) + return tasks, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/transport/cancellable/LICENSE b/vendor/src/github.com/docker/engine-api/client/transport/cancellable/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/transport/cancellable/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/src/github.com/docker/engine-api/client/transport/cancellable/canceler.go b/vendor/src/github.com/docker/engine-api/client/transport/cancellable/canceler.go new file mode 100644 index 00000000..11dff600 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/transport/cancellable/canceler.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.5 + +package cancellable + +import ( + "net/http" + + "github.com/docker/engine-api/client/transport" +) + +func canceler(client transport.Sender, req *http.Request) func() { + // TODO(djd): Respect any existing value of req.Cancel. 
+ ch := make(chan struct{})
+ req.Cancel = ch
+
+ return func() {
+ close(ch)
+ }
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go b/vendor/src/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go
new file mode 100644
index 00000000..8ff2845c
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go
@@ -0,0 +1,27 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.5
+
+package cancellable
+
+import (
+ "net/http"
+
+ "github.com/docker/engine-api/client/transport"
+)
+
+type requestCanceler interface {
+ CancelRequest(*http.Request)
+}
+
+func canceler(client transport.Sender, req *http.Request) func() {
+ rc, ok := client.(requestCanceler)
+ if !ok {
+ return func() {}
+ }
+ return func() {
+ rc.CancelRequest(req)
+ }
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/transport/cancellable/cancellable.go b/vendor/src/github.com/docker/engine-api/client/transport/cancellable/cancellable.go
new file mode 100644
index 00000000..13941495
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/transport/cancellable/cancellable.go
@@ -0,0 +1,113 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cancellable provides helper functions to cancel http requests.
+package cancellable
+
+import (
+ "io"
+ "net/http"
+
+ "github.com/docker/engine-api/client/transport"
+
+ "golang.org/x/net/context"
+)
+
+func nop() {}
+
+var (
+ testHookContextDoneBeforeHeaders = nop
+ testHookDoReturned = nop
+ testHookDidBodyClose = nop
+)
+
+// Do sends an HTTP request with the provided transport.Sender and returns an HTTP response.
+// If the client is nil, http.DefaultClient is used.
+// If the context is canceled or times out, ctx.Err() will be returned.
+//
+// FORK INFORMATION:
+//
+// This function deviates from the upstream version in golang.org/x/net/context/ctxhttp by
+// taking a Sender interface rather than a *http.Client directly. That allows us to use
+// this function with mocked clients and hijacked connections.
+func Do(ctx context.Context, client transport.Sender, req *http.Request) (*http.Response, error) {
+ if client == nil {
+ client = http.DefaultClient
+ }
+
+ // Request cancelation changed in Go 1.5, see canceler.go and canceler_go14.go.
+ cancel := canceler(client, req)
+
+ type responseAndError struct {
+ resp *http.Response
+ err error
+ }
+ result := make(chan responseAndError, 1)
+
+ go func() {
+ resp, err := client.Do(req)
+ testHookDoReturned()
+ result <- responseAndError{resp, err}
+ }()
+
+ var resp *http.Response
+
+ select {
+ case <-ctx.Done():
+ testHookContextDoneBeforeHeaders()
+ cancel()
+ // Clean up after the goroutine calling client.Do:
+ go func() {
+ if r := <-result; r.resp != nil && r.resp.Body != nil {
+ testHookDidBodyClose()
+ r.resp.Body.Close()
+ }
+ }()
+ return nil, ctx.Err()
+ case r := <-result:
+ var err error
+ resp, err = r.resp, r.err
+ if err != nil {
+ return resp, err
+ }
+ }
+
+ c := make(chan struct{})
+ go func() {
+ select {
+ case <-ctx.Done():
+ cancel()
+ case <-c:
+ // The response's Body is closed.
+ }
+ }()
+ resp.Body = &notifyingReader{resp.Body, c}
+
+ return resp, nil
+}
+
+// notifyingReader is an io.ReadCloser that closes the notify channel after
+// Close is called or a Read fails on the underlying ReadCloser.
+type notifyingReader struct {
+ io.ReadCloser
+ notify chan<- struct{}
+}
+
+func (r *notifyingReader) Read(p []byte) (int, error) {
+ n, err := r.ReadCloser.Read(p)
+ if err != nil && r.notify != nil {
+ close(r.notify)
+ r.notify = nil
+ }
+ return n, err
+}
+
+func (r *notifyingReader) Close() error {
+ err := r.ReadCloser.Close()
+ if r.notify != nil {
+ close(r.notify)
+ r.notify = nil
+ }
+ return err
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/transport/client.go b/vendor/src/github.com/docker/engine-api/client/transport/client.go
new file mode 100644
index 00000000..13d4b3ab
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/transport/client.go
@@ -0,0 +1,47 @@
+package transport
+
+import (
+ "crypto/tls"
+ "net/http"
+)
+
+// Sender is an interface that clients must implement
+// to be able to send requests to a remote connection.
+type Sender interface {
+ // Do sends request to a remote endpoint.
+ Do(*http.Request) (*http.Response, error)
+}
+
+// Client is an interface that abstracts all remote connections.
+type Client interface {
+ Sender
+ // Secure tells whether the connection is secure or not.
+ Secure() bool
+ // Scheme returns the connection protocol the client uses.
+ Scheme() string
+ // TLSConfig returns any TLS configuration the client uses.
+ TLSConfig() *tls.Config
+}
+
+// tlsInfo holds information about the TLS configuration.
+type tlsInfo struct {
+ tlsConfig *tls.Config
+}
+
+// TLSConfig returns the TLS configuration.
+func (t *tlsInfo) TLSConfig() *tls.Config {
+ return t.tlsConfig
+}
+
+// Scheme returns protocol scheme to use.
+func (t *tlsInfo) Scheme() string {
+ if t.tlsConfig != nil {
+ return "https"
+ }
+ return "http"
+}
+
+// Secure returns true if there is a TLS configuration.
+func (t *tlsInfo) Secure() bool {
+ return t.tlsConfig != nil
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/transport/transport.go b/vendor/src/github.com/docker/engine-api/client/transport/transport.go
new file mode 100644
index 00000000..ff28af18
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/client/transport/transport.go
@@ -0,0 +1,57 @@
+// Package transport provides functions to send requests to remote endpoints.
+package transport
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/docker/go-connections/sockets"
+)
+
+// apiTransport holds information about the http transport to connect with the API.
+type apiTransport struct {
+ *http.Client
+ *tlsInfo
+ transport *http.Transport
+}
+
+// NewTransportWithHTTP creates a new transport based on the provided proto, address and http client.
+// It uses Docker's default http transport configuration if the client is nil.
+// It does not modify the client's transport if it's not nil.
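+//
+// A hypothetical call for a local unix socket (the address is illustrative):
+//
+//	t, err := NewTransportWithHTTP("unix", "/var/run/docker.sock", nil)
+//	if err == nil {
+//		fmt.Println(t.Scheme(), t.Secure())
+//	}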
+func NewTransportWithHTTP(proto, addr string, client *http.Client) (Client, error) { + var transport *http.Transport + + if client != nil { + tr, ok := client.Transport.(*http.Transport) + if !ok { + return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport) + } + transport = tr + } else { + transport = defaultTransport(proto, addr) + client = &http.Client{ + Transport: transport, + } + } + + return &apiTransport{ + Client: client, + tlsInfo: &tlsInfo{transport.TLSClientConfig}, + transport: transport, + }, nil +} + +// CancelRequest stops a request execution. +func (a *apiTransport) CancelRequest(req *http.Request) { + a.transport.CancelRequest(req) +} + +// defaultTransport creates a new http.Transport with Docker's +// default transport configuration. +func defaultTransport(proto, addr string) *http.Transport { + tr := new(http.Transport) + sockets.ConfigureTransport(tr, proto, addr) + return tr +} + +var _ Client = &apiTransport{} diff --git a/vendor/src/github.com/docker/engine-api/client/version.go b/vendor/src/github.com/docker/engine-api/client/version.go new file mode 100644 index 00000000..e037551a --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/version.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// ServerVersion returns information of the docker client and server host. +func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { + resp, err := cli.get(ctx, "/version", nil, nil) + if err != nil { + return types.Version{}, err + } + + var server types.Version + err = json.NewDecoder(resp.body).Decode(&server) + ensureReaderClosed(resp) + return server, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/volume_create.go b/vendor/src/github.com/docker/engine-api/client/volume_create.go new file mode 100644 index 00000000..cc1e1c17 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/volume_create.go @@ -0,0 +1,20 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// VolumeCreate creates a volume in the docker host. +func (cli *Client) VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error) { + var volume types.Volume + resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) + if err != nil { + return volume, err + } + err = json.NewDecoder(resp.body).Decode(&volume) + ensureReaderClosed(resp) + return volume, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/volume_inspect.go b/vendor/src/github.com/docker/engine-api/client/volume_inspect.go new file mode 100644 index 00000000..2eaebfaf --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/volume_inspect.go @@ -0,0 +1,38 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// VolumeInspect returns the information about a specific volume in the docker host. 
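+// For example (hypothetical volume name):
+//
+//	vol, err := cli.VolumeInspect(ctx, "myvolume")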
+func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { + volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) + return volume, err +} + +// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation +func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { + var volume types.Volume + resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return volume, nil, volumeNotFoundError{volumeID} + } + return volume, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return volume, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&volume) + return volume, body, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/volume_list.go b/vendor/src/github.com/docker/engine-api/client/volume_list.go new file mode 100644 index 00000000..7c6ccf83 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/volume_list.go @@ -0,0 +1,32 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" + "golang.org/x/net/context" +) + +// VolumeList returns the volumes configured in the docker host. +func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error) { + var volumes types.VolumesListResponse + query := url.Values{} + + if filter.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return volumes, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/volumes", query, nil) + if err != nil { + return volumes, err + } + + err = json.NewDecoder(resp.body).Decode(&volumes) + ensureReaderClosed(resp) + return volumes, err +} diff --git a/vendor/src/github.com/docker/engine-api/client/volume_remove.go b/vendor/src/github.com/docker/engine-api/client/volume_remove.go new file mode 100644 index 00000000..0dce24c7 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/client/volume_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// VolumeRemove removes a volume from the docker host. +func (cli *Client) VolumeRemove(ctx context.Context, volumeID string) error { + resp, err := cli.delete(ctx, "/volumes/"+volumeID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/src/github.com/docker/engine-api/types/auth.go b/vendor/src/github.com/docker/engine-api/types/auth.go new file mode 100644 index 00000000..056af6b8 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/types/auth.go @@ -0,0 +1,22 @@ +package types + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. 
+ IdentityToken string `json:"identitytoken,omitempty"`
+
+ // RegistryToken is a bearer token to be sent to a registry
+ RegistryToken string `json:"registrytoken,omitempty"`
+}
diff --git a/vendor/src/github.com/docker/engine-api/types/blkiodev/blkio.go b/vendor/src/github.com/docker/engine-api/types/blkiodev/blkio.go
new file mode 100644
index 00000000..931ae10a
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/blkiodev/blkio.go
@@ -0,0 +1,23 @@
+package blkiodev
+
+import "fmt"
+
+// WeightDevice is a structure that holds a device:weight pair
+type WeightDevice struct {
+ Path string
+ Weight uint16
+}
+
+func (w *WeightDevice) String() string {
+ return fmt.Sprintf("%s:%d", w.Path, w.Weight)
+}
+
+// ThrottleDevice is a structure that holds a device:rate_per_second pair
+type ThrottleDevice struct {
+ Path string
+ Rate uint64
+}
+
+func (t *ThrottleDevice) String() string {
+ return fmt.Sprintf("%s:%d", t.Path, t.Rate)
+}
diff --git a/vendor/src/github.com/docker/engine-api/types/client.go b/vendor/src/github.com/docker/engine-api/types/client.go
new file mode 100644
index 00000000..def3f061
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/client.go
@@ -0,0 +1,286 @@
+package types
+
+import (
+ "bufio"
+ "io"
+ "net"
+
+ "github.com/docker/engine-api/types/container"
+ "github.com/docker/engine-api/types/filters"
+ "github.com/docker/go-units"
+)
+
+// CheckpointCreateOptions holds parameters to create a checkpoint from a container
+type CheckpointCreateOptions struct {
+ CheckpointID string
+ Exit bool
+}
+
+// ContainerAttachOptions holds parameters to attach to a container.
+type ContainerAttachOptions struct {
+ Stream bool
+ Stdin bool
+ Stdout bool
+ Stderr bool
+ DetachKeys string
+}
+
+// ContainerCommitOptions holds parameters to commit changes into a container.
+type ContainerCommitOptions struct {
+ Reference string
+ Comment string
+ Author string
+ Changes []string
+ Pause bool
+ Config *container.Config
+}
+
+// ContainerExecInspect holds information returned by exec inspect.
+type ContainerExecInspect struct {
+ ExecID string
+ ContainerID string
+ Running bool
+ ExitCode int
+}
+
+// ContainerListOptions holds parameters to list containers with.
+type ContainerListOptions struct {
+ Quiet bool
+ Size bool
+ All bool
+ Latest bool
+ Since string
+ Before string
+ Limit int
+ Filter filters.Args
+}
+
+// ContainerLogsOptions holds parameters to filter logs with.
+type ContainerLogsOptions struct {
+ ShowStdout bool
+ ShowStderr bool
+ Since string
+ Timestamps bool
+ Follow bool
+ Tail string
+ Details bool
+}
+
+// ContainerRemoveOptions holds parameters to remove containers.
+type ContainerRemoveOptions struct {
+ RemoveVolumes bool
+ RemoveLinks bool
+ Force bool
+}
+
+// ContainerStartOptions holds parameters to start containers.
+type ContainerStartOptions struct {
+ CheckpointID string
+}
+
+// CopyToContainerOptions holds information
+// about files to copy into a container
+type CopyToContainerOptions struct {
+ AllowOverwriteDirWithFile bool
+}
+
+// EventsOptions holds parameters to filter events with.
+type EventsOptions struct {
+ Since string
+ Until string
+ Filters filters.Args
+}
+
+// NetworkListOptions holds parameters to filter the list of networks with.
+type NetworkListOptions struct {
+ Filters filters.Args
+}
+
+// HijackedResponse holds connection information for a hijacked request.
+type HijackedResponse struct {
+ Conn net.Conn
+ Reader *bufio.Reader
+}
+
+// Close closes the hijacked connection and reader.
+func (h *HijackedResponse) Close() {
+ h.Conn.Close()
+}
+
+// CloseWriter is an interface implemented by structs
+// that can close the write side of a stream.
+type CloseWriter interface {
+ CloseWrite() error
+}
+
+// CloseWrite closes the write side of the hijacked connection.
+func (h *HijackedResponse) CloseWrite() error {
+ if conn, ok := h.Conn.(CloseWriter); ok {
+ return conn.CloseWrite()
+ }
+ return nil
+}
+
+// ImageBuildOptions holds the information
+// necessary to build images.
+type ImageBuildOptions struct {
+ Tags []string
+ SuppressOutput bool
+ RemoteContext string
+ NoCache bool
+ Remove bool
+ ForceRemove bool
+ PullParent bool
+ Isolation container.Isolation
+ CPUSetCPUs string
+ CPUSetMems string
+ CPUShares int64
+ CPUQuota int64
+ CPUPeriod int64
+ Memory int64
+ MemorySwap int64
+ CgroupParent string
+ ShmSize int64
+ Dockerfile string
+ Ulimits []*units.Ulimit
+ BuildArgs map[string]string
+ AuthConfigs map[string]AuthConfig
+ Context io.Reader
+ Labels map[string]string
+}
+
+// ImageBuildResponse holds information
+// returned by a server after building
+// an image.
+type ImageBuildResponse struct {
+ Body io.ReadCloser
+ OSType string
+}
+
+// ImageCreateOptions holds information to create images.
+type ImageCreateOptions struct {
+ RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
+}
+
+// ImageImportSource holds source information for ImageImport
+type ImageImportSource struct {
+ Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName)
+ SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source)
+}
+
+// ImageImportOptions holds information to import images from the client host.
+type ImageImportOptions struct {
+ Tag string // Tag is the name to tag this image with. This attribute is deprecated.
+ Message string // Message is the message to tag the image with
+ Changes []string // Changes are the raw changes to apply to this image
+}
+
+// ImageListOptions holds parameters to filter the list of images with.
+type ImageListOptions struct {
+ MatchName string
+ All bool
+ Filters filters.Args
+}
+
+// ImageLoadResponse returns information to the client about a load process.
+type ImageLoadResponse struct {
+ // Body must be closed to avoid a resource leak
+ Body io.ReadCloser
+ JSON bool
+}
+
+// ImagePullOptions holds information to pull images.
+type ImagePullOptions struct {
+ All bool
+ RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
+ PrivilegeFunc RequestPrivilegeFunc
+}
+
+// RequestPrivilegeFunc is a function interface that
+// clients can supply to retry operations after
+// getting an authorization error.
+// This function returns the registry authentication
+// header value in base 64 format, or an error
+// if the privilege request fails.
+type RequestPrivilegeFunc func() (string, error)
+
+// ImagePushOptions holds information to push images.
+type ImagePushOptions ImagePullOptions
+
+// ImageRemoveOptions holds parameters to remove images.
+type ImageRemoveOptions struct {
+ Force bool
+ PruneChildren bool
+}
+
+// ImageSearchOptions holds parameters to search images with.
+type ImageSearchOptions struct {
+ RegistryAuth string
+ PrivilegeFunc RequestPrivilegeFunc
+ Filters filters.Args
+ Limit int
+}
+
+// ResizeOptions holds parameters to resize a tty.
+// It can be used to resize container ttys and
+// exec process ttys too.
+type ResizeOptions struct { + Height int + Width int +} + +// VersionResponse holds version information for the client and the server +type VersionResponse struct { + Client *Version + Server *Version +} + +// ServerOK returns true when the client could connect to the docker server +// and parse the information received. It returns false otherwise. +func (v VersionResponse) ServerOK() bool { + return v.Server != nil +} + +// NodeListOptions holds parameters to list nodes with. +type NodeListOptions struct { + Filter filters.Args +} + +// ServiceCreateOptions contains the options to use when creating a service. +type ServiceCreateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string +} + +// ServiceCreateResponse contains the information returned to a client +// on the creation of a new service. +type ServiceCreateResponse struct { + // ID is the ID of the created service. + ID string +} + +// ServiceUpdateOptions contains the options to be used for updating services. +type ServiceUpdateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate + // into this field. While it does open API users up to racy writes, most + // users may not need that level of consistency in practice. +} + +// ServiceListOptions holds parameters to list services with. +type ServiceListOptions struct { + Filter filters.Args +} + +// TaskListOptions holds parameters to list tasks with. +type TaskListOptions struct { + Filter filters.Args +} diff --git a/vendor/src/github.com/docker/engine-api/types/configs.go b/vendor/src/github.com/docker/engine-api/types/configs.go new file mode 100644 index 00000000..93384b9f --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/types/configs.go @@ -0,0 +1,53 @@ +package types + +import ( + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/network" +) + +// configs holds structs used for internal communication between the +// frontend (such as an http server) and the backend (such as the +// docker daemon). + +// ContainerCreateConfig is the parameter set to ContainerCreate() +type ContainerCreateConfig struct { + Name string + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + AdjustCPUShares bool +} + +// ContainerRmConfig holds arguments for the container remove +// operation. This struct is used to tell the backend what operations +// to perform. +type ContainerRmConfig struct { + ForceRemove, RemoveVolume, RemoveLink bool +} + +// ContainerCommitConfig contains build configs for commit operation, +// and is used when making a commit with the current state of the container. +type ContainerCommitConfig struct { + Pause bool + Repo string + Tag string + Author string + Comment string + // merge container config into commit config before commit + MergeConfigs bool + Config *container.Config +} + +// ExecConfig is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. 
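+//
+// A hypothetical configuration for an interactive shell might look like:
+//
+//	cfg := ExecConfig{
+//		Tty:          true,
+//		AttachStdin:  true,
+//		AttachStdout: true,
+//		AttachStderr: true,
+//		Cmd:          []string{"/bin/sh"},
+//	}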
+type ExecConfig struct {
+ User string // User that will run the command
+ Privileged bool // Is the container in privileged mode
+ Tty bool // Attach standard streams to a tty.
+ AttachStdin bool // Attach the standard input, makes possible user interaction
+ AttachStderr bool // Attach the standard error
+ AttachStdout bool // Attach the standard output
+ Detach bool // Execute in detach mode
+ DetachKeys string // Escape keys for detach
+ Cmd []string // Execution commands and args
+}
diff --git a/vendor/src/github.com/docker/engine-api/types/container/config.go b/vendor/src/github.com/docker/engine-api/types/container/config.go
new file mode 100644
index 00000000..707fc8c1
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/container/config.go
@@ -0,0 +1,62 @@
+package container
+
+import (
+ "time"
+
+ "github.com/docker/engine-api/types/strslice"
+ "github.com/docker/go-connections/nat"
+)
+
+// HealthConfig holds configuration settings for the HEALTHCHECK feature.
+type HealthConfig struct {
+ // Test is the test to perform to check that the container is healthy.
+ // An empty slice means to inherit the default.
+ // The options are:
+ // {} : inherit healthcheck
+ // {"NONE"} : disable healthcheck
+ // {"CMD", args...} : exec arguments directly
+ // {"CMD-SHELL", command} : run command with system's default shell
+ Test []string `json:",omitempty"`
+
+ // Zero means to inherit. Durations are expressed as integer nanoseconds.
+ Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+ Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+
+ // Retries is the number of consecutive failures needed to consider a container as unhealthy.
+ // Zero means inherit.
+ Retries int `json:",omitempty"`
+}
+
+// Config contains the configuration data about a container.
+// It should hold only portable information about the container.
+// Here, "portable" means "independent from the host we are running on".
+// Non-portable information *should* appear in HostConfig.
+// All fields added to this struct must be marked `omitempty` to keep getting
+// predictable hashes from the old `v1Compatibility` configuration.
+type Config struct {
+ Hostname string // Hostname
+ Domainname string // Domainname
+ User string // User that will run the command(s) inside the container
+ AttachStdin bool // Attach the standard input, makes possible user interaction
+ AttachStdout bool // Attach the standard output
+ AttachStderr bool // Attach the standard error
+ ExposedPorts map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports
+ Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
+ OpenStdin bool // Open stdin
+ StdinOnce bool // If true, close stdin after the first attached client disconnects.
+ Env []string // List of environment variables to set in the container
+ Cmd strslice.StrSlice // Command to run when starting the container
+ Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
+ ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
+ Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
+ Volumes map[string]struct{} // List of volumes (mounts) used for the container
+ WorkingDir string // Current directory (PWD) in which the command will be launched
+ Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
+ NetworkDisabled bool `json:",omitempty"` // Is network disabled
+ MacAddress string `json:",omitempty"` // Mac Address of the container
+ OnBuild []string // ONBUILD metadata that were defined in the image Dockerfile
+ Labels map[string]string // List of labels set to this container
+ StopSignal string `json:",omitempty"` // Signal to stop a container
+ StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
+ Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
diff --git a/vendor/src/github.com/docker/engine-api/types/container/host_config.go b/vendor/src/github.com/docker/engine-api/types/container/host_config.go
new file mode 100644
index 00000000..a9ff755b
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/container/host_config.go
@@ -0,0 +1,320 @@
+package container
+
+import (
+ "strings"
+
+ "github.com/docker/engine-api/types/blkiodev"
+ "github.com/docker/engine-api/types/strslice"
+ "github.com/docker/go-connections/nat"
+ "github.com/docker/go-units"
+)
+
+// NetworkMode represents the container network stack.
+type NetworkMode string
+
+// Isolation represents the isolation technology of a container. The supported
+// values are platform specific
+type Isolation string
+
+// IsDefault indicates the default isolation technology of a container. On Linux this
+// is the native driver. On Windows, this is a Windows Server Container.
+func (i Isolation) IsDefault() bool {
+ return strings.ToLower(string(i)) == "default" || string(i) == ""
+}
+
+// IpcMode represents the container ipc stack.
+type IpcMode string
+
+// IsPrivate indicates whether the container uses its private ipc stack.
+func (n IpcMode) IsPrivate() bool {
+ return !(n.IsHost() || n.IsContainer())
+}
+
+// IsHost indicates whether the container uses the host's ipc stack.
+func (n IpcMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsContainer indicates whether the container uses a container's ipc stack.
+func (n IpcMode) IsContainer() bool {
+ parts := strings.SplitN(string(n), ":", 2)
+ return len(parts) > 1 && parts[0] == "container"
+}
+
+// Valid indicates whether the ipc stack is valid.
+func (n IpcMode) Valid() bool {
+ parts := strings.Split(string(n), ":")
+ switch mode := parts[0]; mode {
+ case "", "host":
+ case "container":
+ if len(parts) != 2 || parts[1] == "" {
+ return false
+ }
+ default:
+ return false
+ }
+ return true
+}
+
+// Container returns the name of the container whose ipc stack is going to be used.
+func (n IpcMode) Container() string {
+ parts := strings.SplitN(string(n), ":", 2)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ return ""
+}
+
+// UsernsMode represents userns mode in the container.
+type UsernsMode string
+
+// IsHost indicates whether the container uses the host's userns.
+func (n UsernsMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsPrivate indicates whether the container uses a private userns.
+func (n UsernsMode) IsPrivate() bool {
+ return !(n.IsHost())
+}
+
+// Valid indicates whether the userns is valid.
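+// For example, UsernsMode("host").Valid() and UsernsMode("").Valid() are
+// true, while UsernsMode("container:name").Valid() is false.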
+func (n UsernsMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// CgroupSpec represents the cgroup to use for the container. +type CgroupSpec string + +// IsContainer indicates whether the container is using another container cgroup +func (c CgroupSpec) IsContainer() bool { + parts := strings.SplitN(string(c), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the cgroup spec is valid. +func (c CgroupSpec) Valid() bool { + return c.IsContainer() || c == "" +} + +// Container returns the name of the container whose cgroup will be used. +func (c CgroupSpec) Container() string { + parts := strings.SplitN(string(c), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UTSMode represents the UTS namespace of the container. +type UTSMode string + +// IsPrivate indicates whether the container uses its private UTS namespace. +func (n UTSMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// IsHost indicates whether the container uses the host's UTS namespace. +func (n UTSMode) IsHost() bool { + return n == "host" +} + +// Valid indicates whether the UTS namespace is valid. +func (n UTSMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// PidMode represents the pid namespace of the container. +type PidMode string + +// IsPrivate indicates whether the container uses its own new pid namespace. +func (n PidMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's pid namespace. +func (n PidMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's pid namespace. +func (n PidMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the pid namespace is valid. +func (n PidMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container whose pid namespace is going to be used. +func (n PidMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// DeviceMapping represents the device mapping between the host and the container. +type DeviceMapping struct { + PathOnHost string + PathInContainer string + CgroupPermissions string +} + +// RestartPolicy represents the restart policies of the container. +type RestartPolicy struct { + Name string + MaximumRetryCount int +} + +// IsNone indicates whether the container has the "no" restart policy. +// This means the container will not automatically restart when exiting. +func (rp *RestartPolicy) IsNone() bool { + return rp.Name == "no" || rp.Name == "" +} + +// IsAlways indicates whether the container has the "always" restart policy. +// This means the container will automatically restart regardless of the exit status. +func (rp *RestartPolicy) IsAlways() bool { + return rp.Name == "always" +} + +// IsOnFailure indicates whether the container has the "on-failure" restart policy. 
+// This means the container will automatically restart if it exits with a non-zero exit status.
+func (rp *RestartPolicy) IsOnFailure() bool {
+ return rp.Name == "on-failure"
+}
+
+// IsUnlessStopped indicates whether the container has the
+// "unless-stopped" restart policy. This means the container will
+// automatically restart unless the user has stopped it.
+func (rp *RestartPolicy) IsUnlessStopped() bool {
+ return rp.Name == "unless-stopped"
+}
+
+// IsSame compares two RestartPolicy values to see if they are the same
+func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
+ return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
+}
+
+// LogConfig represents the logging configuration of the container.
+type LogConfig struct {
+ Type string
+ Config map[string]string
+}
+
+// Resources contains container's resources (cgroups config, ulimits...)
+type Resources struct {
+ // Applicable to all platforms
+ CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
+ Memory int64 // Memory limit (in bytes)
+
+ // Applicable to UNIX platforms
+ CgroupParent string // Parent cgroup.
+ BlkioWeight uint16 // Block IO weight (relative weight vs. other containers)
+ BlkioWeightDevice []*blkiodev.WeightDevice
+ BlkioDeviceReadBps []*blkiodev.ThrottleDevice
+ BlkioDeviceWriteBps []*blkiodev.ThrottleDevice
+ BlkioDeviceReadIOps []*blkiodev.ThrottleDevice
+ BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
+ CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
+ CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota
+ CpusetCpus string // CpusetCpus 0-2, 0,1
+ CpusetMems string // CpusetMems 0-2, 0,1
+ Devices []DeviceMapping // List of devices to map inside the container
+ DiskQuota int64 // Disk limit (in bytes)
+ KernelMemory int64 // Kernel memory limit (in bytes)
+ MemoryReservation int64 // Memory soft limit (in bytes)
+ MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
+ MemorySwappiness *int64 // Tuning container memory swappiness behaviour
+ OomKillDisable *bool // Whether to disable OOM Killer or not
+ PidsLimit int64 // Setting pids limit for a container
+ Ulimits []*units.Ulimit // List of ulimits to be set in the container
+
+ // Applicable to Windows
+ CPUCount int64 `json:"CpuCount"` // CPU count
+ CPUPercent int64 `json:"CpuPercent"` // CPU percent
+ IOMaximumIOps uint64 // Maximum IOps for the container system drive
+ IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
+}
+
+// UpdateConfig holds the mutable attributes of a Container.
+// Those attributes can be updated at runtime.
+type UpdateConfig struct {
+ // Contains container's resources (cgroups, ulimits)
+ Resources
+ RestartPolicy RestartPolicy
+}
+
+// HostConfig is the non-portable Config structure of a container.
+// Here, "non-portable" means "dependent of the host we are running on".
+// Portable information *should* appear in Config.
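+//
+// An illustrative (hypothetical) configuration:
+//
+//	hc := HostConfig{
+//		NetworkMode:   "bridge",
+//		RestartPolicy: RestartPolicy{Name: "on-failure", MaximumRetryCount: 3},
+//	}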
+type HostConfig struct {
+ // Applicable to all platforms
+ Binds []string // List of volume bindings for this container
+ ContainerIDFile string // File (path) where the containerId is written
+ LogConfig LogConfig // Configuration of the logs for this container
+ NetworkMode NetworkMode // Network mode to use for the container
+ PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host
+ RestartPolicy RestartPolicy // Restart policy to be used for the container
+ AutoRemove bool // Automatically remove container when it exits
+ VolumeDriver string // Name of the volume driver used to mount volumes
+ VolumesFrom []string // List of volumes to take from other containers
+
+ // Applicable to UNIX platforms
+ CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
+ CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
+ DNS []string `json:"Dns"` // List of DNS servers to lookup
+ DNSOptions []string `json:"DnsOptions"` // List of DNS options to look for
+ DNSSearch []string `json:"DnsSearch"` // List of DNS search domains to look for
+ ExtraHosts []string // List of extra hosts
+ GroupAdd []string // List of additional groups that the container process will run as
+ IpcMode IpcMode // IPC namespace to use for the container
+ Cgroup CgroupSpec // Cgroup to use for the container
+ Links []string // List of links (in the name:alias form)
+ OomScoreAdj int // Container preference for OOM-killing
+ PidMode PidMode // PID namespace to use for the container
+ Privileged bool // Is the container in privileged mode
+ PublishAllPorts bool // Should docker publish all exposed ports for the container
+ ReadonlyRootfs bool // Is the container root filesystem in read-only mode
+ SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux.
+ StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container.
+ Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
+ UTSMode UTSMode // UTS namespace to use for the container
+ UsernsMode UsernsMode // The user namespace to use for the container
+ ShmSize int64 // Total shm memory usage
+ Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container
+ Runtime string `json:",omitempty"` // Runtime to use with this container
+
+ // Applicable to Windows
+ ConsoleSize [2]int // Initial console size
+ Isolation Isolation // Isolation technology of the container (eg default, hyperv)
+
+ // Contains container's resources (cgroups, ulimits)
+ Resources
+}
diff --git a/vendor/src/github.com/docker/engine-api/types/container/hostconfig_unix.go b/vendor/src/github.com/docker/engine-api/types/container/hostconfig_unix.go
new file mode 100644
index 00000000..4171059a
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/container/hostconfig_unix.go
@@ -0,0 +1,81 @@
+// +build !windows
+
+package container
+
+import "strings"
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+ return i.IsDefault()
+}
+
+// IsPrivate indicates whether container uses its private network stack.
+func (n NetworkMode) IsPrivate() bool {
+ return !(n.IsHost() || n.IsContainer())
+}
+
+// IsDefault indicates whether container uses the default network stack.
+func (n NetworkMode) IsDefault() bool {
+ return n == "default"
+}
+
+// NetworkName returns the name of the network stack.
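+// For example, NetworkMode("container:db").NetworkName() returns "container",
+// while a user-defined network mode returns its own name.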
+func (n NetworkMode) NetworkName() string {
+ if n.IsBridge() {
+ return "bridge"
+ } else if n.IsHost() {
+ return "host"
+ } else if n.IsContainer() {
+ return "container"
+ } else if n.IsNone() {
+ return "none"
+ } else if n.IsDefault() {
+ return "default"
+ } else if n.IsUserDefined() {
+ return n.UserDefined()
+ }
+ return ""
+}
+
+// IsBridge indicates whether container uses the bridge network stack
+func (n NetworkMode) IsBridge() bool {
+ return n == "bridge"
+}
+
+// IsHost indicates whether container uses the host network stack.
+func (n NetworkMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsContainer indicates whether container uses a container network stack.
+func (n NetworkMode) IsContainer() bool {
+ parts := strings.SplitN(string(n), ":", 2)
+ return len(parts) > 1 && parts[0] == "container"
+}
+
+// IsNone indicates whether container isn't using a network stack.
+func (n NetworkMode) IsNone() bool {
+ return n == "none"
+}
+
+// ConnectedContainer is the id of the container whose network this container is connected to.
+func (n NetworkMode) ConnectedContainer() string {
+ parts := strings.SplitN(string(n), ":", 2)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ return ""
+}
+
+// IsUserDefined indicates whether the container uses a user-created network
+func (n NetworkMode) IsUserDefined() bool {
+ return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
+}
+
+// UserDefined returns the name of the user-created network.
+func (n NetworkMode) UserDefined() string {
+ if n.IsUserDefined() {
+ return string(n)
+ }
+ return ""
+}
diff --git a/vendor/src/github.com/docker/engine-api/types/container/hostconfig_windows.go b/vendor/src/github.com/docker/engine-api/types/container/hostconfig_windows.go
new file mode 100644
index 00000000..0ee332ba
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/container/hostconfig_windows.go
@@ -0,0 +1,87 @@
+package container
+
+import (
+ "strings"
+)
+
+// IsDefault indicates whether container uses the default network stack.
+func (n NetworkMode) IsDefault() bool {
+ return n == "default"
+}
+
+// IsNone indicates whether container isn't using a network stack.
+func (n NetworkMode) IsNone() bool {
+ return n == "none"
+}
+
+// IsContainer indicates whether container uses a container network stack.
+// Returns false as Windows doesn't support this mode
+func (n NetworkMode) IsContainer() bool {
+ return false
+}
+
+// IsBridge indicates whether container uses the bridge network stack.
+// On Windows it is given the name NAT
+func (n NetworkMode) IsBridge() bool {
+ return n == "nat"
+}
+
+// IsHost indicates whether container uses the host network stack.
+// Returns false as this is not supported by Windows
+func (n NetworkMode) IsHost() bool {
+ return false
+}
+
+// IsPrivate indicates whether container uses its private network stack.
+func (n NetworkMode) IsPrivate() bool {
+ return !(n.IsHost() || n.IsContainer())
+}
+
+// ConnectedContainer is the id of the container whose network this container is connected to.
+// Returns a blank string on Windows
+func (n NetworkMode) ConnectedContainer() string {
+ return ""
+}
+
+// IsUserDefined indicates whether the container uses a user-created network
+func (n NetworkMode) IsUserDefined() bool {
+ return !n.IsDefault() && !n.IsNone() && !n.IsBridge()
+}
+
+// IsHyperV indicates the use of a Hyper-V partition for isolation
+func (i Isolation) IsHyperV() bool {
+ return strings.ToLower(string(i)) == "hyperv"
+}
+
+// IsProcess indicates the use of process isolation
+func (i Isolation) IsProcess() bool {
+ return strings.ToLower(string(i)) == "process"
+}
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+ return i.IsDefault() || i.IsHyperV() || i.IsProcess()
+}
+
+// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string {
+ if n.IsDefault() {
+ return "default"
+ } else if n.IsBridge() {
+ return "nat"
+ } else if n.IsNone() {
+ return "none"
+ } else if n.IsUserDefined() {
+ return n.UserDefined()
+ }
+
+ return ""
+}
+
+// UserDefined returns the name of the user-created network.
+func (n NetworkMode) UserDefined() string {
+ if n.IsUserDefined() {
+ return string(n)
+ }
+ return ""
+}
diff --git a/vendor/src/github.com/docker/engine-api/types/errors.go b/vendor/src/github.com/docker/engine-api/types/errors.go
new file mode 100644
index 00000000..649ab951
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/errors.go
@@ -0,0 +1,6 @@
+package types
+
+// ErrorResponse is the response body of API errors.
+type ErrorResponse struct {
+ Message string `json:"message"`
+}
diff --git a/vendor/src/github.com/docker/engine-api/types/filters/parse.go b/vendor/src/github.com/docker/engine-api/types/filters/parse.go
new file mode 100644
index 00000000..dc2c48b8
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/filters/parse.go
@@ -0,0 +1,307 @@
+// Package filters provides helper functions to parse and handle command line
+// filters, used for example in the docker ps or docker images commands.
+package filters
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/docker/engine-api/types/versions"
+)
+
+// Args stores filter arguments as map key:{map key: bool}.
+// It contains an aggregation of the map of arguments (which are in the form
+// of -f 'key=value') based on the key, and stores values for the same key
+// in a map with string keys and boolean values.
+// e.g. given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
+// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
+type Args struct {
+ fields map[string]map[string]bool
+}
+
+// NewArgs initializes a new Args struct.
+func NewArgs() Args {
+ return Args{fields: map[string]map[string]bool{}}
+}
+
+// ParseFlag parses the argument to the filter flag. Like
+//
+// `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
+//
+// If prev map is provided, then it is appended to, and returned. By default a new
+// map is created.
+func ParseFlag(arg string, prev Args) (Args, error) {
+ filters := prev
+ if len(arg) == 0 {
+ return filters, nil
+ }
+
+ if !strings.Contains(arg, "=") {
+ return filters, ErrBadFormat
+ }
+
+ f := strings.SplitN(arg, "=", 2)
+
+ name := strings.ToLower(strings.TrimSpace(f[0]))
+ value := strings.TrimSpace(f[1])
+
+ filters.Add(name, value)
+
+ return filters, nil
+}
+
+// ErrBadFormat is an error returned in case of bad format for a filter.
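+// For example, ParseFlag returns it for an argument without a "=", such as
+// "dangling".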
+var ErrBadFormat = errors.New("bad format of filter (expected name=value)") + +// ToParam packs the Args into a string for easy transport from client to server. +func ToParam(a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if a.Len() == 0 { + return "", nil + } + + buf, err := json.Marshal(a.fields) + if err != nil { + return "", err + } + return string(buf), nil +} + +// ToParamWithVersion packs the Args into a string for easy transport from client to server. +// The generated string will depend on the specified version (corresponding to the API version). +func ToParamWithVersion(version string, a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if a.Len() == 0 { + return "", nil + } + + // for daemons older than v1.10, filter must be of the form map[string][]string + buf := []byte{} + err := errors.New("") + if version != "" && versions.LessThan(version, "1.22") { + buf, err = json.Marshal(convertArgsToSlice(a.fields)) + } else { + buf, err = json.Marshal(a.fields) + } + if err != nil { + return "", err + } + return string(buf), nil +} + +// FromParam unpacks the filter Args. +func FromParam(p string) (Args, error) { + if len(p) == 0 { + return NewArgs(), nil + } + + r := strings.NewReader(p) + d := json.NewDecoder(r) + + m := map[string]map[string]bool{} + if err := d.Decode(&m); err != nil { + r.Seek(0, 0) + + // Allow parsing old arguments in slice format. + // Because other libraries might be sending them in this format. + deprecated := map[string][]string{} + if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil { + m = deprecatedArgs(deprecated) + } else { + return NewArgs(), err + } + } + return Args{m}, nil +} + +// Get returns the list of values associates with a field. +// It returns a slice of strings to keep backwards compatibility with old code. +func (filters Args) Get(field string) []string { + values := filters.fields[field] + if values == nil { + return make([]string, 0) + } + slice := make([]string, 0, len(values)) + for key := range values { + slice = append(slice, key) + } + return slice +} + +// Add adds a new value to a filter field. +func (filters Args) Add(name, value string) { + if _, ok := filters.fields[name]; ok { + filters.fields[name][value] = true + } else { + filters.fields[name] = map[string]bool{value: true} + } +} + +// Del removes a value from a filter field. +func (filters Args) Del(name, value string) { + if _, ok := filters.fields[name]; ok { + delete(filters.fields[name], value) + } +} + +// Len returns the number of fields in the arguments. +func (filters Args) Len() int { + return len(filters.fields) +} + +// MatchKVList returns true if the values for the specified field matches the ones +// from the sources. +// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, +// field is 'label' and sources are {'label1': '1', 'label2': '2'} +// it returns true. 
+func (filters Args) MatchKVList(field string, sources map[string]string) bool {
+ fieldValues := filters.fields[field]
+
+ // do not filter if there is no filter set or we cannot determine the filter
+ if len(fieldValues) == 0 {
+ return true
+ }
+
+ if len(sources) == 0 {
+ return false
+ }
+
+ for name2match := range fieldValues {
+ testKV := strings.SplitN(name2match, "=", 2)
+
+ v, ok := sources[testKV[0]]
+ if !ok {
+ return false
+ }
+ if len(testKV) == 2 && testKV[1] != v {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Match returns true if the values for the specified field match the source string
+// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name': {'ubuntu'}},
+// field is 'image.name' and source is 'ubuntu'
+// it returns true.
+func (filters Args) Match(field, source string) bool {
+ if filters.ExactMatch(field, source) {
+ return true
+ }
+
+ fieldValues := filters.fields[field]
+ for name2match := range fieldValues {
+ match, err := regexp.MatchString(name2match, source)
+ if err != nil {
+ continue
+ }
+ if match {
+ return true
+ }
+ }
+ return false
+}
+
+// ExactMatch returns true if the source matches exactly one of the filters.
+func (filters Args) ExactMatch(field, source string) bool {
+ fieldValues, ok := filters.fields[field]
+ // do not filter if there is no filter set or we cannot determine the filter
+ if !ok || len(fieldValues) == 0 {
+ return true
+ }
+
+ // try to match the full name value to avoid O(N) regular expression matching
+ return fieldValues[source]
+}
+
+// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one.
+func (filters Args) UniqueExactMatch(field, source string) bool {
+ fieldValues := filters.fields[field]
+ // do not filter if there is no filter set or we cannot determine the filter
+ if len(fieldValues) == 0 {
+ return true
+ }
+ if len(filters.fields[field]) != 1 {
+ return false
+ }
+
+ // try to match the full name value to avoid O(N) regular expression matching
+ return fieldValues[source]
+}
+
+// FuzzyMatch returns true if the source matches exactly one of the filters,
+// or the source has one of the filters as a prefix.
+func (filters Args) FuzzyMatch(field, source string) bool {
+ if filters.ExactMatch(field, source) {
+ return true
+ }
+
+ fieldValues := filters.fields[field]
+ for prefix := range fieldValues {
+ if strings.HasPrefix(source, prefix) {
+ return true
+ }
+ }
+ return false
+}
+
+// Include returns true if the name of the field to filter is in the filters.
+func (filters Args) Include(field string) bool {
+ _, ok := filters.fields[field]
+ return ok
+}
+
+// Validate ensures that all the fields in the filter are valid.
+// It returns an error as soon as it finds an invalid field.
+func (filters Args) Validate(accepted map[string]bool) error {
+ for name := range filters.fields {
+ if !accepted[name] {
+ return fmt.Errorf("Invalid filter '%s'", name)
+ }
+ }
+ return nil
+}
+
+// WalkValues iterates over the list of values for a field.
+// It stops at the first error returned by op and returns that error.
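+// Editor's sketch:
+//
+// err := args.WalkValues("label", func(value string) error {
+// fmt.Println(value)
+// return nil
+// })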
+func (filters Args) WalkValues(field string, op func(value string) error) error { + if _, ok := filters.fields[field]; !ok { + return nil + } + for v := range filters.fields[field] { + if err := op(v); err != nil { + return err + } + } + return nil +} + +func deprecatedArgs(d map[string][]string) map[string]map[string]bool { + m := map[string]map[string]bool{} + for k, v := range d { + values := map[string]bool{} + for _, vv := range v { + values[vv] = true + } + m[k] = values + } + return m +} + +func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { + m := map[string][]string{} + for k, v := range f { + values := []string{} + for kk := range v { + if v[kk] { + values = append(values, kk) + } + } + m[k] = values + } + return m +} diff --git a/vendor/src/github.com/docker/engine-api/types/network/network.go b/vendor/src/github.com/docker/engine-api/types/network/network.go new file mode 100644 index 00000000..47080b65 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/types/network/network.go @@ -0,0 +1,53 @@ +package network + +// Address represents an IP address +type Address struct { + Addr string + PrefixLen int +} + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string //Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` + LinkLocalIPs []string `json:",omitempty"` +} + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configurations + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string + // Operational data + NetworkID string + EndpointID string + Gateway string + IPAddress string + IPPrefixLen int + IPv6Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + MacAddress string +} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network +} diff --git a/vendor/src/github.com/docker/engine-api/types/plugin.go b/vendor/src/github.com/docker/engine-api/types/plugin.go new file mode 100644 index 00000000..05030ff3 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/types/plugin.go @@ -0,0 +1,169 @@ +// +build experimental + +package types + +import ( + "encoding/json" + "fmt" +) + +// PluginInstallOptions holds parameters to install a plugin. 
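+//
+// A minimal literal might look like this (editor's sketch; values are
+// hypothetical):
+//
+// opts := types.PluginInstallOptions{AcceptAllPermissions: true}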
+type PluginInstallOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + PrivilegeFunc RequestPrivilegeFunc + AcceptPermissionsFunc func(PluginPrivileges) (bool, error) +} + +// PluginConfig represents the values of settings potentially modifiable by a user +type PluginConfig struct { + Mounts []PluginMount + Env []string + Args []string + Devices []PluginDevice +} + +// Plugin represents a Docker plugin for the remote API +type Plugin struct { + ID string `json:"Id,omitempty"` + Name string + Tag string + Active bool + Config PluginConfig + Manifest PluginManifest +} + +// PluginsListResponse contains the response for the remote API +type PluginsListResponse []*Plugin + +const ( + authzDriver = "AuthzDriver" + graphDriver = "GraphDriver" + ipamDriver = "IpamDriver" + networkDriver = "NetworkDriver" + volumeDriver = "VolumeDriver" +) + +// PluginInterfaceType represents a type that a plugin implements. +type PluginInterfaceType struct { + Prefix string // This is always "docker" + Capability string // Capability should be validated against the above list. + Version string // Plugin API version. Depends on the capability +} + +// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType +func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { + versionIndex := len(p) + prefixIndex := 0 + if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { + return fmt.Errorf("%q is not a plugin interface type", p) + } + p = p[1 : len(p)-1] +loop: + for i, b := range p { + switch b { + case '.': + prefixIndex = i + case '/': + versionIndex = i + break loop + } + } + t.Prefix = string(p[:prefixIndex]) + t.Capability = string(p[prefixIndex+1 : versionIndex]) + if versionIndex < len(p) { + t.Version = string(p[versionIndex+1:]) + } + return nil +} + +// MarshalJSON implements json.Marshaler for PluginInterfaceType +func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String implements fmt.Stringer for PluginInterfaceType +func (t PluginInterfaceType) String() string { + return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) +} + +// PluginInterface describes the interface between Docker and plugin +type PluginInterface struct { + Types []PluginInterfaceType + Socket string +} + +// PluginSetting is to be embedded in other structs, if they are supposed to be +// modifiable by the user. 
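+//
+// For example (editor's sketch), PluginMount below embeds it:
+//
+// m := types.PluginMount{PluginSetting: types.PluginSetting{Name: "data"}, Destination: "/data", Type: "bind"}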
+type PluginSetting struct { + Name string + Description string + Settable []string +} + +// PluginNetwork represents the network configuration for a plugin +type PluginNetwork struct { + Type string +} + +// PluginMount represents the mount configuration for a plugin +type PluginMount struct { + PluginSetting + Source *string + Destination string + Type string + Options []string +} + +// PluginEnv represents an environment variable for a plugin +type PluginEnv struct { + PluginSetting + Value *string +} + +// PluginArgs represents the command line arguments for a plugin +type PluginArgs struct { + PluginSetting + Value []string +} + +// PluginDevice represents a device for a plugin +type PluginDevice struct { + PluginSetting + Path *string +} + +// PluginUser represents the user for the plugin's process +type PluginUser struct { + UID uint32 `json:"Uid,omitempty"` + GID uint32 `json:"Gid,omitempty"` +} + +// PluginManifest represents the manifest of a plugin +type PluginManifest struct { + ManifestVersion string + Description string + Documentation string + Interface PluginInterface + Entrypoint []string + Workdir string + User PluginUser `json:",omitempty"` + Network PluginNetwork + Capabilities []string + Mounts []PluginMount + Devices []PluginDevice + Env []PluginEnv + Args PluginArgs +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +type PluginPrivilege struct { + Name string + Description string + Value []string +} + +// PluginPrivileges is a list of PluginPrivilege +type PluginPrivileges []PluginPrivilege diff --git a/vendor/src/github.com/docker/engine-api/types/reference/image_reference.go b/vendor/src/github.com/docker/engine-api/types/reference/image_reference.go new file mode 100644 index 00000000..be9cf8eb --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/types/reference/image_reference.go @@ -0,0 +1,34 @@ +package reference + +import ( + distreference "github.com/docker/distribution/reference" +) + +// Parse parses the given references and returns the repository and +// tag (if present) from it. If there is an error during parsing, it will +// return an error. +func Parse(ref string) (string, string, error) { + distributionRef, err := distreference.ParseNamed(ref) + if err != nil { + return "", "", err + } + + tag := GetTagFromNamedRef(distributionRef) + return distributionRef.Name(), tag, nil +} + +// GetTagFromNamedRef returns a tag from the specified reference. +// This function is necessary as long as the docker "server" api makes the distinction between repository +// and tags. +func GetTagFromNamedRef(ref distreference.Named) string { + var tag string + switch x := ref.(type) { + case distreference.Digested: + tag = x.Digest().String() + case distreference.NamedTagged: + tag = x.Tag() + default: + tag = "latest" + } + return tag +} diff --git a/vendor/src/github.com/docker/engine-api/types/registry/registry.go b/vendor/src/github.com/docker/engine-api/types/registry/registry.go new file mode 100644 index 00000000..d2aca6f0 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/types/registry/registry.go @@ -0,0 +1,99 @@ +package registry + +import ( + "encoding/json" + "net" +) + +// ServiceConfig stores daemon registry services configuration. 
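+//
+// Editor's sketch of a literal (the mirror URL is hypothetical):
+//
+// cfg := registry.ServiceConfig{Mirrors: []string{"https://mirror.example.com"}}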
+type ServiceConfig struct { + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// NetIPNet is the net.IPNet type, which can be marshalled and +// unmarshalled to JSON +type NetIPNet net.IPNet + +// MarshalJSON returns the JSON representation of the IPNet +func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(ipnet).String()) +} + +// UnmarshalJSON sets the IPNet from a byte array of JSON +func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { + var ipnetStr string + if err = json.Unmarshal(b, &ipnetStr); err == nil { + var cidr *net.IPNet + if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { + *ipnet = NetIPNet(*cidr) + } + } + return +} + +// IndexInfo contains information about a registry +// +// RepositoryInfo Examples: +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } +type IndexInfo struct { + // Name is the name of the registry, such as "docker.io" + Name string + // Mirrors is a list of mirrors, expressed as URIs + Mirrors []string + // Secure is set to false if the registry is part of the list of + // insecure registries. Insecure registries accept HTTP and/or accept + // HTTPS with certificates from unknown CAs. + Secure bool + // Official indicates whether this is an official registry + Official bool +} + +// SearchResult describes a search result returned from a registry +type SearchResult struct { + // StarCount indicates the number of stars this repository has + StarCount int `json:"star_count"` + // IsOfficial is true if the result is from an official repository. + IsOfficial bool `json:"is_official"` + // Name is the name of the repository + Name string `json:"name"` + // IsAutomated indicates whether the result is automated + IsAutomated bool `json:"is_automated"` + // Description is a textual description of the repository + Description string `json:"description"` +} + +// SearchResults lists a collection search results returned from a registry +type SearchResults struct { + // Query contains the query string that generated the search results + Query string `json:"query"` + // NumResults indicates the number of results the query returned + NumResults int `json:"num_results"` + // Results is a slice containing the actual results for the search + Results []SearchResult `json:"results"` +} diff --git a/vendor/src/github.com/docker/engine-api/types/seccomp.go b/vendor/src/github.com/docker/engine-api/types/seccomp.go new file mode 100644 index 00000000..854f1c45 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/types/seccomp.go @@ -0,0 +1,73 @@ +package types + +// Seccomp represents the config for a seccomp profile for syscall restriction. 
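+//
+// A minimal profile literal (editor's sketch; the syscall chosen is an
+// arbitrary example):
+//
+// p := types.Seccomp{
+// DefaultAction: types.ActErrno,
+// Syscalls: []*types.Syscall{{Name: "chmod", Action: types.ActAllow}},
+// }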
+type Seccomp struct { + DefaultAction Action `json:"defaultAction"` + Architectures []Arch `json:"architectures"` + Syscalls []*Syscall `json:"syscalls"` +} + +// Arch used for additional architectures +type Arch string + +// Additional architectures permitted to be used for system calls +// By default only the native architecture of the kernel is permitted +const ( + ArchX86 Arch = "SCMP_ARCH_X86" + ArchX86_64 Arch = "SCMP_ARCH_X86_64" + ArchX32 Arch = "SCMP_ARCH_X32" + ArchARM Arch = "SCMP_ARCH_ARM" + ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" + ArchMIPS Arch = "SCMP_ARCH_MIPS" + ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" + ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" + ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" + ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" + ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" + ArchPPC Arch = "SCMP_ARCH_PPC" + ArchPPC64 Arch = "SCMP_ARCH_PPC64" + ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE" + ArchS390 Arch = "SCMP_ARCH_S390" + ArchS390X Arch = "SCMP_ARCH_S390X" +) + +// Action taken upon Seccomp rule match +type Action string + +// Define actions for Seccomp rules +const ( + ActKill Action = "SCMP_ACT_KILL" + ActTrap Action = "SCMP_ACT_TRAP" + ActErrno Action = "SCMP_ACT_ERRNO" + ActTrace Action = "SCMP_ACT_TRACE" + ActAllow Action = "SCMP_ACT_ALLOW" +) + +// Operator used to match syscall arguments in Seccomp +type Operator string + +// Define operators for syscall arguments in Seccomp +const ( + OpNotEqual Operator = "SCMP_CMP_NE" + OpLessThan Operator = "SCMP_CMP_LT" + OpLessEqual Operator = "SCMP_CMP_LE" + OpEqualTo Operator = "SCMP_CMP_EQ" + OpGreaterEqual Operator = "SCMP_CMP_GE" + OpGreaterThan Operator = "SCMP_CMP_GT" + OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ" +) + +// Arg used for matching specific syscall arguments in Seccomp +type Arg struct { + Index uint `json:"index"` + Value uint64 `json:"value"` + ValueTwo uint64 `json:"valueTwo"` + Op Operator `json:"op"` +} + +// Syscall is used to match a syscall in Seccomp +type Syscall struct { + Name string `json:"name"` + Action Action `json:"action"` + Args []*Arg `json:"args"` +} diff --git a/vendor/src/github.com/docker/engine-api/types/stats.go b/vendor/src/github.com/docker/engine-api/types/stats.go new file mode 100644 index 00000000..b420ebe7 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/types/stats.go @@ -0,0 +1,115 @@ +// Package types is used for API stability in the types and response to the +// consumers of the API stats endpoint. +package types + +import "time" + +// ThrottlingData stores CPU throttling stats of one running container +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods"` + // Number of periods when the container hits its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time"` +} + +// CPUUsage stores All CPU stats aggregated since container inception. +type CPUUsage struct { + // Total CPU time consumed. + // Units: nanoseconds. + TotalUsage uint64 `json:"total_usage"` + // Total CPU time consumed per core. + // Units: nanoseconds. + PercpuUsage []uint64 `json:"percpu_usage"` + // Time spent by tasks of the cgroup in kernel mode. + // Units: nanoseconds. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + // Time spent by tasks of the cgroup in user mode. + // Units: nanoseconds. 
+ UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +// CPUStats aggregates and wraps all CPU related info of container +type CPUStats struct { + CPUUsage CPUUsage `json:"cpu_usage"` + SystemUsage uint64 `json:"system_cpu_usage"` + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +// MemoryStats aggregates All memory stats since container inception +type MemoryStats struct { + // current res_counter usage for memory + Usage uint64 `json:"usage"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. + Stats map[string]uint64 `json:"stats"` + // number of times memory usage hits limits. + Failcnt uint64 `json:"failcnt"` + Limit uint64 `json:"limit"` +} + +// BlkioStatEntry is one small entity to store a piece of Blkio stats +// TODO Windows: This can be factored out +type BlkioStatEntry struct { + Major uint64 `json:"major"` + Minor uint64 `json:"minor"` + Op string `json:"op"` + Value uint64 `json:"value"` +} + +// BlkioStats stores All IO service stats for data read and write +// TODO Windows: This can be factored out +type BlkioStats struct { + // number of bytes transferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` +} + +// NetworkStats aggregates All network stats of one container +// TODO Windows: This will require refactoring +type NetworkStats struct { + RxBytes uint64 `json:"rx_bytes"` + RxPackets uint64 `json:"rx_packets"` + RxErrors uint64 `json:"rx_errors"` + RxDropped uint64 `json:"rx_dropped"` + TxBytes uint64 `json:"tx_bytes"` + TxPackets uint64 `json:"tx_packets"` + TxErrors uint64 `json:"tx_errors"` + TxDropped uint64 `json:"tx_dropped"` +} + +// PidsStats contains the stats of a container's pids +type PidsStats struct { + // Current is the number of pids in the cgroup + Current uint64 `json:"current,omitempty"` + // Limit is the hard limit on the number of pids in the cgroup. + // A "Limit" of 0 means that there is no limit. 
+ Limit uint64 `json:"limit,omitempty"`
+}
+
+// Stats aggregates all types of stats of one container
+type Stats struct {
+ Read time.Time `json:"read"`
+ PreCPUStats CPUStats `json:"precpu_stats,omitempty"`
+ CPUStats CPUStats `json:"cpu_stats,omitempty"`
+ MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+ BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+ PidsStats PidsStats `json:"pids_stats,omitempty"`
+}
+
+// StatsJSON extends Stats with per-network statistics
+type StatsJSON struct {
+ Stats
+
+ // Networks is only filled in for request API version >= 1.21
+ Networks map[string]NetworkStats `json:"networks,omitempty"`
+}
diff --git a/vendor/src/github.com/docker/engine-api/types/strslice/strslice.go b/vendor/src/github.com/docker/engine-api/types/strslice/strslice.go
new file mode 100644
index 00000000..bad493fb
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 {
+ // With no input, we preserve the existing value by returning nil and
+ // leaving the target alone. This allows defining default values for
+ // the type.
+ return nil
+ }
+
+ p := make([]string, 0, 1)
+ if err := json.Unmarshal(b, &p); err != nil {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ p = append(p, s)
+ }
+
+ *e = p
+ return nil
+}
diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/common.go b/vendor/src/github.com/docker/engine-api/types/swarm/common.go
new file mode 100644
index 00000000..b87f5453
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/swarm/common.go
@@ -0,0 +1,21 @@
+package swarm
+
+import "time"
+
+// Version represents the internal object version.
+type Version struct {
+ Index uint64 `json:",omitempty"`
+}
+
+// Meta is the base object embedded by most of the other ones.
+type Meta struct {
+ Version Version `json:",omitempty"`
+ CreatedAt time.Time `json:",omitempty"`
+ UpdatedAt time.Time `json:",omitempty"`
+}
+
+// Annotations represents how to describe an object.
+type Annotations struct {
+ Name string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+}
diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/container.go b/vendor/src/github.com/docker/engine-api/types/swarm/container.go
new file mode 100644
index 00000000..29f2e8a6
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/swarm/container.go
@@ -0,0 +1,67 @@
+package swarm
+
+import "time"
+
+// ContainerSpec represents the spec of a container.
+type ContainerSpec struct {
+ Image string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Command []string `json:",omitempty"`
+ Args []string `json:",omitempty"`
+ Env []string `json:",omitempty"`
+ Dir string `json:",omitempty"`
+ User string `json:",omitempty"`
+ Mounts []Mount `json:",omitempty"`
+ StopGracePeriod *time.Duration `json:",omitempty"`
+}
+
+// MountType represents the type of a mount.
+type MountType string
+
+const (
+ // MountTypeBind BIND
+ MountTypeBind MountType = "bind"
+ // MountTypeVolume VOLUME
+ MountTypeVolume MountType = "volume"
+)
+
+// Mount represents a mount (volume).
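+//
+// For instance (editor's sketch), a read-only bind mount:
+//
+// m := swarm.Mount{Type: swarm.MountTypeBind, Source: "/src", Target: "/dst", ReadOnly: true}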
+type Mount struct { + Type MountType `json:",omitempty"` + Source string `json:",omitempty"` + Target string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` +} + +// MountPropagation represents the propagation of a mount. +type MountPropagation string + +const ( + // MountPropagationRPrivate RPRIVATE + MountPropagationRPrivate MountPropagation = "rprivate" + // MountPropagationPrivate PRIVATE + MountPropagationPrivate MountPropagation = "private" + // MountPropagationRShared RSHARED + MountPropagationRShared MountPropagation = "rshared" + // MountPropagationShared SHARED + MountPropagationShared MountPropagation = "shared" + // MountPropagationRSlave RSLAVE + MountPropagationRSlave MountPropagation = "rslave" + // MountPropagationSlave SLAVE + MountPropagationSlave MountPropagation = "slave" +) + +// BindOptions defines options specific to mounts of type "bind". +type BindOptions struct { + Propagation MountPropagation `json:",omitempty"` +} + +// VolumeOptions represents the options for a mount of type volume. +type VolumeOptions struct { + NoCopy bool `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + DriverConfig *Driver `json:",omitempty"` +} diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/network.go b/vendor/src/github.com/docker/engine-api/types/swarm/network.go new file mode 100644 index 00000000..84804da2 --- /dev/null +++ b/vendor/src/github.com/docker/engine-api/types/swarm/network.go @@ -0,0 +1,99 @@ +package swarm + +// Endpoint represents an endpoint. +type Endpoint struct { + Spec EndpointSpec `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` + VirtualIPs []EndpointVirtualIP `json:",omitempty"` +} + +// EndpointSpec represents the spec of an endpoint. +type EndpointSpec struct { + Mode ResolutionMode `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` +} + +// ResolutionMode represents a resolution mode. +type ResolutionMode string + +const ( + // ResolutionModeVIP VIP + ResolutionModeVIP ResolutionMode = "vip" + // ResolutionModeDNSRR DNSRR + ResolutionModeDNSRR ResolutionMode = "dnsrr" +) + +// PortConfig represents the config of a port. +type PortConfig struct { + Name string `json:",omitempty"` + Protocol PortConfigProtocol `json:",omitempty"` + TargetPort uint32 `json:",omitempty"` + PublishedPort uint32 `json:",omitempty"` +} + +// PortConfigProtocol represents the protocol of a port. +type PortConfigProtocol string + +const ( + // TODO(stevvooe): These should be used generally, not just for PortConfig. + + // PortConfigProtocolTCP TCP + PortConfigProtocolTCP PortConfigProtocol = "tcp" + // PortConfigProtocolUDP UDP + PortConfigProtocolUDP PortConfigProtocol = "udp" +) + +// EndpointVirtualIP represents the virtual ip of a port. +type EndpointVirtualIP struct { + NetworkID string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Network represents a network. +type Network struct { + ID string + Meta + Spec NetworkSpec `json:",omitempty"` + DriverState Driver `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkSpec represents the spec of a network. +type NetworkSpec struct { + Annotations + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkAttachmentConfig represents the configuration of a network attachment. 
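+//
+// Editor's sketch (the network name and alias are hypothetical):
+//
+// cfg := swarm.NetworkAttachmentConfig{Target: "my-net", Aliases: []string{"db"}}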
+type NetworkAttachmentConfig struct {
+ Target string `json:",omitempty"`
+ Aliases []string `json:",omitempty"`
+}
+
+// NetworkAttachment represents a network attachment.
+type NetworkAttachment struct {
+ Network Network `json:",omitempty"`
+ Addresses []string `json:",omitempty"`
+}
+
+// IPAMOptions represents IPAM options.
+type IPAMOptions struct {
+ Driver Driver `json:",omitempty"`
+ Configs []IPAMConfig `json:",omitempty"`
+}
+
+// IPAMConfig represents IPAM configuration.
+type IPAMConfig struct {
+ Subnet string `json:",omitempty"`
+ Range string `json:",omitempty"`
+ Gateway string `json:",omitempty"`
+}
+
+// Driver represents a driver (network/volume).
+type Driver struct {
+ Name string `json:",omitempty"`
+ Options map[string]string `json:",omitempty"`
+}
diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/node.go b/vendor/src/github.com/docker/engine-api/types/swarm/node.go
new file mode 100644
index 00000000..9987662a
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/swarm/node.go
@@ -0,0 +1,107 @@
+package swarm
+
+// Node represents a node.
+type Node struct {
+ ID string
+ Meta
+
+ Spec NodeSpec `json:",omitempty"`
+ Description NodeDescription `json:",omitempty"`
+ Status NodeStatus `json:",omitempty"`
+ ManagerStatus *ManagerStatus `json:",omitempty"`
+}
+
+// NodeSpec represents the spec of a node.
+type NodeSpec struct {
+ Annotations
+ Role NodeRole `json:",omitempty"`
+ Availability NodeAvailability `json:",omitempty"`
+}
+
+// NodeRole represents the role of a node.
+type NodeRole string
+
+const (
+ // NodeRoleWorker WORKER
+ NodeRoleWorker NodeRole = "worker"
+ // NodeRoleManager MANAGER
+ NodeRoleManager NodeRole = "manager"
+)
+
+// NodeAvailability represents the availability of a node.
+type NodeAvailability string
+
+const (
+ // NodeAvailabilityActive ACTIVE
+ NodeAvailabilityActive NodeAvailability = "active"
+ // NodeAvailabilityPause PAUSE
+ NodeAvailabilityPause NodeAvailability = "pause"
+ // NodeAvailabilityDrain DRAIN
+ NodeAvailabilityDrain NodeAvailability = "drain"
+)
+
+// NodeDescription represents the description of a node.
+type NodeDescription struct {
+ Hostname string `json:",omitempty"`
+ Platform Platform `json:",omitempty"`
+ Resources Resources `json:",omitempty"`
+ Engine EngineDescription `json:",omitempty"`
+}
+
+// Platform represents the platform (Arch/OS).
+type Platform struct {
+ Architecture string `json:",omitempty"`
+ OS string `json:",omitempty"`
+}
+
+// EngineDescription represents the description of an engine.
+type EngineDescription struct {
+ EngineVersion string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Plugins []PluginDescription `json:",omitempty"`
+}
+
+// PluginDescription represents the description of an engine plugin.
+type PluginDescription struct {
+ Type string `json:",omitempty"`
+ Name string `json:",omitempty"`
+}
+
+// NodeStatus represents the status of a node.
+type NodeStatus struct {
+ State NodeState `json:",omitempty"`
+ Message string `json:",omitempty"`
+}
+
+// Reachability represents the reachability of a node.
+type Reachability string
+
+const (
+ // ReachabilityUnknown UNKNOWN
+ ReachabilityUnknown Reachability = "unknown"
+ // ReachabilityUnreachable UNREACHABLE
+ ReachabilityUnreachable Reachability = "unreachable"
+ // ReachabilityReachable REACHABLE
+ ReachabilityReachable Reachability = "reachable"
+)
+
+// ManagerStatus represents the status of a manager.
+type ManagerStatus struct {
+ Leader bool `json:",omitempty"`
+ Reachability Reachability `json:",omitempty"`
+ Addr string `json:",omitempty"`
+}
+
+// NodeState represents the state of a node.
+type NodeState string
+
+const (
+ // NodeStateUnknown UNKNOWN
+ NodeStateUnknown NodeState = "unknown"
+ // NodeStateDown DOWN
+ NodeStateDown NodeState = "down"
+ // NodeStateReady READY
+ NodeStateReady NodeState = "ready"
+ // NodeStateDisconnected DISCONNECTED
+ NodeStateDisconnected NodeState = "disconnected"
+)
diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/service.go b/vendor/src/github.com/docker/engine-api/types/swarm/service.go
new file mode 100644
index 00000000..676fc0e0
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/swarm/service.go
@@ -0,0 +1,73 @@
+package swarm
+
+import "time"
+
+// Service represents a service.
+type Service struct {
+ ID string
+ Meta
+ Spec ServiceSpec `json:",omitempty"`
+ Endpoint Endpoint `json:",omitempty"`
+ UpdateStatus UpdateStatus `json:",omitempty"`
+}
+
+// ServiceSpec represents the spec of a service.
+type ServiceSpec struct {
+ Annotations
+
+ // TaskTemplate defines how the service should construct new tasks when
+ // orchestrating this service.
+ TaskTemplate TaskSpec `json:",omitempty"`
+ Mode ServiceMode `json:",omitempty"`
+ UpdateConfig *UpdateConfig `json:",omitempty"`
+ Networks []NetworkAttachmentConfig `json:",omitempty"`
+ EndpointSpec *EndpointSpec `json:",omitempty"`
+}
+
+// ServiceMode represents the mode of a service.
+type ServiceMode struct {
+ Replicated *ReplicatedService `json:",omitempty"`
+ Global *GlobalService `json:",omitempty"`
+}
+
+// UpdateState is the state of a service update.
+type UpdateState string
+
+const (
+ // UpdateStateUpdating is the updating state.
+ UpdateStateUpdating UpdateState = "updating"
+ // UpdateStatePaused is the paused state.
+ UpdateStatePaused UpdateState = "paused"
+ // UpdateStateCompleted is the completed state.
+ UpdateStateCompleted UpdateState = "completed"
+)
+
+// UpdateStatus reports the status of a service update.
+type UpdateStatus struct {
+ State UpdateState `json:",omitempty"`
+ StartedAt time.Time `json:",omitempty"`
+ CompletedAt time.Time `json:",omitempty"`
+ Message string `json:",omitempty"`
+}
+
+// ReplicatedService is a kind of ServiceMode.
+type ReplicatedService struct {
+ Replicas *uint64 `json:",omitempty"`
+}
+
+// GlobalService is a kind of ServiceMode.
+type GlobalService struct{}
+
+const (
+ // UpdateFailureActionPause PAUSE
+ UpdateFailureActionPause = "pause"
+ // UpdateFailureActionContinue CONTINUE
+ UpdateFailureActionContinue = "continue"
+)
+
+// UpdateConfig represents the update configuration.
+type UpdateConfig struct {
+ Parallelism uint64 `json:",omitempty"`
+ Delay time.Duration `json:",omitempty"`
+ FailureAction string `json:",omitempty"`
+}
diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/swarm.go b/vendor/src/github.com/docker/engine-api/types/swarm/swarm.go
new file mode 100644
index 00000000..0a541410
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/swarm/swarm.go
@@ -0,0 +1,141 @@
+package swarm
+
+import "time"
+
+// ClusterInfo represents info about the cluster for outputting in "info";
+// it contains the same information as "Swarm", but without the JoinTokens.
+type ClusterInfo struct {
+ ID string
+ Meta
+ Spec Spec
+}
+
+// Swarm represents a swarm.
+type Swarm struct {
+ ClusterInfo
+ JoinTokens JoinTokens
+}
+
+// JoinTokens contains the tokens workers and managers need to join the swarm.
+type JoinTokens struct {
+ Worker string
+ Manager string
+}
+
+// Spec represents the spec of a swarm.
+type Spec struct {
+ Annotations
+
+ Orchestration OrchestrationConfig `json:",omitempty"`
+ Raft RaftConfig `json:",omitempty"`
+ Dispatcher DispatcherConfig `json:",omitempty"`
+ CAConfig CAConfig `json:",omitempty"`
+ TaskDefaults TaskDefaults `json:",omitempty"`
+}
+
+// OrchestrationConfig represents orchestration configuration.
+type OrchestrationConfig struct {
+ TaskHistoryRetentionLimit int64 `json:",omitempty"`
+}
+
+// TaskDefaults parameterizes cluster-level task creation with default values.
+type TaskDefaults struct {
+ // LogDriver selects the log driver to use for tasks created in the
+ // orchestrator if unspecified by a service.
+ //
+ // Updating this value will only have an effect on new tasks. Old tasks
+ // will continue to use their previously configured log driver until
+ // recreated.
+ LogDriver *Driver `json:",omitempty"`
+}
+
+// RaftConfig represents raft configuration.
+type RaftConfig struct {
+ SnapshotInterval uint64 `json:",omitempty"`
+ KeepOldSnapshots uint64 `json:",omitempty"`
+ LogEntriesForSlowFollowers uint64 `json:",omitempty"`
+ HeartbeatTick uint32 `json:",omitempty"`
+ ElectionTick uint32 `json:",omitempty"`
+}
+
+// DispatcherConfig represents dispatcher configuration.
+type DispatcherConfig struct {
+ HeartbeatPeriod uint64 `json:",omitempty"`
+}
+
+// CAConfig represents CA configuration.
+type CAConfig struct {
+ NodeCertExpiry time.Duration `json:",omitempty"`
+ ExternalCAs []*ExternalCA `json:",omitempty"`
+}
+
+// ExternalCAProtocol represents type of external CA.
+type ExternalCAProtocol string
+
+// ExternalCAProtocolCFSSL CFSSL
+const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"
+
+// ExternalCA defines external CA to be used by the cluster.
+type ExternalCA struct {
+ Protocol ExternalCAProtocol
+ URL string
+ Options map[string]string `json:",omitempty"`
+}
+
+// InitRequest is the request used to init a swarm.
+type InitRequest struct {
+ ListenAddr string
+ AdvertiseAddr string
+ ForceNewCluster bool
+ Spec Spec
+}
+
+// JoinRequest is the request used to join a swarm.
+type JoinRequest struct {
+ ListenAddr string
+ AdvertiseAddr string
+ RemoteAddrs []string
+ JoinToken string // JoinToken is the secret token needed to join the swarm
+}
+
+// LocalNodeState represents the state of the local node.
+type LocalNodeState string
+
+const (
+ // LocalNodeStateInactive INACTIVE
+ LocalNodeStateInactive LocalNodeState = "inactive"
+ // LocalNodeStatePending PENDING
+ LocalNodeStatePending LocalNodeState = "pending"
+ // LocalNodeStateActive ACTIVE
+ LocalNodeStateActive LocalNodeState = "active"
+ // LocalNodeStateError ERROR
+ LocalNodeStateError LocalNodeState = "error"
+)
+
+// Info represents generic information about swarm.
+type Info struct {
+ NodeID string
+ NodeAddr string
+
+ LocalNodeState LocalNodeState
+ ControlAvailable bool
+ Error string
+
+ RemoteManagers []Peer
+ Nodes int
+ Managers int
+
+ Cluster ClusterInfo
+}
+
+// Peer represents a peer.
+type Peer struct {
+ NodeID string
+ Addr string
+}
+
+// UpdateFlags contains flags for SwarmUpdate.
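+//
+// For example (editor's note), UpdateFlags{RotateWorkerToken: true} asks
+// the swarm to issue a fresh worker join token.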
+type UpdateFlags struct {
+ RotateWorkerToken bool
+ RotateManagerToken bool
+}
diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/task.go b/vendor/src/github.com/docker/engine-api/types/swarm/task.go
new file mode 100644
index 00000000..fa8228a4
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/swarm/task.go
@@ -0,0 +1,115 @@
+package swarm
+
+import "time"
+
+// TaskState represents the state of a task.
+type TaskState string
+
+const (
+ // TaskStateNew NEW
+ TaskStateNew TaskState = "new"
+ // TaskStateAllocated ALLOCATED
+ TaskStateAllocated TaskState = "allocated"
+ // TaskStatePending PENDING
+ TaskStatePending TaskState = "pending"
+ // TaskStateAssigned ASSIGNED
+ TaskStateAssigned TaskState = "assigned"
+ // TaskStateAccepted ACCEPTED
+ TaskStateAccepted TaskState = "accepted"
+ // TaskStatePreparing PREPARING
+ TaskStatePreparing TaskState = "preparing"
+ // TaskStateReady READY
+ TaskStateReady TaskState = "ready"
+ // TaskStateStarting STARTING
+ TaskStateStarting TaskState = "starting"
+ // TaskStateRunning RUNNING
+ TaskStateRunning TaskState = "running"
+ // TaskStateComplete COMPLETE
+ TaskStateComplete TaskState = "complete"
+ // TaskStateShutdown SHUTDOWN
+ TaskStateShutdown TaskState = "shutdown"
+ // TaskStateFailed FAILED
+ TaskStateFailed TaskState = "failed"
+ // TaskStateRejected REJECTED
+ TaskStateRejected TaskState = "rejected"
+)
+
+// Task represents a task.
+type Task struct {
+ ID string
+ Meta
+
+ Spec TaskSpec `json:",omitempty"`
+ ServiceID string `json:",omitempty"`
+ Slot int `json:",omitempty"`
+ NodeID string `json:",omitempty"`
+ Status TaskStatus `json:",omitempty"`
+ DesiredState TaskState `json:",omitempty"`
+ NetworksAttachments []NetworkAttachment `json:",omitempty"`
+}
+
+// TaskSpec represents the spec of a task.
+type TaskSpec struct {
+ ContainerSpec ContainerSpec `json:",omitempty"`
+ Resources *ResourceRequirements `json:",omitempty"`
+ RestartPolicy *RestartPolicy `json:",omitempty"`
+ Placement *Placement `json:",omitempty"`
+
+ // LogDriver specifies the LogDriver to use for tasks created from this
+ // spec. If not present, the cluster default on swarm.Spec will be used,
+ // finally falling back to the engine default if that is also unset.
+ LogDriver *Driver `json:",omitempty"`
+}
+
+// Resources represents resources (CPU/Memory).
+type Resources struct {
+ NanoCPUs int64 `json:",omitempty"`
+ MemoryBytes int64 `json:",omitempty"`
+}
+
+// ResourceRequirements represents resources requirements.
+type ResourceRequirements struct {
+ Limits *Resources `json:",omitempty"`
+ Reservations *Resources `json:",omitempty"`
+}
+
+// Placement represents orchestration parameters.
+type Placement struct {
+ Constraints []string `json:",omitempty"`
+}
+
+// RestartPolicy represents the restart policy.
+type RestartPolicy struct {
+ Condition RestartPolicyCondition `json:",omitempty"`
+ Delay *time.Duration `json:",omitempty"`
+ MaxAttempts *uint64 `json:",omitempty"`
+ Window *time.Duration `json:",omitempty"`
+}
+
+// RestartPolicyCondition represents when to restart.
+type RestartPolicyCondition string
+
+const (
+ // RestartPolicyConditionNone NONE
+ RestartPolicyConditionNone RestartPolicyCondition = "none"
+ // RestartPolicyConditionOnFailure ON_FAILURE
+ RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
+ // RestartPolicyConditionAny ANY
+ RestartPolicyConditionAny RestartPolicyCondition = "any"
+)
+
+// TaskStatus represents the status of a task.
+type TaskStatus struct {
+ Timestamp time.Time `json:",omitempty"`
+ State TaskState `json:",omitempty"`
+ Message string `json:",omitempty"`
+ Err string `json:",omitempty"`
+ ContainerStatus ContainerStatus `json:",omitempty"`
+}
+
+// ContainerStatus represents the status of a container.
+type ContainerStatus struct {
+ ContainerID string `json:",omitempty"`
+ PID int `json:",omitempty"`
+ ExitCode int `json:",omitempty"`
+}
diff --git a/vendor/src/github.com/docker/engine-api/types/time/duration_convert.go b/vendor/src/github.com/docker/engine-api/types/time/duration_convert.go
new file mode 100644
index 00000000..63e1eec1
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/time/duration_convert.go
@@ -0,0 +1,12 @@
+package time
+
+import (
+ "strconv"
+ "time"
+)
+
+// DurationToSecondsString converts the specified duration to the number of
+// seconds it represents, formatted as a string.
+func DurationToSecondsString(duration time.Duration) string {
+ return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64)
+}
diff --git a/vendor/src/github.com/docker/engine-api/types/time/timestamp.go b/vendor/src/github.com/docker/engine-api/types/time/timestamp.go
new file mode 100644
index 00000000..d3695ba7
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/time/timestamp.go
@@ -0,0 +1,124 @@
+package time
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// These are additional predefined layouts for use in Time.Format and Time.Parse
+// with --since and --until parameters for `docker logs` and `docker events`
+const (
+ rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone
+ rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
+ dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00
+ dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00
+)
+
+// GetTimestamp tries to parse the given string as a Go duration, then as an
+// RFC3339 time, and finally as a Unix timestamp. If any of these succeeds,
+// it returns the result formatted as a Unix timestamp string; otherwise it
+// returns the given value back. In case of duration input, the returned
+// timestamp is computed as the given reference time minus the duration.
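+//
+// Editor's examples (assuming reference is the current time "now"):
+//
+// GetTimestamp("2h", now) // now minus two hours, as Unix seconds
+// GetTimestamp("2016-11-22", now) // local midnight on that date, as "seconds.nanoseconds"
+// GetTimestamp("1136073600", now) // parseable as neither duration nor date: passed through unchanged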
+func GetTimestamp(value string, reference time.Time) (string, error) {
+ if d, err := time.ParseDuration(value); value != "0" && err == nil {
+ return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
+ }
+
+ var format string
+ var parseInLocation bool
+
+ // if the string has a Z, a +, or three dashes, use time.Parse;
+ // otherwise use time.ParseInLocation
+ parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+
+ if strings.Contains(value, ".") {
+ if parseInLocation {
+ format = rFC3339NanoLocal
+ } else {
+ format = time.RFC3339Nano
+ }
+ } else if strings.Contains(value, "T") {
+ // we want the number of colons in the T portion of the timestamp
+ tcolons := strings.Count(value, ":")
+ // if parseInLocation is off and we have a +/- zone offset (not Z), then
+ // there will be an extra colon in the input for the tz offset;
+ // subtract that colon from the tcolons count
+ if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
+ tcolons--
+ }
+ if parseInLocation {
+ switch tcolons {
+ case 0:
+ format = "2006-01-02T15"
+ case 1:
+ format = "2006-01-02T15:04"
+ default:
+ format = rFC3339Local
+ }
+ } else {
+ switch tcolons {
+ case 0:
+ format = "2006-01-02T15Z07:00"
+ case 1:
+ format = "2006-01-02T15:04Z07:00"
+ default:
+ format = time.RFC3339
+ }
+ }
+ } else if parseInLocation {
+ format = dateLocal
+ } else {
+ format = dateWithZone
+ }
+
+ var t time.Time
+ var err error
+
+ if parseInLocation {
+ t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
+ } else {
+ t, err = time.Parse(format, value)
+ }
+
+ if err != nil {
+ // if there is a `-` then it's an RFC3339-like timestamp; otherwise assume a Unix timestamp
+ if strings.Contains(value, "-") {
+ return "", err // was probably an RFC3339-like timestamp, but the parser failed with an error
+ }
+ return value, nil // Unix timestamp in-and-out case (meaning: the value passed at the command line is already in the right format for passing to the server)
+ }
+
+ return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
+}
+
+// ParseTimestamps returns seconds and nanoseconds from a timestamp that has
+// the format "%d.%09d" (time.Unix(), int64(time.Nanosecond())).
+// If the incoming nanosecond portion is longer or shorter than 9 digits, it is
+// converted to nanoseconds. The expectation is that the seconds and
+// nanoseconds will be used to create a time variable. For example:
+//
+// seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0)
+// if err == nil { since := time.Unix(seconds, nanoseconds) }
+//
+// If value is empty, it returns def as the seconds (and 0 nanoseconds).
+func ParseTimestamps(value string, def int64) (int64, int64, error) {
+ if value == "" {
+ return def, 0, nil
+ }
+ sa := strings.SplitN(value, ".", 2)
+ s, err := strconv.ParseInt(sa[0], 10, 64)
+ if err != nil {
+ return s, 0, err
+ }
+ if len(sa) != 2 {
+ return s, 0, nil
+ }
+ n, err := strconv.ParseInt(sa[1], 10, 64)
+ if err != nil {
+ return s, n, err
+ }
+ // should already be in nanoseconds, but just in case convert n to nanoseconds
+ n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1]))))
+ return s, n, nil
+}
diff --git a/vendor/src/github.com/docker/engine-api/types/types.go b/vendor/src/github.com/docker/engine-api/types/types.go
new file mode 100644
index 00000000..b6f91259
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/types.go
@@ -0,0 +1,515 @@
+package types
+
+import (
+ "os"
+ "time"
+
+ "github.com/docker/engine-api/types/container"
+ "github.com/docker/engine-api/types/network"
+ "github.com/docker/engine-api/types/registry"
+ "github.com/docker/engine-api/types/swarm"
+ "github.com/docker/go-connections/nat"
+)
+
+// ContainerCreateResponse contains the information returned to a client on the
+// creation of a new container.
+type ContainerCreateResponse struct {
+ // ID is the ID of the created container.
+ ID string `json:"Id"`
+
+ // Warnings are any warnings encountered during the creation of the container.
+ Warnings []string `json:"Warnings"`
+}
+
+// ContainerExecCreateResponse contains response of Remote API:
+// POST "/containers/{name:.*}/exec"
+type ContainerExecCreateResponse struct {
+ // ID is the exec ID.
+ ID string `json:"Id"`
+}
+
+// ContainerUpdateResponse contains response of Remote API:
+// POST "/containers/{name:.*}/update"
+type ContainerUpdateResponse struct {
+ // Warnings are any warnings encountered during the updating of the container.
+ Warnings []string `json:"Warnings"`
+}
+
+// AuthResponse contains response of Remote API:
+// POST "/auth"
+type AuthResponse struct {
+ // Status is the authentication status
+ Status string `json:"Status"`
+
+ // IdentityToken is an opaque token used for authenticating
+ // a user after a successful login.
+ IdentityToken string `json:"IdentityToken,omitempty"` +} + +// ContainerWaitResponse contains response of Remote API: +// POST "/containers/"+containerID+"/wait" +type ContainerWaitResponse struct { + // StatusCode is the status code of the wait job + StatusCode int `json:"StatusCode"` +} + +// ContainerCommitResponse contains response of Remote API: +// POST "/commit?container="+containerID +type ContainerCommitResponse struct { + ID string `json:"Id"` +} + +// ContainerChange contains response of Remote API: +// GET "/containers/{name:.*}/changes" +type ContainerChange struct { + Kind int + Path string +} + +// ImageHistory contains response of Remote API: +// GET "/images/{name:.*}/history" +type ImageHistory struct { + ID string `json:"Id"` + Created int64 + CreatedBy string + Tags []string + Size int64 + Comment string +} + +// ImageDelete contains response of Remote API: +// DELETE "/images/{name:.*}" +type ImageDelete struct { + Untagged string `json:",omitempty"` + Deleted string `json:",omitempty"` +} + +// Image contains response of Remote API: +// GET "/images/json" +type Image struct { + ID string `json:"Id"` + ParentID string `json:"ParentId"` + RepoTags []string + RepoDigests []string + Created int64 + Size int64 + VirtualSize int64 + Labels map[string]string +} + +// GraphDriverData returns Image's graph driver config info +// when calling inspect command +type GraphDriverData struct { + Name string + Data map[string]string +} + +// RootFS returns Image's RootFS description including the layer IDs. +type RootFS struct { + Type string + Layers []string `json:",omitempty"` + BaseLayer string `json:",omitempty"` +} + +// ImageInspect contains response of Remote API: +// GET "/images/{name:.*}/json" +type ImageInspect struct { + ID string `json:"Id"` + RepoTags []string + RepoDigests []string + Parent string + Comment string + Created string + Container string + ContainerConfig *container.Config + DockerVersion string + Author string + Config *container.Config + Architecture string + Os string + Size int64 + VirtualSize int64 + GraphDriver GraphDriverData + RootFS RootFS +} + +// Port stores open ports info of container +// e.g. {"PrivatePort": 8080, "PublicPort": 80, "Type": "tcp"} +type Port struct { + IP string `json:",omitempty"` + PrivatePort int + PublicPort int `json:",omitempty"` + Type string +} + +// Container contains response of Remote API: +// GET "/containers/json" +type Container struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + Command string + Created int64 + Ports []Port + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + } + NetworkSettings *SummaryNetworkSettings + Mounts []MountPoint +} + +// CopyConfig contains request body of Remote API: +// POST "/containers/"+containerID+"/copy" +type CopyConfig struct { + Resource string +} + +// ContainerPathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. 
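+//
+// Editor's note: the daemon sends this structure JSON-encoded and
+// base64-wrapped in the X-Docker-Container-Path-Stat response header.
+// A decoded payload might look like (hypothetical values):
+//
+// {"name":"etc","size":4096,"mode":2147484141,"mtime":"2016-11-22T14:32:10Z","linkTarget":""}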
+type ContainerPathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// ContainerProcessList contains response of Remote API: +// GET "/containers/{name:.*}/top" +type ContainerProcessList struct { + Processes [][]string + Titles []string +} + +// Version contains response of Remote API: +// GET "/version" +type Version struct { + Version string + APIVersion string `json:"ApiVersion"` + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// Info contains response of Remote API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + ExecutionDriver string + LoggingDriver string + CgroupDriver string + NEventsListener int + KernelVersion string + OperatingSystem string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + ClusterStore string + ClusterAdvertise string + SecurityOptions []string + Runtimes map[string]Runtime + DefaultRuntime string + Swarm swarm.Info + // LiveRestoreEnabled determines whether containers should be kept + // running when the daemon is shutdown or upon daemon start if + // running containers are detected + LiveRestoreEnabled bool +} + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. 
It is used by the Info struct.
+type PluginsInfo struct {
+ // List of Volume plugins registered
+ Volume []string
+ // List of Network plugins registered
+ Network []string
+ // List of Authorization plugins registered
+ Authorization []string
+}
+
+// ExecStartCheck is a temp struct used by execStart.
+// Its config fields are part of ExecConfig in the runconfig package.
+type ExecStartCheck struct {
+ // ExecStart will first check if it's detached
+ Detach bool
+ // Check if there's a tty
+ Tty bool
+}
+
+// HealthcheckResult stores information about a single run of a healthcheck probe
+type HealthcheckResult struct {
+ Start time.Time // Start is the time this check started
+ End time.Time // End is the time this check ended
+ ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
+ Output string // Output from last check
+}
+
+// Health states
+const (
+ Starting = "starting" // Starting indicates that the container is not yet ready
+ Healthy = "healthy" // Healthy indicates that the container is running correctly
+ Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem
+)
+
+// Health stores information about the container's healthcheck results
+type Health struct {
+ Status string // Status is one of Starting, Healthy or Unhealthy
+ FailingStreak int // FailingStreak is the number of consecutive failures
+ Log []*HealthcheckResult // Log contains the last few results (oldest first)
+}
+
+// ContainerState stores container's running state;
+// it's part of ContainerJSONBase and is returned by the "inspect" command
+type ContainerState struct {
+ Status string
+ Running bool
+ Paused bool
+ Restarting bool
+ OOMKilled bool
+ Dead bool
+ Pid int
+ ExitCode int
+ Error string
+ StartedAt string
+ FinishedAt string
+ Health *Health `json:",omitempty"`
+}
+
+// ContainerNode stores information about the node that a container
+// is running on. It's only available in Docker Swarm
+type ContainerNode struct {
+ ID string
+ IPAddress string `json:"IP"`
+ Addr string
+ Name string
+ Cpus int
+ Memory int64
+ Labels map[string]string
+}
+
+// ContainerJSONBase contains response of Remote API:
+// GET "/containers/{name:.*}/json"
+type ContainerJSONBase struct {
+ ID string `json:"Id"`
+ Created string
+ Path string
+ Args []string
+ State *ContainerState
+ Image string
+ ResolvConfPath string
+ HostnamePath string
+ HostsPath string
+ LogPath string
+ Node *ContainerNode `json:",omitempty"`
+ Name string
+ RestartCount int
+ Driver string
+ MountLabel string
+ ProcessLabel string
+ AppArmorProfile string
+ ExecIDs []string
+ HostConfig *container.HostConfig
+ GraphDriver GraphDriverData
+ SizeRw *int64 `json:",omitempty"`
+ SizeRootFs *int64 `json:",omitempty"`
+}
+
+// ContainerJSON is the newer inspect struct, extending ContainerJSONBase
+// with MountPoint information
+type ContainerJSON struct {
+ *ContainerJSONBase
+ Mounts []MountPoint
+ Config *container.Config
+ NetworkSettings *NetworkSettings
+}
+
+// NetworkSettings exposes the network settings in the API
+type NetworkSettings struct {
+ NetworkSettingsBase
+ DefaultNetworkSettings
+ Networks map[string]*network.EndpointSettings
+}
+
+// SummaryNetworkSettings provides a summary of container's networks
+// in /containers/json
+type SummaryNetworkSettings struct {
+ Networks map[string]*network.EndpointSettings
+}
+
+// NetworkSettingsBase holds basic information about networks
+type NetworkSettingsBase struct {
+ Bridge string // Bridge is the Bridge name the network uses (e.g.
`docker0`) + SandboxID string // SandboxID uniquely represents a container's network stack + HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface + LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix + LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address + Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port + SandboxKey string // SandboxKey identifies the sandbox + SecondaryIPAddresses []network.Address + SecondaryIPv6Addresses []network.Address +} + +// DefaultNetworkSettings holds network information +// during the 2 release deprecation period. +// It will be removed in Docker 1.11. +type DefaultNetworkSettings struct { + EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox + Gateway string // Gateway holds the gateway address for the network + GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address + GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address + IPAddress string // IPAddress holds the IPv4 address for the network + IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address + IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 + MacAddress string // MacAddress holds the MAC address for the network +} + +// MountPoint represents a mount point configuration inside the container. +type MountPoint struct { + Name string `json:",omitempty"` + Source string + Destination string + Driver string `json:",omitempty"` + Mode string + RW bool + Propagation string +} + +// Volume represents the configuration of a volume for the remote API +type Volume struct { + Name string // Name is the name of the volume + Driver string // Driver is the Driver name used to create the volume + Mountpoint string // Mountpoint is the location on disk of the volume + Status map[string]interface{} `json:",omitempty"` // Status provides low-level status information about the volume + Labels map[string]string // Labels is metadata specific to the volume + Scope string // Scope describes the level at which the volume exists (e.g. `global` for cluster-wide or `local` for machine level) +} + +// VolumesListResponse contains the response for the remote API: +// GET "/volumes" +type VolumesListResponse struct { + Volumes []*Volume // Volumes is the list of volumes being returned + Warnings []string // Warnings is a list of warnings that occurred when getting the list from the volume drivers +} + +// VolumeCreateRequest contains the response for the remote API: +// POST "/volumes/create" +type VolumeCreateRequest struct { + Name string // Name is the requested name of the volume + Driver string // Driver is the name of the driver that should be used to create the volume + DriverOpts map[string]string // DriverOpts holds the driver specific options to use for when creating the volume. + Labels map[string]string // Labels holds metadata specific to the volume being created. +} + +// NetworkResource is the body of the "get network" http response message +type NetworkResource struct { + Name string // Name is the requested name of the network + ID string `json:"Id"` // ID uniquely identifies a network on a single machine + Scope string // Scope describes the level at which the network exists (e.g. 
`global` for cluster-wide or `local` for machine level)
+	Driver     string                      // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
+	EnableIPv6 bool                        // EnableIPv6 represents whether to enable IPv6
+	IPAM       network.IPAM                // IPAM is the network's IP Address Management
+	Internal   bool                        // Internal indicates whether the network is for internal use only
+	Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
+	Options    map[string]string           // Options holds the network-specific options used when creating the network
+	Labels     map[string]string           // Labels holds metadata specific to the network being created
+}
+
+// EndpointResource contains network resources allocated and used for a container in a network
+type EndpointResource struct {
+	Name        string
+	EndpointID  string
+	MacAddress  string
+	IPv4Address string
+	IPv6Address string
+}
+
+// NetworkCreate is the expected body of the "create network" http request message
+type NetworkCreate struct {
+	CheckDuplicate bool
+	Driver         string
+	EnableIPv6     bool
+	IPAM           network.IPAM
+	Internal       bool
+	Options        map[string]string
+	Labels         map[string]string
+}
+
+// NetworkCreateRequest is the request message sent to the server for the network create call.
+type NetworkCreateRequest struct {
+	NetworkCreate
+	Name string
+}
+
+// NetworkCreateResponse is the response message sent by the server for the network create call.
+type NetworkCreateResponse struct {
+	ID      string `json:"Id"`
+	Warning string
+}
+
+// NetworkConnect represents the data to be used to connect a container to the network
+type NetworkConnect struct {
+	Container      string
+	EndpointConfig *network.EndpointSettings `json:",omitempty"`
+}
+
+// NetworkDisconnect represents the data to be used to disconnect a container from the network
+type NetworkDisconnect struct {
+	Container string
+	Force     bool
+}
+
+// Checkpoint represents the details of a checkpoint
+type Checkpoint struct {
+	Name string // Name is the name of the checkpoint
+}
+
+// Runtime describes an OCI runtime
+type Runtime struct {
+	Path string   `json:"path"`
+	Args []string `json:"runtimeArgs,omitempty"`
+}
diff --git a/vendor/src/github.com/docker/engine-api/types/versions/README.md b/vendor/src/github.com/docker/engine-api/types/versions/README.md
new file mode 100644
index 00000000..cdac50a5
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/versions/README.md
@@ -0,0 +1,14 @@
+## Legacy API type versions
+
+This package includes types for legacy API versions. The stable versions of the API types live in `api/types/*.go`.
+
+Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions less than or equal to `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since versions below that use the legacy types in `v1p19`.
+
+### Package name conventions
+
+The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:
+
+1. We cannot use `.` because it's interpreted by the language; think of `v1.20.CallFunction`.
+2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks stranger: `v1_20.CallFunction`.
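+
+Callers typically choose between these packages by comparing API version strings with the sibling `versions` helpers (added below in this patch). A minimal, illustrative sketch, assuming a hypothetical `clientVersion` string such as `"1.20"`:
+
+```go
+if versions.LessThan(clientVersion, "1.21") {
+	// Serve the legacy shape, e.g. a v1p20.ContainerJSON,
+	// instead of the current types.ContainerJSON.
+}
+```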
+
+For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, create a new package under `api/types/versions/v1p21`.
diff --git a/vendor/src/github.com/docker/engine-api/types/versions/compare.go b/vendor/src/github.com/docker/engine-api/types/versions/compare.go
new file mode 100644
index 00000000..611d4fed
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/versions/compare.go
@@ -0,0 +1,62 @@
+package versions
+
+import (
+	"strconv"
+	"strings"
+)
+
+// compare compares two version strings and
+// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
+func compare(v1, v2 string) int {
+	var (
+		currTab  = strings.Split(v1, ".")
+		otherTab = strings.Split(v2, ".")
+	)
+
+	max := len(currTab)
+	if len(otherTab) > max {
+		max = len(otherTab)
+	}
+	for i := 0; i < max; i++ {
+		var currInt, otherInt int
+
+		if len(currTab) > i {
+			currInt, _ = strconv.Atoi(currTab[i])
+		}
+		if len(otherTab) > i {
+			otherInt, _ = strconv.Atoi(otherTab[i])
+		}
+		if currInt > otherInt {
+			return 1
+		}
+		if otherInt > currInt {
+			return -1
+		}
+	}
+	return 0
+}
+
+// LessThan checks if a version is less than another
+func LessThan(v, other string) bool {
+	return compare(v, other) == -1
+}
+
+// LessThanOrEqualTo checks if a version is less than or equal to another
+func LessThanOrEqualTo(v, other string) bool {
+	return compare(v, other) <= 0
+}
+
+// GreaterThan checks if a version is greater than another
+func GreaterThan(v, other string) bool {
+	return compare(v, other) == 1
+}
+
+// GreaterThanOrEqualTo checks if a version is greater than or equal to another
+func GreaterThanOrEqualTo(v, other string) bool {
+	return compare(v, other) >= 0
+}
+
+// Equal checks if a version is equal to another
+func Equal(v, other string) bool {
+	return compare(v, other) == 0
+}
diff --git a/vendor/src/github.com/docker/engine-api/types/versions/v1p20/types.go b/vendor/src/github.com/docker/engine-api/types/versions/v1p20/types.go
new file mode 100644
index 00000000..5736efad
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/versions/v1p20/types.go
@@ -0,0 +1,40 @@
+// Package v1p20 provides specific API types for the API version 1, patch 20.
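+// These mirror the current types in the parent `types` package but preserve
+// the shape that API 1.20 clients expect; servers typically select them by
+// comparing version strings with the sibling `versions` helpers.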
+package v1p20 + +import ( + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/go-connections/nat" +) + +// ContainerJSON is a backcompatibility struct for the API 1.20 +type ContainerJSON struct { + *types.ContainerJSONBase + Mounts []types.MountPoint + Config *ContainerConfig + NetworkSettings *NetworkSettings +} + +// ContainerConfig is a backcompatibility struct used in ContainerJSON for the API 1.20 +type ContainerConfig struct { + *container.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string +} + +// StatsJSON is a backcompatibility struct used in Stats for APIs prior to 1.21 +type StatsJSON struct { + types.Stats + Network types.NetworkStats `json:"network,omitempty"` +} + +// NetworkSettings is a backward compatible struct for APIs prior to 1.21 +type NetworkSettings struct { + types.NetworkSettingsBase + types.DefaultNetworkSettings +} diff --git a/vendor/src/github.com/docker/go-connections/nat/nat.go b/vendor/src/github.com/docker/go-connections/nat/nat.go index e19c73c3..4d5f5ae6 100644 --- a/vendor/src/github.com/docker/go-connections/nat/nat.go +++ b/vendor/src/github.com/docker/go-connections/nat/nat.go @@ -155,33 +155,36 @@ type PortMapping struct { Binding PortBinding } +func splitParts(rawport string) (string, string, string) { + parts := strings.Split(rawport, ":") + n := len(parts) + containerport := parts[n-1] + + switch n { + case 1: + return "", "", containerport + case 2: + return "", parts[0], containerport + case 3: + return parts[0], parts[1], containerport + default: + return strings.Join(parts[:n-2], ":"), parts[n-2], containerport + } +} + // ParsePortSpec parses a port specification string into a slice of PortMappings func ParsePortSpec(rawPort string) ([]PortMapping, error) { - proto := "tcp" + var proto string + rawIP, hostPort, containerPort := splitParts(rawPort) + proto, containerPort = SplitProtoPort(containerPort) - if i := strings.LastIndex(rawPort, "/"); i != -1 { - proto = rawPort[i+1:] - rawPort = rawPort[:i] - } - if !strings.Contains(rawPort, ":") { - rawPort = fmt.Sprintf("::%s", rawPort) - } else if len(strings.Split(rawPort, ":")) == 2 { - rawPort = fmt.Sprintf(":%s", rawPort) - } - - parts, err := PartParser(portSpecTemplate, rawPort) + // Strip [] from IPV6 addresses + ip, _, err := net.SplitHostPort(rawIP + ":") if err != nil { - return nil, err + return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err) } - - var ( - containerPort = parts["containerPort"] - rawIP = parts["ip"] - hostPort = parts["hostPort"] - ) - - if rawIP != "" && net.ParseIP(rawIP) == nil { - return nil, fmt.Errorf("Invalid ip address: %s", rawIP) + if ip != "" && net.ParseIP(ip) == nil { + return nil, fmt.Errorf("Invalid ip address: %s", ip) } if containerPort == "" { return nil, fmt.Errorf("No port specified: %s", rawPort) @@ -230,7 +233,7 @@ func ParsePortSpec(rawPort string) ([]PortMapping, error) { } binding := PortBinding{ - HostIP: rawIP, + HostIP: ip, HostPort: hostPort, } ports = append(ports, PortMapping{Port: port, Binding: binding}) diff --git a/vendor/src/github.com/docker/go-connections/nat/parse.go b/vendor/src/github.com/docker/go-connections/nat/parse.go index 87205020..892adf8c 100644 --- a/vendor/src/github.com/docker/go-connections/nat/parse.go +++ b/vendor/src/github.com/docker/go-connections/nat/parse.go @@ -8,6 +8,7 @@ import ( // PartParser parses and validates the 
specified string (data) using the specified template // e.g. ip:public:private -> 192.168.0.1:80:8000 +// DEPRECATED: do not use, this function may be removed in a future version func PartParser(template, data string) (map[string]string, error) { // ip:public:private var ( diff --git a/vendor/src/github.com/docker/go-connections/sockets/sockets.go b/vendor/src/github.com/docker/go-connections/sockets/sockets.go index 1739cecf..a1d7beb4 100644 --- a/vendor/src/github.com/docker/go-connections/sockets/sockets.go +++ b/vendor/src/github.com/docker/go-connections/sockets/sockets.go @@ -2,6 +2,7 @@ package sockets import ( + "errors" "net" "net/http" "time" @@ -10,6 +11,9 @@ import ( // Why 32? See https://github.com/docker/docker/pull/8035. const defaultTimeout = 32 * time.Second +// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system. +var ErrProtocolNotAvailable = errors.New("protocol not available") + // ConfigureTransport configures the specified Transport according to the // specified proto and addr. // If the proto is unix (using a unix socket to communicate) or npipe the @@ -17,17 +21,9 @@ const defaultTimeout = 32 * time.Second func ConfigureTransport(tr *http.Transport, proto, addr string) error { switch proto { case "unix": - // No need for compression in local communications. - tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return net.DialTimeout(proto, addr, defaultTimeout) - } + return configureUnixTransport(tr, proto, addr) case "npipe": - // No need for compression in local communications. - tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return DialPipe(addr, defaultTimeout) - } + return configureNpipeTransport(tr, proto, addr) default: tr.Proxy = http.ProxyFromEnvironment dialer, err := DialerFromEnvironment(&net.Dialer{ diff --git a/vendor/src/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/src/github.com/docker/go-connections/sockets/sockets_unix.go index b255ac9a..386cf0db 100644 --- a/vendor/src/github.com/docker/go-connections/sockets/sockets_unix.go +++ b/vendor/src/github.com/docker/go-connections/sockets/sockets_unix.go @@ -3,11 +3,31 @@ package sockets import ( + "fmt" "net" + "net/http" "syscall" "time" ) +const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) + +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + if len(addr) > maxUnixSocketPathSize { + return fmt.Errorf("Unix socket path %q is too long", addr) + } + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return net.DialTimeout(proto, addr, defaultTimeout) + } + return nil +} + +func configureNpipeTransport(tr *http.Transport, proto, addr string) error { + return ErrProtocolNotAvailable +} + // DialPipe connects to a Windows named pipe. // This is not supported on other OSes. 
func DialPipe(_ string, _ time.Duration) (net.Conn, error) { diff --git a/vendor/src/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/src/github.com/docker/go-connections/sockets/sockets_windows.go index 1f3540b2..5c21644e 100644 --- a/vendor/src/github.com/docker/go-connections/sockets/sockets_windows.go +++ b/vendor/src/github.com/docker/go-connections/sockets/sockets_windows.go @@ -2,11 +2,25 @@ package sockets import ( "net" + "net/http" "time" "github.com/Microsoft/go-winio" ) +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + return ErrProtocolNotAvailable +} + +func configureNpipeTransport(tr *http.Transport, proto, addr string) error { + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return DialPipe(addr, defaultTimeout) + } + return nil +} + // DialPipe connects to a Windows named pipe. func DialPipe(addr string, timeout time.Duration) (net.Conn, error) { return winio.DialPipe(addr, &timeout) diff --git a/vendor/src/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/src/github.com/docker/go-connections/sockets/tcp_socket.go index 8a82727d..53cbb6c7 100644 --- a/vendor/src/github.com/docker/go-connections/sockets/tcp_socket.go +++ b/vendor/src/github.com/docker/go-connections/sockets/tcp_socket.go @@ -7,7 +7,7 @@ import ( ) // NewTCPSocket creates a TCP socket listener with the specified address and -// and the specified tls configuration. If TLSConfig is set, will encapsulate the +// the specified tls configuration. If TLSConfig is set, will encapsulate the // TCP listener inside a TLS one. func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) { l, err := net.Listen("tcp", addr) diff --git a/vendor/src/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/src/github.com/docker/go-connections/tlsconfig/certpool_go17.go new file mode 100644 index 00000000..352d342a --- /dev/null +++ b/vendor/src/github.com/docker/go-connections/tlsconfig/certpool_go17.go @@ -0,0 +1,21 @@ +// +build go1.7 + +package tlsconfig + +import ( + "crypto/x509" + "runtime" + + "github.com/Sirupsen/logrus" +) + +// SystemCertPool returns a copy of the system cert pool, +// returns an error if failed to load or empty pool on windows. 
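+// Callers such as certPool in tlsconfig/config.go (updated later in this
+// patch) start from this pool and append their own CAs to it, so configured
+// CAs extend the system roots rather than replace them.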
+func SystemCertPool() (*x509.CertPool, error) {
+	certpool, err := x509.SystemCertPool()
+	if err != nil && runtime.GOOS == "windows" {
+		logrus.Warnf("Unable to use system certificate pool: %v", err)
+		return x509.NewCertPool(), nil
+	}
+	return certpool, err
+}
diff --git a/vendor/src/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/src/github.com/docker/go-connections/tlsconfig/certpool_other.go
new file mode 100644
index 00000000..262c95e8
--- /dev/null
+++ b/vendor/src/github.com/docker/go-connections/tlsconfig/certpool_other.go
@@ -0,0 +1,16 @@
+// +build !go1.7
+
+package tlsconfig
+
+import (
+	"crypto/x509"
+
+	"github.com/Sirupsen/logrus"
+)
+
+// SystemCertPool returns a new empty cert pool;
+// accessing the system cert pool is only supported in Go 1.7 and later.
+func SystemCertPool() (*x509.CertPool, error) {
+	logrus.Warn("Unable to use system certificate pool: requires building with go 1.7 or later")
+	return x509.NewCertPool(), nil
+}
diff --git a/vendor/src/github.com/docker/go-connections/tlsconfig/config.go b/vendor/src/github.com/docker/go-connections/tlsconfig/config.go
index 2cd50a62..8bbffcfd 100644
--- a/vendor/src/github.com/docker/go-connections/tlsconfig/config.go
+++ b/vendor/src/github.com/docker/go-connections/tlsconfig/config.go
@@ -68,10 +68,13 @@ func ClientDefault() *tls.Config {
 // certPool returns an X.509 certificate pool from `caFile`, the certificate file.
 func certPool(caFile string) (*x509.CertPool, error) {
 	// If we should verify the server, we need to load a trusted ca
-	certPool := x509.NewCertPool()
+	certPool, err := SystemCertPool()
+	if err != nil {
+		return nil, fmt.Errorf("failed to read system certificates: %v", err)
+	}
 	pem, err := ioutil.ReadFile(caFile)
 	if err != nil {
-		return nil, fmt.Errorf("Could not read CA certificate %q: %v", caFile, err)
+		return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err)
 	}
 	if !certPool.AppendCertsFromPEM(pem) {
 		return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
diff --git a/vendor/src/github.com/mattn/go-runewidth/.travis.yml b/vendor/src/github.com/mattn/go-runewidth/.travis.yml
new file mode 100644
index 00000000..5c9c2a30
--- /dev/null
+++ b/vendor/src/github.com/mattn/go-runewidth/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+go:
+  - tip
+before_install:
+  - go get github.com/mattn/goveralls
+  - go get golang.org/x/tools/cmd/cover
+script:
+  - $HOME/gopath/bin/goveralls -repotoken lAKAWPzcGsD3A8yBX3BGGtRUdJ6CaGERL
diff --git a/vendor/src/github.com/mattn/go-runewidth/LICENSE b/vendor/src/github.com/mattn/go-runewidth/LICENSE
new file mode 100644
index 00000000..91b5cef3
--- /dev/null
+++ b/vendor/src/github.com/mattn/go-runewidth/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/src/github.com/mattn/go-runewidth/README.mkd b/vendor/src/github.com/mattn/go-runewidth/README.mkd new file mode 100644 index 00000000..66663a94 --- /dev/null +++ b/vendor/src/github.com/mattn/go-runewidth/README.mkd @@ -0,0 +1,27 @@ +go-runewidth +============ + +[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth) +[![Coverage Status](https://coveralls.io/repos/mattn/go-runewidth/badge.png?branch=HEAD)](https://coveralls.io/r/mattn/go-runewidth?branch=HEAD) +[![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth) +[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-runewidth)](https://goreportcard.com/report/github.com/mattn/go-runewidth) + +Provides functions to get fixed width of the character or string. + +Usage +----- + +```go +runewidth.StringWidth("つのだ☆HIRO") == 12 +``` + + +Author +------ + +Yasuhiro Matsumoto + +License +------- + +under the MIT License: http://mattn.mit-license.org/2013 diff --git a/vendor/src/github.com/mattn/go-runewidth/runewidth.go b/vendor/src/github.com/mattn/go-runewidth/runewidth.go new file mode 100644 index 00000000..3e730656 --- /dev/null +++ b/vendor/src/github.com/mattn/go-runewidth/runewidth.go @@ -0,0 +1,481 @@ +package runewidth + +var ( + // EastAsianWidth will be set true if the current locale is CJK + EastAsianWidth = IsEastAsian() + + // DefaultCondition is a condition in current locale + DefaultCondition = &Condition{EastAsianWidth} +) + +type interval struct { + first rune + last rune +} + +var combining = []interval{ + {0x0300, 0x036F}, {0x0483, 0x0486}, {0x0488, 0x0489}, + {0x0591, 0x05BD}, {0x05BF, 0x05BF}, {0x05C1, 0x05C2}, + {0x05C4, 0x05C5}, {0x05C7, 0x05C7}, {0x0600, 0x0603}, + {0x0610, 0x0615}, {0x064B, 0x065E}, {0x0670, 0x0670}, + {0x06D6, 0x06E4}, {0x06E7, 0x06E8}, {0x06EA, 0x06ED}, + {0x070F, 0x070F}, {0x0711, 0x0711}, {0x0730, 0x074A}, + {0x07A6, 0x07B0}, {0x07EB, 0x07F3}, {0x0901, 0x0902}, + {0x093C, 0x093C}, {0x0941, 0x0948}, {0x094D, 0x094D}, + {0x0951, 0x0954}, {0x0962, 0x0963}, {0x0981, 0x0981}, + {0x09BC, 0x09BC}, {0x09C1, 0x09C4}, {0x09CD, 0x09CD}, + {0x09E2, 0x09E3}, {0x0A01, 0x0A02}, {0x0A3C, 0x0A3C}, + {0x0A41, 0x0A42}, {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, + {0x0A70, 0x0A71}, {0x0A81, 0x0A82}, {0x0ABC, 0x0ABC}, + {0x0AC1, 0x0AC5}, {0x0AC7, 0x0AC8}, {0x0ACD, 0x0ACD}, + {0x0AE2, 0x0AE3}, {0x0B01, 0x0B01}, {0x0B3C, 0x0B3C}, + {0x0B3F, 0x0B3F}, {0x0B41, 0x0B43}, {0x0B4D, 0x0B4D}, + {0x0B56, 0x0B56}, {0x0B82, 0x0B82}, {0x0BC0, 0x0BC0}, + {0x0BCD, 0x0BCD}, {0x0C3E, 0x0C40}, {0x0C46, 0x0C48}, + {0x0C4A, 0x0C4D}, {0x0C55, 0x0C56}, {0x0CBC, 0x0CBC}, + {0x0CBF, 0x0CBF}, {0x0CC6, 0x0CC6}, {0x0CCC, 0x0CCD}, + {0x0CE2, 0x0CE3}, {0x0D41, 0x0D43}, {0x0D4D, 0x0D4D}, + {0x0DCA, 0x0DCA}, {0x0DD2, 0x0DD4}, {0x0DD6, 0x0DD6}, + {0x0E31, 0x0E31}, {0x0E34, 0x0E3A}, {0x0E47, 0x0E4E}, + {0x0EB1, 0x0EB1}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC}, + {0x0EC8, 0x0ECD}, {0x0F18, 0x0F19}, {0x0F35, 0x0F35}, + {0x0F37, 0x0F37}, 
{0x0F39, 0x0F39}, {0x0F71, 0x0F7E}, + {0x0F80, 0x0F84}, {0x0F86, 0x0F87}, {0x0F90, 0x0F97}, + {0x0F99, 0x0FBC}, {0x0FC6, 0x0FC6}, {0x102D, 0x1030}, + {0x1032, 0x1032}, {0x1036, 0x1037}, {0x1039, 0x1039}, + {0x1058, 0x1059}, {0x1160, 0x11FF}, {0x135F, 0x135F}, + {0x1712, 0x1714}, {0x1732, 0x1734}, {0x1752, 0x1753}, + {0x1772, 0x1773}, {0x17B4, 0x17B5}, {0x17B7, 0x17BD}, + {0x17C6, 0x17C6}, {0x17C9, 0x17D3}, {0x17DD, 0x17DD}, + {0x180B, 0x180D}, {0x18A9, 0x18A9}, {0x1920, 0x1922}, + {0x1927, 0x1928}, {0x1932, 0x1932}, {0x1939, 0x193B}, + {0x1A17, 0x1A18}, {0x1B00, 0x1B03}, {0x1B34, 0x1B34}, + {0x1B36, 0x1B3A}, {0x1B3C, 0x1B3C}, {0x1B42, 0x1B42}, + {0x1B6B, 0x1B73}, {0x1DC0, 0x1DCA}, {0x1DFE, 0x1DFF}, + {0x200B, 0x200F}, {0x202A, 0x202E}, {0x2060, 0x2063}, + {0x206A, 0x206F}, {0x20D0, 0x20EF}, {0x302A, 0x302F}, + {0x3099, 0x309A}, {0xA806, 0xA806}, {0xA80B, 0xA80B}, + {0xA825, 0xA826}, {0xFB1E, 0xFB1E}, {0xFE00, 0xFE0F}, + {0xFE20, 0xFE23}, {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, + {0x10A01, 0x10A03}, {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F}, + {0x10A38, 0x10A3A}, {0x10A3F, 0x10A3F}, {0x1D167, 0x1D169}, + {0x1D173, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD}, + {0x1D242, 0x1D244}, {0xE0001, 0xE0001}, {0xE0020, 0xE007F}, + {0xE0100, 0xE01EF}, +} + +type ctype int + +const ( + narrow ctype = iota + ambiguous + wide + halfwidth + fullwidth + neutral +) + +type intervalType struct { + first rune + last rune + ctype ctype +} + +var ctypes = []intervalType{ + {0x0020, 0x007E, narrow}, + {0x00A1, 0x00A1, ambiguous}, + {0x00A2, 0x00A3, narrow}, + {0x00A4, 0x00A4, ambiguous}, + {0x00A5, 0x00A6, narrow}, + {0x00A7, 0x00A8, ambiguous}, + {0x00AA, 0x00AA, ambiguous}, + {0x00AC, 0x00AC, narrow}, + {0x00AD, 0x00AE, ambiguous}, + {0x00AF, 0x00AF, narrow}, + {0x00B0, 0x00B4, ambiguous}, + {0x00B6, 0x00BA, ambiguous}, + {0x00BC, 0x00BF, ambiguous}, + {0x00C6, 0x00C6, ambiguous}, + {0x00D0, 0x00D0, ambiguous}, + {0x00D7, 0x00D8, ambiguous}, + {0x00DE, 0x00E1, ambiguous}, + {0x00E6, 0x00E6, ambiguous}, + {0x00E8, 0x00EA, ambiguous}, + {0x00EC, 0x00ED, ambiguous}, + {0x00F0, 0x00F0, ambiguous}, + {0x00F2, 0x00F3, ambiguous}, + {0x00F7, 0x00FA, ambiguous}, + {0x00FC, 0x00FC, ambiguous}, + {0x00FE, 0x00FE, ambiguous}, + {0x0101, 0x0101, ambiguous}, + {0x0111, 0x0111, ambiguous}, + {0x0113, 0x0113, ambiguous}, + {0x011B, 0x011B, ambiguous}, + {0x0126, 0x0127, ambiguous}, + {0x012B, 0x012B, ambiguous}, + {0x0131, 0x0133, ambiguous}, + {0x0138, 0x0138, ambiguous}, + {0x013F, 0x0142, ambiguous}, + {0x0144, 0x0144, ambiguous}, + {0x0148, 0x014B, ambiguous}, + {0x014D, 0x014D, ambiguous}, + {0x0152, 0x0153, ambiguous}, + {0x0166, 0x0167, ambiguous}, + {0x016B, 0x016B, ambiguous}, + {0x01CE, 0x01CE, ambiguous}, + {0x01D0, 0x01D0, ambiguous}, + {0x01D2, 0x01D2, ambiguous}, + {0x01D4, 0x01D4, ambiguous}, + {0x01D6, 0x01D6, ambiguous}, + {0x01D8, 0x01D8, ambiguous}, + {0x01DA, 0x01DA, ambiguous}, + {0x01DC, 0x01DC, ambiguous}, + {0x0251, 0x0251, ambiguous}, + {0x0261, 0x0261, ambiguous}, + {0x02C4, 0x02C4, ambiguous}, + {0x02C7, 0x02C7, ambiguous}, + {0x02C9, 0x02CB, ambiguous}, + {0x02CD, 0x02CD, ambiguous}, + {0x02D0, 0x02D0, ambiguous}, + {0x02D8, 0x02DB, ambiguous}, + {0x02DD, 0x02DD, ambiguous}, + {0x02DF, 0x02DF, ambiguous}, + {0x0300, 0x036F, ambiguous}, + {0x0391, 0x03A2, ambiguous}, + {0x03A3, 0x03A9, ambiguous}, + {0x03B1, 0x03C1, ambiguous}, + {0x03C3, 0x03C9, ambiguous}, + {0x0401, 0x0401, ambiguous}, + {0x0410, 0x044F, ambiguous}, + {0x0451, 0x0451, ambiguous}, + {0x1100, 0x115F, wide}, + {0x2010, 0x2010, 
ambiguous}, + {0x2013, 0x2016, ambiguous}, + {0x2018, 0x2019, ambiguous}, + {0x201C, 0x201D, ambiguous}, + {0x2020, 0x2022, ambiguous}, + {0x2024, 0x2027, ambiguous}, + {0x2030, 0x2030, ambiguous}, + {0x2032, 0x2033, ambiguous}, + {0x2035, 0x2035, ambiguous}, + {0x203B, 0x203B, ambiguous}, + {0x203E, 0x203E, ambiguous}, + {0x2074, 0x2074, ambiguous}, + {0x207F, 0x207F, ambiguous}, + {0x2081, 0x2084, ambiguous}, + {0x20A9, 0x20A9, halfwidth}, + {0x20AC, 0x20AC, ambiguous}, + {0x2103, 0x2103, ambiguous}, + {0x2105, 0x2105, ambiguous}, + {0x2109, 0x2109, ambiguous}, + {0x2113, 0x2113, ambiguous}, + {0x2116, 0x2116, ambiguous}, + {0x2121, 0x2122, ambiguous}, + {0x2126, 0x2126, ambiguous}, + {0x212B, 0x212B, ambiguous}, + {0x2153, 0x2154, ambiguous}, + {0x215B, 0x215E, ambiguous}, + {0x2160, 0x216B, ambiguous}, + {0x2170, 0x2179, ambiguous}, + {0x2189, 0x218A, ambiguous}, + {0x2190, 0x2199, ambiguous}, + {0x21B8, 0x21B9, ambiguous}, + {0x21D2, 0x21D2, ambiguous}, + {0x21D4, 0x21D4, ambiguous}, + {0x21E7, 0x21E7, ambiguous}, + {0x2200, 0x2200, ambiguous}, + {0x2202, 0x2203, ambiguous}, + {0x2207, 0x2208, ambiguous}, + {0x220B, 0x220B, ambiguous}, + {0x220F, 0x220F, ambiguous}, + {0x2211, 0x2211, ambiguous}, + {0x2215, 0x2215, ambiguous}, + {0x221A, 0x221A, ambiguous}, + {0x221D, 0x2220, ambiguous}, + {0x2223, 0x2223, ambiguous}, + {0x2225, 0x2225, ambiguous}, + {0x2227, 0x222C, ambiguous}, + {0x222E, 0x222E, ambiguous}, + {0x2234, 0x2237, ambiguous}, + {0x223C, 0x223D, ambiguous}, + {0x2248, 0x2248, ambiguous}, + {0x224C, 0x224C, ambiguous}, + {0x2252, 0x2252, ambiguous}, + {0x2260, 0x2261, ambiguous}, + {0x2264, 0x2267, ambiguous}, + {0x226A, 0x226B, ambiguous}, + {0x226E, 0x226F, ambiguous}, + {0x2282, 0x2283, ambiguous}, + {0x2286, 0x2287, ambiguous}, + {0x2295, 0x2295, ambiguous}, + {0x2299, 0x2299, ambiguous}, + {0x22A5, 0x22A5, ambiguous}, + {0x22BF, 0x22BF, ambiguous}, + {0x2312, 0x2312, ambiguous}, + {0x2329, 0x232A, wide}, + {0x2460, 0x24E9, ambiguous}, + {0x24EB, 0x254B, ambiguous}, + {0x2550, 0x2573, ambiguous}, + {0x2580, 0x258F, ambiguous}, + {0x2592, 0x2595, ambiguous}, + {0x25A0, 0x25A1, ambiguous}, + {0x25A3, 0x25A9, ambiguous}, + {0x25B2, 0x25B3, ambiguous}, + {0x25B6, 0x25B7, ambiguous}, + {0x25BC, 0x25BD, ambiguous}, + {0x25C0, 0x25C1, ambiguous}, + {0x25C6, 0x25C8, ambiguous}, + {0x25CB, 0x25CB, ambiguous}, + {0x25CE, 0x25D1, ambiguous}, + {0x25E2, 0x25E5, ambiguous}, + {0x25EF, 0x25EF, ambiguous}, + {0x2605, 0x2606, ambiguous}, + {0x2609, 0x2609, ambiguous}, + {0x260E, 0x260F, ambiguous}, + {0x2614, 0x2615, ambiguous}, + {0x261C, 0x261C, ambiguous}, + {0x261E, 0x261E, ambiguous}, + {0x2640, 0x2640, ambiguous}, + {0x2642, 0x2642, ambiguous}, + {0x2660, 0x2661, ambiguous}, + {0x2663, 0x2665, ambiguous}, + {0x2667, 0x266A, ambiguous}, + {0x266C, 0x266D, ambiguous}, + {0x266F, 0x266F, ambiguous}, + {0x269E, 0x269F, ambiguous}, + {0x26BE, 0x26BF, ambiguous}, + {0x26C4, 0x26CD, ambiguous}, + {0x26CF, 0x26E1, ambiguous}, + {0x26E3, 0x26E3, ambiguous}, + {0x26E8, 0x26FF, ambiguous}, + {0x273D, 0x273D, ambiguous}, + {0x2757, 0x2757, ambiguous}, + {0x2776, 0x277F, ambiguous}, + {0x27E6, 0x27ED, narrow}, + {0x2985, 0x2986, narrow}, + {0x2B55, 0x2B59, ambiguous}, + {0x2E80, 0x2E9A, wide}, + {0x2E9B, 0x2EF4, wide}, + {0x2F00, 0x2FD6, wide}, + {0x2FF0, 0x2FFC, wide}, + {0x3000, 0x3000, fullwidth}, + {0x3001, 0x303E, wide}, + {0x3041, 0x3097, wide}, + {0x3099, 0x3100, wide}, + {0x3105, 0x312E, wide}, + {0x3131, 0x318F, wide}, + {0x3190, 0x31BB, wide}, + {0x31C0, 0x31E4, wide}, + {0x31F0, 
0x321F, wide}, + {0x3220, 0x3247, wide}, + {0x3248, 0x324F, ambiguous}, + {0x3250, 0x32FF, wide}, + {0x3300, 0x4DBF, wide}, + {0x4E00, 0xA48D, wide}, + {0xA490, 0xA4C7, wide}, + {0xA960, 0xA97D, wide}, + {0xAC00, 0xD7A4, wide}, + {0xE000, 0xF8FF, ambiguous}, + {0xF900, 0xFAFF, wide}, + {0xFE00, 0xFE0F, ambiguous}, + {0xFE10, 0xFE1A, wide}, + {0xFE30, 0xFE53, wide}, + {0xFE54, 0xFE67, wide}, + {0xFE68, 0xFE6C, wide}, + {0xFF01, 0xFF60, fullwidth}, + {0xFF61, 0xFFBF, halfwidth}, + {0xFFC2, 0xFFC8, halfwidth}, + {0xFFCA, 0xFFD0, halfwidth}, + {0xFFD2, 0xFFD8, halfwidth}, + {0xFFDA, 0xFFDD, halfwidth}, + {0xFFE0, 0xFFE7, fullwidth}, + {0xFFE8, 0xFFEF, halfwidth}, + {0xFFFD, 0xFFFE, ambiguous}, + {0x1B000, 0x1B002, wide}, + {0x1F100, 0x1F10A, ambiguous}, + {0x1F110, 0x1F12D, ambiguous}, + {0x1F130, 0x1F169, ambiguous}, + {0x1F170, 0x1F19B, ambiguous}, + {0x1F200, 0x1F203, wide}, + {0x1F210, 0x1F23B, wide}, + {0x1F240, 0x1F249, wide}, + {0x1F250, 0x1F252, wide}, + {0x20000, 0x2FFFE, wide}, + {0x30000, 0x3FFFE, wide}, + {0xE0100, 0xE01F0, ambiguous}, + {0xF0000, 0xFFFFD, ambiguous}, + {0x100000, 0x10FFFE, ambiguous}, +} + +// Condition have flag EastAsianWidth whether the current locale is CJK or not. +type Condition struct { + EastAsianWidth bool +} + +// NewCondition return new instance of Condition which is current locale. +func NewCondition() *Condition { + return &Condition{EastAsianWidth} +} + +// RuneWidth returns the number of cells in r. +// See http://www.unicode.org/reports/tr11/ +func (c *Condition) RuneWidth(r rune) int { + if r == 0 { + return 0 + } + if r < 32 || (r >= 0x7f && r < 0xa0) { + return 1 + } + for _, iv := range combining { + if iv.first <= r && r <= iv.last { + return 0 + } + } + + if c.EastAsianWidth && IsAmbiguousWidth(r) { + return 2 + } + + if r >= 0x1100 && + (r <= 0x115f || r == 0x2329 || r == 0x232a || + (r >= 0x2e80 && r <= 0xa4cf && r != 0x303f) || + (r >= 0xac00 && r <= 0xd7a3) || + (r >= 0xf900 && r <= 0xfaff) || + (r >= 0xfe30 && r <= 0xfe6f) || + (r >= 0xff00 && r <= 0xff60) || + (r >= 0xffe0 && r <= 0xffe6) || + (r >= 0x20000 && r <= 0x2fffd) || + (r >= 0x30000 && r <= 0x3fffd)) { + return 2 + } + return 1 +} + +// StringWidth return width as you can see +func (c *Condition) StringWidth(s string) (width int) { + for _, r := range []rune(s) { + width += c.RuneWidth(r) + } + return width +} + +// Truncate return string truncated with w cells +func (c *Condition) Truncate(s string, w int, tail string) string { + if c.StringWidth(s) <= w { + return s + } + r := []rune(s) + tw := c.StringWidth(tail) + w -= tw + width := 0 + i := 0 + for ; i < len(r); i++ { + cw := c.RuneWidth(r[i]) + if width+cw > w { + break + } + width += cw + } + return string(r[0:i]) + tail +} + +// Wrap return string wrapped with w cells +func (c *Condition) Wrap(s string, w int) string { + width := 0 + out := "" + for _, r := range []rune(s) { + cw := RuneWidth(r) + if r == '\n' { + out += string(r) + width = 0 + continue + } else if width+cw > w { + out += "\n" + width = 0 + out += string(r) + width += cw + continue + } + out += string(r) + width += cw + } + return out +} + +// FillLeft return string filled in left by spaces in w cells +func (c *Condition) FillLeft(s string, w int) string { + width := c.StringWidth(s) + count := w - width + if count > 0 { + b := make([]byte, count) + for i := range b { + b[i] = ' ' + } + return string(b) + s + } + return s +} + +// FillRight return string filled in left by spaces in w cells +func (c *Condition) FillRight(s string, w int) string { + 
width := c.StringWidth(s) + count := w - width + if count > 0 { + b := make([]byte, count) + for i := range b { + b[i] = ' ' + } + return s + string(b) + } + return s +} + +// RuneWidth returns the number of cells in r. +// See http://www.unicode.org/reports/tr11/ +func RuneWidth(r rune) int { + return DefaultCondition.RuneWidth(r) +} + +func ct(r rune) ctype { + for _, iv := range ctypes { + if iv.first <= r && r <= iv.last { + return iv.ctype + } + } + return neutral +} + +// IsAmbiguousWidth returns whether is ambiguous width or not. +func IsAmbiguousWidth(r rune) bool { + return ct(r) == ambiguous +} + +// IsNeutralWidth returns whether is neutral width or not. +func IsNeutralWidth(r rune) bool { + return ct(r) == neutral +} + +// StringWidth return width as you can see +func StringWidth(s string) (width int) { + return DefaultCondition.StringWidth(s) +} + +// Truncate return string truncated with w cells +func Truncate(s string, w int, tail string) string { + return DefaultCondition.Truncate(s, w, tail) +} + +// Wrap return string wrapped with w cells +func Wrap(s string, w int) string { + return DefaultCondition.Wrap(s, w) +} + +// FillLeft return string filled in left by spaces in w cells +func FillLeft(s string, w int) string { + return DefaultCondition.FillLeft(s, w) +} + +// FillRight return string filled in left by spaces in w cells +func FillRight(s string, w int) string { + return DefaultCondition.FillRight(s, w) +} diff --git a/vendor/src/github.com/mattn/go-runewidth/runewidth_js.go b/vendor/src/github.com/mattn/go-runewidth/runewidth_js.go new file mode 100644 index 00000000..0ce32c5e --- /dev/null +++ b/vendor/src/github.com/mattn/go-runewidth/runewidth_js.go @@ -0,0 +1,8 @@ +// +build js + +package runewidth + +func IsEastAsian() bool { + // TODO: Implement this for the web. Detect east asian in a compatible way, and return true. + return false +} diff --git a/vendor/src/github.com/mattn/go-runewidth/runewidth_posix.go b/vendor/src/github.com/mattn/go-runewidth/runewidth_posix.go new file mode 100644 index 00000000..c579e9a3 --- /dev/null +++ b/vendor/src/github.com/mattn/go-runewidth/runewidth_posix.go @@ -0,0 +1,77 @@ +// +build !windows,!js + +package runewidth + +import ( + "os" + "regexp" + "strings" +) + +var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`) + +var mblenTable = map[string]int{ + "utf-8": 6, + "utf8": 6, + "jis": 8, + "eucjp": 3, + "euckr": 2, + "euccn": 2, + "sjis": 2, + "cp932": 2, + "cp51932": 2, + "cp936": 2, + "cp949": 2, + "cp950": 2, + "big5": 2, + "gbk": 2, + "gb2312": 2, +} + +func isEastAsian(locale string) bool { + charset := strings.ToLower(locale) + r := reLoc.FindStringSubmatch(locale) + if len(r) == 2 { + charset = strings.ToLower(r[1]) + } + + if strings.HasSuffix(charset, "@cjk_narrow") { + return false + } + + for pos, b := range []byte(charset) { + if b == '@' { + charset = charset[:pos] + break + } + } + max := 1 + if m, ok := mblenTable[charset]; ok { + max = m + } + if max > 1 && (charset[0] != 'u' || + strings.HasPrefix(locale, "ja") || + strings.HasPrefix(locale, "ko") || + strings.HasPrefix(locale, "zh")) { + return true + } + return false +} + +// IsEastAsian return true if the current locale is CJK +func IsEastAsian() bool { + locale := os.Getenv("LC_CTYPE") + if locale == "" { + locale = os.Getenv("LANG") + } + + // ignore C locale + if locale == "POSIX" || locale == "C" { + return false + } + if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' 
|| locale[1] == '-') { + return false + } + + return isEastAsian(locale) +} diff --git a/vendor/src/github.com/mattn/go-runewidth/runewidth_windows.go b/vendor/src/github.com/mattn/go-runewidth/runewidth_windows.go new file mode 100644 index 00000000..0258876b --- /dev/null +++ b/vendor/src/github.com/mattn/go-runewidth/runewidth_windows.go @@ -0,0 +1,25 @@ +package runewidth + +import ( + "syscall" +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32") + procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP") +) + +// IsEastAsian return true if the current locale is CJK +func IsEastAsian() bool { + r1, _, _ := procGetConsoleOutputCP.Call() + if r1 == 0 { + return false + } + + switch int(r1) { + case 932, 51932, 936, 949, 950: + return true + } + + return false +} diff --git a/vendor/src/github.com/mistifyio/go-zfs/.gitignore b/vendor/src/github.com/mistifyio/go-zfs/.gitignore new file mode 100644 index 00000000..8000dd9d --- /dev/null +++ b/vendor/src/github.com/mistifyio/go-zfs/.gitignore @@ -0,0 +1 @@ +.vagrant diff --git a/vendor/src/github.com/mistifyio/go-zfs/.travis.yml b/vendor/src/github.com/mistifyio/go-zfs/.travis.yml new file mode 100644 index 00000000..f1fc5d32 --- /dev/null +++ b/vendor/src/github.com/mistifyio/go-zfs/.travis.yml @@ -0,0 +1,41 @@ +language: go +sudo: required +dist: trusty + +branches: + only: + - master + +env: + - rel=0.6.4.2 + - rel=0.6.5.4 + +matrix: + allow_failures: + - env: rel=0.6.5.4 + +go: + - 1.7 + +before_install: + - go get github.com/alecthomas/gometalinter + - gometalinter --install --update + - MAKEFLAGS=-j$(($(grep -c '^processor' /proc/cpuinfo) * 2 + 1)) + - sudo apt-get update -y && sudo apt-get install -y linux-headers-$(uname -r) uuid-dev tree + - cd /tmp + - curl -L https://github.com/zfsonlinux/zfs/releases/download/zfs-$rel/spl-$rel.tar.gz | tar xz + - curl -L https://github.com/zfsonlinux/zfs/releases/download/zfs-$rel/zfs-$rel.tar.gz | tar xz + - (cd spl-$rel && ./configure --prefix=/usr && make && sudo make install) + - (cd zfs-$rel && ./configure --prefix=/usr && make && sudo make install) + - sudo modprobe zfs + - cd $TRAVIS_BUILD_DIR + +script: + - sudo -E $(which go) test -v ./... + - gometalinter --disable=golint --disable=vetshadow --enable=gofmt ./... || true + - gometalinter --disable-all --enable=golint --enable=vetshadow ./... || true + +notifications: + email: false + slack: + secure: "AbDJNjWyf/z+neX0HtoIUynjBcdvbhrsuyzoeaImZaanUtyo3cWNpA1M+5CApDQneaKbLqcehDBTjaLQD1fjXXtWrNvq+FgCRDJ1gvZasq13iJfYe3qtLz7n0YGHqEGzZ1lsheWtle/Sg32RlPAUZrHKWPciu7/Fg1k1ca8FsB4=" diff --git a/vendor/src/github.com/mistifyio/go-zfs/CONTRIBUTING.md b/vendor/src/github.com/mistifyio/go-zfs/CONTRIBUTING.md new file mode 100644 index 00000000..f1880c19 --- /dev/null +++ b/vendor/src/github.com/mistifyio/go-zfs/CONTRIBUTING.md @@ -0,0 +1,60 @@ +## How to Contribute ## + +We always welcome contributions to help make `go-zfs` better. Please take a moment to read this document if you would like to contribute. + +### Reporting issues ### + +We use [Github issues](https://github.com/mistifyio/go-zfs/issues) to track bug reports, feature requests, and submitting pull requests. + +If you find a bug: + +* Use the GitHub issue search to check whether the bug has already been reported. +* If the issue has been fixed, try to reproduce the issue using the latest `master` branch of the repository. +* If the issue still reproduces or has not yet been reported, try to isolate the problem before opening an issue, if possible. 
Also provide the steps taken to reproduce the bug.
+
+### Pull requests ###
+
+We welcome bug fixes, improvements, and new features. Before embarking on making significant changes, please open an issue and ask first so that you do not risk duplicating efforts or spending time working on something that may be out of scope. For minor items, just open a pull request.
+
+[Fork the project](https://help.github.com/articles/fork-a-repo), clone your fork, and add the upstream to your remote:
+
+    $ git clone git@github.com:<yourname>/go-zfs.git
+    $ cd go-zfs
+    $ git remote add upstream https://github.com/mistifyio/go-zfs.git
+
+If you need to pull new changes committed upstream:
+
+    $ git checkout master
+    $ git fetch upstream
+    $ git merge upstream/master
+
+Don't work directly on master as this makes it harder to merge later. Create a feature branch for your fix or new feature:
+
+    $ git checkout -b <feature-branch-name>
+
+Please try to commit your changes in logical chunks. Ideally, you should include the issue number in the commit message.
+
+    $ git commit -m "Issue #<number> - <description>"
+
+Push your feature branch to your fork.
+
+    $ git push origin <feature-branch-name>
+
+[Open a Pull Request](https://help.github.com/articles/using-pull-requests) against the upstream master branch. Please give your pull request a clear title and description and note which issue(s) your pull request fixes.
+
+* All Go code should be formatted using [gofmt](http://golang.org/cmd/gofmt/).
+* Every exported function should have [documentation](http://blog.golang.org/godoc-documenting-go-code) and corresponding [tests](http://golang.org/doc/code.html#Testing).
+
+**Important:** By submitting a patch, you agree to allow the project owners to license your work under the [Apache 2.0 License](./LICENSE).
+
+### Go Tools ###
+For consistency and to catch minor issues in all Go code, please run the following:
+* goimports
+* go vet
+* golint
+* errcheck
+
+Many editors can execute the above on save.
+
+----
+Guidelines based on http://azkaban.github.io/contributing.html
diff --git a/vendor/src/github.com/mistifyio/go-zfs/LICENSE b/vendor/src/github.com/mistifyio/go-zfs/LICENSE
new file mode 100644
index 00000000..f4c265cf
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/LICENSE
@@ -0,0 +1,201 @@
+Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2014, OmniTI Computer Consulting, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/src/github.com/mistifyio/go-zfs/README.md b/vendor/src/github.com/mistifyio/go-zfs/README.md new file mode 100644 index 00000000..2515e588 --- /dev/null +++ b/vendor/src/github.com/mistifyio/go-zfs/README.md @@ -0,0 +1,54 @@ +# Go Wrapper for ZFS # + +Simple wrappers for ZFS command line tools. + +[![GoDoc](https://godoc.org/github.com/mistifyio/go-zfs?status.svg)](https://godoc.org/github.com/mistifyio/go-zfs) + +## Requirements ## + +You need a working ZFS setup. To use on Ubuntu 14.04, setup ZFS: + + sudo apt-get install python-software-properties + sudo apt-add-repository ppa:zfs-native/stable + sudo apt-get update + sudo apt-get install ubuntu-zfs libzfs-dev + +Developed using Go 1.3, but currently there isn't anything 1.3 specific. Don't use Ubuntu packages for Go, use http://golang.org/doc/install + +Generally you need root privileges to use anything zfs related. 
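+
+Since the wrappers shell out to the `zfs` and `zpool` binaries, a quick way to check that a Go program can actually see your datasets is something like the following (illustrative; assumes a pool named `tank` exists and uses the package's `GetDataset` helper):
+
+```go
+ds, err := zfs.GetDataset("tank")
+if err != nil {
+	log.Fatal(err) // usually missing privileges or an unloaded kernel module
+}
+fmt.Println(ds.Name, ds.Mountpoint)
+```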
+
+## Status ##
+
+This has only been tested on Ubuntu 14.04.
+
+In the future, we hope to work directly with libzfs.
+
+# Hacking #
+
+The tests have decent examples for most functions.
+
+```go
+// assuming a zpool named test
+// error handling omitted
+
+f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
+ok(t, err)
+
+s, err := f.Snapshot("test", nil)
+ok(t, err)
+
+// snapshot is named "test/snapshot-test@test"
+
+c, err := s.Clone("test/clone-test", nil)
+
+err = c.Destroy()
+err = s.Destroy()
+err = f.Destroy()
+
+```
+
+# Contributing #
+
+See the [contributing guidelines](./CONTRIBUTING.md).
+
diff --git a/vendor/src/github.com/mistifyio/go-zfs/Vagrantfile b/vendor/src/github.com/mistifyio/go-zfs/Vagrantfile
new file mode 100644
index 00000000..3bd6e120
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/Vagrantfile
@@ -0,0 +1,34 @@
+
+VAGRANTFILE_API_VERSION = "2"
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+  config.vm.box = "ubuntu/trusty64"
+  config.ssh.forward_agent = true
+
+  config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/mistifyio/go-zfs", create: true
+
+  config.vm.provision "shell", inline: <<EOF
+cat << END > /etc/profile.d/go.sh
+export GOPATH=\\$HOME/go
+export PATH=\\$GOPATH/bin:/usr/local/go/bin:\\$PATH
+END
+
+chown -R vagrant /home/vagrant/go
+
+apt-get update
+apt-get install -y software-properties-common curl
+apt-add-repository --yes ppa:zfs-native/stable
+apt-get update
+apt-get install -y ubuntu-zfs
+
+cd /home/vagrant
+curl -z go1.3.3.linux-amd64.tar.gz -L -O https://storage.googleapis.com/golang/go1.3.3.linux-amd64.tar.gz
+tar -C /usr/local -zxf /home/vagrant/go1.3.3.linux-amd64.tar.gz
+
+cat << END > /etc/sudoers.d/go
+Defaults env_keep += "GOPATH"
+END
+
+EOF
+
+end
diff --git a/vendor/src/github.com/mistifyio/go-zfs/error.go b/vendor/src/github.com/mistifyio/go-zfs/error.go
new file mode 100644
index 00000000..5408ccdb
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/error.go
@@ -0,0 +1,18 @@
+package zfs
+
+import (
+	"fmt"
+)
+
+// Error is an error which is returned when the `zfs` or `zpool` shell
+// commands return with a non-zero exit code.
+type Error struct {
+	Err    error
+	Debug  string
+	Stderr string
+}
+
+// Error returns the string representation of an Error.
+func (e Error) Error() string {
+	return fmt.Sprintf("%s: %q => %s", e.Err, e.Debug, e.Stderr)
+}
diff --git a/vendor/src/github.com/mistifyio/go-zfs/utils.go b/vendor/src/github.com/mistifyio/go-zfs/utils.go
new file mode 100644
index 00000000..c7178d6f
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/utils.go
@@ -0,0 +1,352 @@
+package zfs
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os/exec"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+
+	"github.com/pborman/uuid"
+)
+
+type command struct {
+	Command string
+	Stdin   io.Reader
+	Stdout  io.Writer
+}
+
+func (c *command) Run(arg ...string) ([][]string, error) {
+
+	cmd := exec.Command(c.Command, arg...)
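+	// stderr is always captured for error reporting; stdout is buffered here
+	// only when the caller has not supplied its own writer.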
+ + var stdout, stderr bytes.Buffer + + if c.Stdout == nil { + cmd.Stdout = &stdout + } else { + cmd.Stdout = c.Stdout + } + + if c.Stdin != nil { + cmd.Stdin = c.Stdin + + } + cmd.Stderr = &stderr + + id := uuid.New() + joinedArgs := strings.Join(cmd.Args, " ") + + logger.Log([]string{"ID:" + id, "START", joinedArgs}) + err := cmd.Run() + logger.Log([]string{"ID:" + id, "FINISH"}) + + if err != nil { + return nil, &Error{ + Err: err, + Debug: strings.Join([]string{cmd.Path, joinedArgs}, " "), + Stderr: stderr.String(), + } + } + + // assume if you passed in something for stdout, that you know what to do with it + if c.Stdout != nil { + return nil, nil + } + + lines := strings.Split(stdout.String(), "\n") + + //last line is always blank + lines = lines[0 : len(lines)-1] + output := make([][]string, len(lines)) + + for i, l := range lines { + output[i] = strings.Fields(l) + } + + return output, nil +} + +func setString(field *string, value string) { + v := "" + if value != "-" { + v = value + } + *field = v +} + +func setUint(field *uint64, value string) error { + var v uint64 + if value != "-" { + var err error + v, err = strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + } + *field = v + return nil +} + +func (ds *Dataset) parseLine(line []string) error { + var err error + + if len(line) != len(dsPropList) { + return errors.New("Output does not match what is expected on this platform") + } + setString(&ds.Name, line[0]) + setString(&ds.Origin, line[1]) + + if err = setUint(&ds.Used, line[2]); err != nil { + return err + } + if err = setUint(&ds.Avail, line[3]); err != nil { + return err + } + + setString(&ds.Mountpoint, line[4]) + setString(&ds.Compression, line[5]) + setString(&ds.Type, line[6]) + + if err = setUint(&ds.Volsize, line[7]); err != nil { + return err + } + if err = setUint(&ds.Quota, line[8]); err != nil { + return err + } + + if runtime.GOOS == "solaris" { + return nil + } + + if err = setUint(&ds.Written, line[9]); err != nil { + return err + } + if err = setUint(&ds.Logicalused, line[10]); err != nil { + return err + } + if err = setUint(&ds.Usedbydataset, line[11]); err != nil { + return err + } + return nil +} + +/* + * from zfs diff`s escape function: + * + * Prints a file name out a character at a time. If the character is + * not in the range of what we consider "printable" ASCII, display it + * as an escaped 3-digit octal value. ASCII values less than a space + * are all control characters and we declare the upper end as the + * DELete character. This also is the last 7-bit ASCII character. + * We choose to treat all 8-bit ASCII as not printable for this + * application. 
+ */ +func unescapeFilepath(path string) (string, error) { + buf := make([]byte, 0, len(path)) + llen := len(path) + for i := 0; i < llen; { + if path[i] == '\\' { + if llen < i+4 { + return "", fmt.Errorf("Invalid octal code: too short") + } + octalCode := path[(i + 1):(i + 4)] + val, err := strconv.ParseUint(octalCode, 8, 8) + if err != nil { + return "", fmt.Errorf("Invalid octal code: %v", err) + } + buf = append(buf, byte(val)) + i += 4 + } else { + buf = append(buf, path[i]) + i++ + } + } + return string(buf), nil +} + +var changeTypeMap = map[string]ChangeType{ + "-": Removed, + "+": Created, + "M": Modified, + "R": Renamed, +} +var inodeTypeMap = map[string]InodeType{ + "B": BlockDevice, + "C": CharacterDevice, + "/": Directory, + ">": Door, + "|": NamedPipe, + "@": SymbolicLink, + "P": EventPort, + "=": Socket, + "F": File, +} + +// matches (+1) or (-1) +var referenceCountRegex = regexp.MustCompile("\\(([+-]\\d+?)\\)") + +func parseReferenceCount(field string) (int, error) { + matches := referenceCountRegex.FindStringSubmatch(field) + if matches == nil { + return 0, fmt.Errorf("Regexp does not match") + } + return strconv.Atoi(matches[1]) +} + +func parseInodeChange(line []string) (*InodeChange, error) { + llen := len(line) + if llen < 1 { + return nil, fmt.Errorf("Empty line passed") + } + + changeType := changeTypeMap[line[0]] + if changeType == 0 { + return nil, fmt.Errorf("Unknown change type '%s'", line[0]) + } + + switch changeType { + case Renamed: + if llen != 4 { + return nil, fmt.Errorf("Mismatching number of fields: expect 4, got: %d", llen) + } + case Modified: + if llen != 4 && llen != 3 { + return nil, fmt.Errorf("Mismatching number of fields: expect 3..4, got: %d", llen) + } + default: + if llen != 3 { + return nil, fmt.Errorf("Mismatching number of fields: expect 3, got: %d", llen) + } + } + + inodeType := inodeTypeMap[line[1]] + if inodeType == 0 { + return nil, fmt.Errorf("Unknown inode type '%s'", line[1]) + } + + path, err := unescapeFilepath(line[2]) + if err != nil { + return nil, fmt.Errorf("Failed to parse filename: %v", err) + } + + var newPath string + var referenceCount int + switch changeType { + case Renamed: + newPath, err = unescapeFilepath(line[3]) + if err != nil { + return nil, fmt.Errorf("Failed to parse filename: %v", err) + } + case Modified: + if llen == 4 { + referenceCount, err = parseReferenceCount(line[3]) + if err != nil { + return nil, fmt.Errorf("Failed to parse reference count: %v", err) + } + } + default: + newPath = "" + } + + return &InodeChange{ + Change: changeType, + Type: inodeType, + Path: path, + NewPath: newPath, + ReferenceCountChange: referenceCount, + }, nil +} + +// example input +//M / /testpool/bar/ +//+ F /testpool/bar/hello.txt +//M / /testpool/bar/hello.txt (+1) +//M / /testpool/bar/hello-hardlink +func parseInodeChanges(lines [][]string) ([]*InodeChange, error) { + changes := make([]*InodeChange, len(lines)) + + for i, line := range lines { + c, err := parseInodeChange(line) + if err != nil { + return nil, fmt.Errorf("Failed to parse line %d of zfs diff: %v, got: '%s'", i, err, line) + } + changes[i] = c + } + return changes, nil +} + +func listByType(t, filter string) ([]*Dataset, error) { + args := []string{"list", "-rHp", "-t", t, "-o", dsPropListOptions} + + if filter != "" { + args = append(args, filter) + } + out, err := zfs(args...) 
+	if err != nil {
+		return nil, err
+	}
+
+	var datasets []*Dataset
+
+	name := ""
+	var ds *Dataset
+	for _, line := range out {
+		if name != line[0] {
+			name = line[0]
+			ds = &Dataset{Name: name}
+			datasets = append(datasets, ds)
+		}
+		if err := ds.parseLine(line); err != nil {
+			return nil, err
+		}
+	}
+
+	return datasets, nil
+}
+
+func propsSlice(properties map[string]string) []string {
+	args := make([]string, 0, len(properties)*3)
+	for k, v := range properties {
+		args = append(args, "-o")
+		args = append(args, fmt.Sprintf("%s=%s", k, v))
+	}
+	return args
+}
+
+func (z *Zpool) parseLine(line []string) error {
+	prop := line[1]
+	val := line[2]
+
+	var err error
+
+	switch prop {
+	case "name":
+		setString(&z.Name, val)
+	case "health":
+		setString(&z.Health, val)
+	case "allocated":
+		err = setUint(&z.Allocated, val)
+	case "size":
+		err = setUint(&z.Size, val)
+	case "free":
+		err = setUint(&z.Free, val)
+	case "fragmentation":
+		// Trim trailing "%" before parsing uint
+		err = setUint(&z.Fragmentation, val[:len(val)-1])
+	case "readonly":
+		z.ReadOnly = val == "on"
+	case "freeing":
+		err = setUint(&z.Freeing, val)
+	case "leaked":
+		err = setUint(&z.Leaked, val)
+	case "dedupratio":
+		// Trim trailing "x" before parsing float64
+		z.DedupRatio, err = strconv.ParseFloat(val[:len(val)-1], 64)
+	}
+	return err
+}
diff --git a/vendor/src/github.com/mistifyio/go-zfs/utils_notsolaris.go b/vendor/src/github.com/mistifyio/go-zfs/utils_notsolaris.go
new file mode 100644
index 00000000..b2b0cd96
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/utils_notsolaris.go
@@ -0,0 +1,17 @@
+// +build !solaris
+
+package zfs
+
+import (
+	"strings"
+)
+
+// List of ZFS properties to retrieve from zfs list command on a non-Solaris platform
+var dsPropList = []string{"name", "origin", "used", "available", "mountpoint", "compression", "type", "volsize", "quota", "written", "logicalused", "usedbydataset"}
+
+var dsPropListOptions = strings.Join(dsPropList, ",")
+
+// List of Zpool properties to retrieve from zpool list command on a non-Solaris platform
+var zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio", "fragmentation", "freeing", "leaked"}
+var zpoolPropListOptions = strings.Join(zpoolPropList, ",")
+var zpoolArgs = []string{"get", "-p", zpoolPropListOptions}
diff --git a/vendor/src/github.com/mistifyio/go-zfs/utils_solaris.go b/vendor/src/github.com/mistifyio/go-zfs/utils_solaris.go
new file mode 100644
index 00000000..4b0a9e92
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/utils_solaris.go
@@ -0,0 +1,17 @@
+// +build solaris
+
+package zfs
+
+import (
+	"strings"
+)
+
+// List of ZFS properties to retrieve from zfs list command on a Solaris platform
+var dsPropList = []string{"name", "origin", "used", "available", "mountpoint", "compression", "type", "volsize", "quota"}
+
+var dsPropListOptions = strings.Join(dsPropList, ",")
+
+// List of Zpool properties to retrieve from zpool list command on a Solaris platform
+var zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio"}
+var zpoolPropListOptions = strings.Join(zpoolPropList, ",")
+var zpoolArgs = []string{"get", "-p", zpoolPropListOptions}
diff --git a/vendor/src/github.com/mistifyio/go-zfs/zfs.go b/vendor/src/github.com/mistifyio/go-zfs/zfs.go
new file mode 100644
index 00000000..63a9eff1
--- /dev/null
+++ b/vendor/src/github.com/mistifyio/go-zfs/zfs.go
@@ -0,0 +1,451 @@
+// Package zfs provides wrappers around the ZFS command line tools.
+package zfs
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+// ZFS dataset types, which can indicate if a dataset is a filesystem,
+// snapshot, or volume.
+const (
+	DatasetFilesystem = "filesystem"
+	DatasetSnapshot   = "snapshot"
+	DatasetVolume     = "volume"
+)
+
+// Dataset is a ZFS dataset. A dataset could be a clone, filesystem, snapshot,
+// or volume. The Type struct member can be used to determine a dataset's type.
+//
+// The field definitions can be found in the ZFS manual:
+// http://www.freebsd.org/cgi/man.cgi?zfs(8).
+type Dataset struct {
+	Name          string
+	Origin        string
+	Used          uint64
+	Avail         uint64
+	Mountpoint    string
+	Compression   string
+	Type          string
+	Written       uint64
+	Volsize       uint64
+	Logicalused   uint64
+	Usedbydataset uint64
+	Quota         uint64
+}
+
+// InodeType is the type of inode as reported by Diff
+type InodeType int
+
+// Types of Inodes
+const (
+	_                     = iota // 0 == unknown type
+	BlockDevice InodeType = iota
+	CharacterDevice
+	Directory
+	Door
+	NamedPipe
+	SymbolicLink
+	EventPort
+	Socket
+	File
+)
+
+// ChangeType is the type of inode change as reported by Diff
+type ChangeType int
+
+// Types of Changes
+const (
+	_                  = iota // 0 == unknown type
+	Removed ChangeType = iota
+	Created
+	Modified
+	Renamed
+)
+
+// DestroyFlag is the options flag passed to Destroy
+type DestroyFlag int
+
+// Valid destroy options
+const (
+	DestroyDefault         DestroyFlag = 1 << iota
+	DestroyRecursive                   = 1 << iota
+	DestroyRecursiveClones             = 1 << iota
+	DestroyDeferDeletion               = 1 << iota
+	DestroyForceUmount                 = 1 << iota
+)
+
+// InodeChange represents a change as reported by Diff
+type InodeChange struct {
+	Change               ChangeType
+	Type                 InodeType
+	Path                 string
+	NewPath              string
+	ReferenceCountChange int
+}
+
+// Logger can be used to log commands/actions
+type Logger interface {
+	Log(cmd []string)
+}
+
+type defaultLogger struct{}
+
+func (*defaultLogger) Log(cmd []string) {
+	return
+}
+
+var logger Logger = &defaultLogger{}
+
+// SetLogger sets a log handler to log all commands, including arguments,
+// before they are executed.
+func SetLogger(l Logger) {
+	if l != nil {
+		logger = l
+	}
+}
+
+// zfs is a helper function to wrap typical calls to zfs.
+func zfs(arg ...string) ([][]string, error) {
+	c := command{Command: "zfs"}
+	return c.Run(arg...)
+}
+
+// Datasets returns a slice of ZFS datasets, regardless of type.
+// A filter argument may be passed to select a dataset with the matching name,
+// or empty string ("") may be used to select all datasets.
+func Datasets(filter string) ([]*Dataset, error) {
+	return listByType("all", filter)
+}
+
+// Snapshots returns a slice of ZFS snapshots.
+// A filter argument may be passed to select a snapshot with the matching name,
+// or empty string ("") may be used to select all snapshots.
+func Snapshots(filter string) ([]*Dataset, error) {
+	return listByType(DatasetSnapshot, filter)
+}
+
+// Filesystems returns a slice of ZFS filesystems.
+// A filter argument may be passed to select a filesystem with the matching name,
+// or empty string ("") may be used to select all filesystems.
+func Filesystems(filter string) ([]*Dataset, error) {
+	return listByType(DatasetFilesystem, filter)
+}
+
+// Volumes returns a slice of ZFS volumes.
+// A filter argument may be passed to select a volume with the matching name,
+// or empty string ("") may be used to select all volumes.
+func Volumes(filter string) ([]*Dataset, error) {
+	return listByType(DatasetVolume, filter)
+}
+
+// GetDataset retrieves a single ZFS dataset by name.
This dataset could be +// any valid ZFS dataset type, such as a clone, filesystem, snapshot, or volume. +func GetDataset(name string) (*Dataset, error) { + out, err := zfs("list", "-Hp", "-o", dsPropListOptions, name) + if err != nil { + return nil, err + } + + ds := &Dataset{Name: name} + for _, line := range out { + if err := ds.parseLine(line); err != nil { + return nil, err + } + } + + return ds, nil +} + +// Clone clones a ZFS snapshot and returns a clone dataset. +// An error will be returned if the input dataset is not of snapshot type. +func (d *Dataset) Clone(dest string, properties map[string]string) (*Dataset, error) { + if d.Type != DatasetSnapshot { + return nil, errors.New("can only clone snapshots") + } + args := make([]string, 2, 4) + args[0] = "clone" + args[1] = "-p" + if properties != nil { + args = append(args, propsSlice(properties)...) + } + args = append(args, []string{d.Name, dest}...) + _, err := zfs(args...) + if err != nil { + return nil, err + } + return GetDataset(dest) +} + +// Unmount unmounts currently mounted ZFS file systems. +func (d *Dataset) Unmount(force bool) (*Dataset, error) { + if d.Type == DatasetSnapshot { + return nil, errors.New("cannot unmount snapshots") + } + args := make([]string, 1, 3) + args[0] = "umount" + if force { + args = append(args, "-f") + } + args = append(args, d.Name) + _, err := zfs(args...) + if err != nil { + return nil, err + } + return GetDataset(d.Name) +} + +// Mount mounts ZFS file systems. +func (d *Dataset) Mount(overlay bool, options []string) (*Dataset, error) { + if d.Type == DatasetSnapshot { + return nil, errors.New("cannot mount snapshots") + } + args := make([]string, 1, 5) + args[0] = "mount" + if overlay { + args = append(args, "-O") + } + if options != nil { + args = append(args, "-o") + args = append(args, strings.Join(options, ",")) + } + args = append(args, d.Name) + _, err := zfs(args...) + if err != nil { + return nil, err + } + return GetDataset(d.Name) +} + +// ReceiveSnapshot receives a ZFS stream from the input io.Reader, creates a +// new snapshot with the specified name, and streams the input data into the +// newly-created snapshot. +func ReceiveSnapshot(input io.Reader, name string) (*Dataset, error) { + c := command{Command: "zfs", Stdin: input} + _, err := c.Run("receive", name) + if err != nil { + return nil, err + } + return GetDataset(name) +} + +// SendSnapshot sends a ZFS stream of a snapshot to the input io.Writer. +// An error will be returned if the input dataset is not of snapshot type. +func (d *Dataset) SendSnapshot(output io.Writer) error { + if d.Type != DatasetSnapshot { + return errors.New("can only send snapshots") + } + + c := command{Command: "zfs", Stdout: output} + _, err := c.Run("send", d.Name) + return err +} + +// CreateVolume creates a new ZFS volume with the specified name, size, and +// properties. +// A full list of available ZFS properties may be found here: +// https://www.freebsd.org/cgi/man.cgi?zfs(8). +func CreateVolume(name string, size uint64, properties map[string]string) (*Dataset, error) { + args := make([]string, 4, 5) + args[0] = "create" + args[1] = "-p" + args[2] = "-V" + args[3] = strconv.FormatUint(size, 10) + if properties != nil { + args = append(args, propsSlice(properties)...) + } + args = append(args, name) + _, err := zfs(args...) + if err != nil { + return nil, err + } + return GetDataset(name) +} + +// Destroy destroys a ZFS dataset. 
If the destroy bit flag is set, any
+// descendants of the dataset will be recursively destroyed, including snapshots.
+// If the deferred bit flag is set, the snapshot is marked for deferred
+// deletion.
+func (d *Dataset) Destroy(flags DestroyFlag) error {
+	args := make([]string, 1, 3)
+	args[0] = "destroy"
+	if flags&DestroyRecursive != 0 {
+		args = append(args, "-r")
+	}
+
+	if flags&DestroyRecursiveClones != 0 {
+		args = append(args, "-R")
+	}
+
+	if flags&DestroyDeferDeletion != 0 {
+		args = append(args, "-d")
+	}
+
+	if flags&DestroyForceUmount != 0 {
+		args = append(args, "-f")
+	}
+
+	args = append(args, d.Name)
+	_, err := zfs(args...)
+	return err
+}
+
+// SetProperty sets a ZFS property on the receiving dataset.
+// A full list of available ZFS properties may be found here:
+// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func (d *Dataset) SetProperty(key, val string) error {
+	prop := strings.Join([]string{key, val}, "=")
+	_, err := zfs("set", prop, d.Name)
+	return err
+}
+
+// GetProperty returns the current value of a ZFS property from the
+// receiving dataset.
+// A full list of available ZFS properties may be found here:
+// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func (d *Dataset) GetProperty(key string) (string, error) {
+	out, err := zfs("get", "-H", key, d.Name)
+	if err != nil {
+		return "", err
+	}
+
+	return out[0][2], nil
+}
+
+// Rename renames a dataset.
+func (d *Dataset) Rename(name string, createParent bool, recursiveRenameSnapshots bool) (*Dataset, error) {
+	args := make([]string, 3, 5)
+	args[0] = "rename"
+	args[1] = d.Name
+	args[2] = name
+	if createParent {
+		args = append(args, "-p")
+	}
+	if recursiveRenameSnapshots {
+		args = append(args, "-r")
+	}
+	_, err := zfs(args...)
+	if err != nil {
+		return d, err
+	}
+
+	return GetDataset(name)
+}
+
+// Snapshots returns a slice of all ZFS snapshots of a given dataset.
+func (d *Dataset) Snapshots() ([]*Dataset, error) {
+	return Snapshots(d.Name)
+}
+
+// CreateFilesystem creates a new ZFS filesystem with the specified name and
+// properties.
+// A full list of available ZFS properties may be found here:
+// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func CreateFilesystem(name string, properties map[string]string) (*Dataset, error) {
+	args := make([]string, 1, 4)
+	args[0] = "create"
+
+	if properties != nil {
+		args = append(args, propsSlice(properties)...)
+	}
+
+	args = append(args, name)
+	_, err := zfs(args...)
+	if err != nil {
+		return nil, err
+	}
+	return GetDataset(name)
+}
+
+// Snapshot creates a new ZFS snapshot of the receiving dataset, using the
+// specified name. Optionally, the snapshot can be taken recursively, creating
+// snapshots of all descendant filesystems in a single, atomic operation.
+func (d *Dataset) Snapshot(name string, recursive bool) (*Dataset, error) {
+	args := make([]string, 1, 4)
+	args[0] = "snapshot"
+	if recursive {
+		args = append(args, "-r")
+	}
+	snapName := fmt.Sprintf("%s@%s", d.Name, name)
+	args = append(args, snapName)
+	_, err := zfs(args...)
+	if err != nil {
+		return nil, err
+	}
+	return GetDataset(snapName)
+}
+
+// Rollback rolls back the receiving ZFS dataset to a previous snapshot.
+// Optionally, intermediate snapshots can be destroyed. A ZFS snapshot
+// rollback cannot be completed without this option if more recent
+// snapshots exist.
+// An error will be returned if the input dataset is not of snapshot type.
+func (d *Dataset) Rollback(destroyMoreRecent bool) error { + if d.Type != DatasetSnapshot { + return errors.New("can only rollback snapshots") + } + + args := make([]string, 1, 3) + args[0] = "rollback" + if destroyMoreRecent { + args = append(args, "-r") + } + args = append(args, d.Name) + + _, err := zfs(args...) + return err +} + +// Children returns a slice of children of the receiving ZFS dataset. +// A recursion depth may be specified, or a depth of 0 allows unlimited +// recursion. +func (d *Dataset) Children(depth uint64) ([]*Dataset, error) { + args := []string{"list"} + if depth > 0 { + args = append(args, "-d") + args = append(args, strconv.FormatUint(depth, 10)) + } else { + args = append(args, "-r") + } + args = append(args, "-t", "all", "-Hp", "-o", dsPropListOptions) + args = append(args, d.Name) + + out, err := zfs(args...) + if err != nil { + return nil, err + } + + var datasets []*Dataset + name := "" + var ds *Dataset + for _, line := range out { + if name != line[0] { + name = line[0] + ds = &Dataset{Name: name} + datasets = append(datasets, ds) + } + if err := ds.parseLine(line); err != nil { + return nil, err + } + } + return datasets[1:], nil +} + +// Diff returns changes between a snapshot and the given ZFS dataset. +// The snapshot name must include the filesystem part as it is possible to +// compare clones with their origin snapshots. +func (d *Dataset) Diff(snapshot string) ([]*InodeChange, error) { + args := []string{"diff", "-FH", snapshot, d.Name}[:] + out, err := zfs(args...) + if err != nil { + return nil, err + } + inodeChanges, err := parseInodeChanges(out) + if err != nil { + return nil, err + } + return inodeChanges, nil +} diff --git a/vendor/src/github.com/mistifyio/go-zfs/zpool.go b/vendor/src/github.com/mistifyio/go-zfs/zpool.go new file mode 100644 index 00000000..d8db945d --- /dev/null +++ b/vendor/src/github.com/mistifyio/go-zfs/zpool.go @@ -0,0 +1,112 @@ +package zfs + +// ZFS zpool states, which can indicate if a pool is online, offline, +// degraded, etc. More information regarding zpool states can be found here: +// https://docs.oracle.com/cd/E19253-01/819-5461/gamno/index.html. +const ( + ZpoolOnline = "ONLINE" + ZpoolDegraded = "DEGRADED" + ZpoolFaulted = "FAULTED" + ZpoolOffline = "OFFLINE" + ZpoolUnavail = "UNAVAIL" + ZpoolRemoved = "REMOVED" +) + +// Zpool is a ZFS zpool. A pool is a top-level structure in ZFS, and can +// contain many descendent datasets. +type Zpool struct { + Name string + Health string + Allocated uint64 + Size uint64 + Free uint64 + Fragmentation uint64 + ReadOnly bool + Freeing uint64 + Leaked uint64 + DedupRatio float64 +} + +// zpool is a helper function to wrap typical calls to zpool. +func zpool(arg ...string) ([][]string, error) { + c := command{Command: "zpool"} + return c.Run(arg...) +} + +// GetZpool retrieves a single ZFS zpool by name. +func GetZpool(name string) (*Zpool, error) { + args := zpoolArgs + args = append(args, name) + out, err := zpool(args...) + if err != nil { + return nil, err + } + + // there is no -H + out = out[1:] + + z := &Zpool{Name: name} + for _, line := range out { + if err := z.parseLine(line); err != nil { + return nil, err + } + } + + return z, nil +} + +// Datasets returns a slice of all ZFS datasets in a zpool. +func (z *Zpool) Datasets() ([]*Dataset, error) { + return Datasets(z.Name) +} + +// Snapshots returns a slice of all ZFS snapshots in a zpool. 
+func (z *Zpool) Snapshots() ([]*Dataset, error) {
+	return Snapshots(z.Name)
+}
+
+// CreateZpool creates a new ZFS zpool with the specified name, properties,
+// and optional arguments.
+// A full list of available ZFS properties and command-line arguments may be
+// found here: https://www.freebsd.org/cgi/man.cgi?zfs(8).
+func CreateZpool(name string, properties map[string]string, args ...string) (*Zpool, error) {
+	cli := make([]string, 1, 4)
+	cli[0] = "create"
+	if properties != nil {
+		cli = append(cli, propsSlice(properties)...)
+	}
+	cli = append(cli, name)
+	cli = append(cli, args...)
+	_, err := zpool(cli...)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Zpool{Name: name}, nil
+}
+
+// Destroy destroys a ZFS zpool by name.
+func (z *Zpool) Destroy() error {
+	_, err := zpool("destroy", z.Name)
+	return err
+}
+
+// ListZpools lists all ZFS zpools accessible on the current system.
+func ListZpools() ([]*Zpool, error) {
+	args := []string{"list", "-Ho", "name"}
+	out, err := zpool(args...)
+	if err != nil {
+		return nil, err
+	}
+
+	var pools []*Zpool
+
+	for _, line := range out {
+		z, err := GetZpool(line[0])
+		if err != nil {
+			return nil, err
+		}
+		pools = append(pools, z)
+	}
+	return pools, nil
+}
diff --git a/vendor/src/github.com/mtrmac/gpgme/.gitignore b/vendor/src/github.com/mtrmac/gpgme/.gitignore
new file mode 100644
index 00000000..0210b26e
--- /dev/null
+++ b/vendor/src/github.com/mtrmac/gpgme/.gitignore
@@ -0,0 +1 @@
+testdata/gpghome/random_seed
diff --git a/vendor/src/github.com/mtrmac/gpgme/LICENSE b/vendor/src/github.com/mtrmac/gpgme/LICENSE
new file mode 100644
index 00000000..06d4ab77
--- /dev/null
+++ b/vendor/src/github.com/mtrmac/gpgme/LICENSE
@@ -0,0 +1,12 @@
+Copyright (c) 2015, James Fargher
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/src/github.com/mtrmac/gpgme/README.md b/vendor/src/github.com/mtrmac/gpgme/README.md
new file mode 100644
index 00000000..4770b82a
--- /dev/null
+++ b/vendor/src/github.com/mtrmac/gpgme/README.md
@@ -0,0 +1,13 @@
+# GPGME (golang)
+
+Go wrapper for the GPGME library.
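+
+As a quick taste of the API, here is a minimal decrypt-to-memory sketch (an
+editorial example, not from upstream: it assumes a GnuPG keyring holding the
+required secret key, a hypothetical ciphertext file `message.gpg`, and the
+import path used by this vendoring):
+
+```go
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/mtrmac/gpgme"
+)
+
+func main() {
+	f, err := os.Open("message.gpg") // hypothetical input file
+	if err != nil {
+		panic(err)
+	}
+	defer f.Close()
+
+	// Decrypt consumes the ciphertext and returns a *Data rewound to offset 0.
+	plain, err := gpgme.Decrypt(f)
+	if err != nil {
+		panic(err)
+	}
+	defer plain.Close()
+
+	out, err := ioutil.ReadAll(plain) // *Data implements io.Reader
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%s", out)
+}
+```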
+
+This library is intended for use with desktop applications. If you are looking
+to add OpenPGP support to a server application, I suggest you first look at
+[golang.org/x/crypto/openpgp](https://godoc.org/golang.org/x/crypto/openpgp).
+
+## Installation
+
+    go get -u github.com/proglottis/gpgme
+
+## Documentation
+
+* [godoc](https://godoc.org/github.com/proglottis/gpgme)
diff --git a/vendor/src/github.com/mtrmac/gpgme/callbacks.go b/vendor/src/github.com/mtrmac/gpgme/callbacks.go
new file mode 100644
index 00000000..d1dc610d
--- /dev/null
+++ b/vendor/src/github.com/mtrmac/gpgme/callbacks.go
@@ -0,0 +1,42 @@
+package gpgme
+
+import (
+	"sync"
+)
+
+var callbacks struct {
+	sync.Mutex
+	m map[uintptr]interface{}
+	c uintptr
+}
+
+func callbackAdd(v interface{}) uintptr {
+	callbacks.Lock()
+	defer callbacks.Unlock()
+	if callbacks.m == nil {
+		callbacks.m = make(map[uintptr]interface{})
+	}
+	callbacks.c++
+	ret := callbacks.c
+	callbacks.m[ret] = v
+	return ret
+}
+
+func callbackLookup(c uintptr) interface{} {
+	callbacks.Lock()
+	defer callbacks.Unlock()
+	ret := callbacks.m[c]
+	if ret == nil {
+		panic("callback pointer not found")
+	}
+	return ret
+}
+
+func callbackDelete(c uintptr) {
+	callbacks.Lock()
+	defer callbacks.Unlock()
+	if callbacks.m[c] == nil {
+		panic("callback pointer not found")
+	}
+	delete(callbacks.m, c)
+}
diff --git a/vendor/src/github.com/mtrmac/gpgme/data.go b/vendor/src/github.com/mtrmac/gpgme/data.go
new file mode 100644
index 00000000..eebc9726
--- /dev/null
+++ b/vendor/src/github.com/mtrmac/gpgme/data.go
@@ -0,0 +1,191 @@
+package gpgme
+
+// #include <string.h>
+// #include <gpgme.h>
+// #include <errno.h>
+// #include "go_gpgme.h"
+import "C"
+
+import (
+	"io"
+	"os"
+	"runtime"
+	"unsafe"
+)
+
+const (
+	SeekSet = C.SEEK_SET
+	SeekCur = C.SEEK_CUR
+	SeekEnd = C.SEEK_END
+)
+
+//export gogpgme_readfunc
+func gogpgme_readfunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t {
+	d := callbackLookup(uintptr(handle)).(*Data)
+	if len(d.buf) < int(size) {
+		d.buf = make([]byte, size)
+	}
+	n, err := d.r.Read(d.buf[:size])
+	if err != nil && err != io.EOF {
+		C.gpgme_err_set_errno(C.EIO)
+		return -1
+	}
+	C.memcpy(buffer, unsafe.Pointer(&d.buf[0]), C.size_t(n))
+	return C.ssize_t(n)
+}
+
+//export gogpgme_writefunc
+func gogpgme_writefunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t {
+	d := callbackLookup(uintptr(handle)).(*Data)
+	if len(d.buf) < int(size) {
+		d.buf = make([]byte, size)
+	}
+	C.memcpy(unsafe.Pointer(&d.buf[0]), buffer, C.size_t(size))
+	n, err := d.w.Write(d.buf[:size])
+	if err != nil && err != io.EOF {
+		C.gpgme_err_set_errno(C.EIO)
+		return -1
+	}
+	return C.ssize_t(n)
+}
+
+//export gogpgme_seekfunc
+func gogpgme_seekfunc(handle unsafe.Pointer, offset C.off_t, whence C.int) C.off_t {
+	d := callbackLookup(uintptr(handle)).(*Data)
+	n, err := d.s.Seek(int64(offset), int(whence))
+	if err != nil {
+		C.gpgme_err_set_errno(C.EIO)
+		return -1
+	}
+	return C.off_t(n)
+}
+
+// The Data buffer used to communicate with GPGME
+type Data struct {
+	dh  C.gpgme_data_t
+	buf []byte
+	cbs C.struct_gpgme_data_cbs
+	r   io.Reader
+	w   io.Writer
+	s   io.Seeker
+	cbc uintptr
+}
+
+func newData() *Data {
+	d := &Data{}
+	runtime.SetFinalizer(d, (*Data).Close)
+	return d
+}
+
+// NewData returns a new memory based data buffer
+func NewData() (*Data, error) {
+	d := newData()
+	return d, handleError(C.gpgme_data_new(&d.dh))
+}
+
+// NewDataFile returns a new file based data buffer
+func NewDataFile(f *os.File) (*Data, error) {
+	d := newData()
+	return d,
handleError(C.gpgme_data_new_from_fd(&d.dh, C.int(f.Fd()))) +} + +// NewDataBytes returns a new memory based data buffer that contains `b` bytes +func NewDataBytes(b []byte) (*Data, error) { + d := newData() + var cb *C.char + if len(b) != 0 { + cb = (*C.char)(unsafe.Pointer(&b[0])) + } + return d, handleError(C.gpgme_data_new_from_mem(&d.dh, cb, C.size_t(len(b)), 1)) +} + +// NewDataReader returns a new callback based data buffer +func NewDataReader(r io.Reader) (*Data, error) { + d := newData() + d.r = r + d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc) + cbc := callbackAdd(d) + d.cbc = cbc + return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) +} + +// NewDataWriter returns a new callback based data buffer +func NewDataWriter(w io.Writer) (*Data, error) { + d := newData() + d.w = w + d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc) + cbc := callbackAdd(d) + d.cbc = cbc + return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) +} + +// NewDataReadWriter returns a new callback based data buffer +func NewDataReadWriter(rw io.ReadWriter) (*Data, error) { + d := newData() + d.r = rw + d.w = rw + d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc) + d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc) + cbc := callbackAdd(d) + d.cbc = cbc + return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) +} + +// NewDataReadWriteSeeker returns a new callback based data buffer +func NewDataReadWriteSeeker(rw io.ReadWriteSeeker) (*Data, error) { + d := newData() + d.r = rw + d.w = rw + d.s = rw + d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc) + d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc) + d.cbs.seek = C.gpgme_data_seek_cb_t(C.gogpgme_seekfunc) + cbc := callbackAdd(d) + d.cbc = cbc + return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) +} + +// Close releases any resources associated with the data buffer +func (d *Data) Close() error { + if d.dh == nil { + return nil + } + if d.cbc > 0 { + callbackDelete(d.cbc) + } + _, err := C.gpgme_data_release(d.dh) + d.dh = nil + return err +} + +func (d *Data) Write(p []byte) (int, error) { + n, err := C.gpgme_data_write(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p))) + if err != nil { + return 0, err + } + if n == 0 { + return 0, io.EOF + } + return int(n), nil +} + +func (d *Data) Read(p []byte) (int, error) { + n, err := C.gpgme_data_read(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p))) + if err != nil { + return 0, err + } + if n == 0 { + return 0, io.EOF + } + return int(n), nil +} + +func (d *Data) Seek(offset int64, whence int) (int64, error) { + n, err := C.gpgme_data_seek(d.dh, C.off_t(offset), C.int(whence)) + return int64(n), err +} + +// Name returns the associated filename if any +func (d *Data) Name() string { + return C.GoString(C.gpgme_data_get_file_name(d.dh)) +} diff --git a/vendor/src/github.com/mtrmac/gpgme/go_gpgme.c b/vendor/src/github.com/mtrmac/gpgme/go_gpgme.c new file mode 100644 index 00000000..b887574e --- /dev/null +++ b/vendor/src/github.com/mtrmac/gpgme/go_gpgme.c @@ -0,0 +1,89 @@ +#include "go_gpgme.h" + +gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle) { + return gpgme_data_new_from_cbs(dh, cbs, (void *)handle); +} + +void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t handle) { + gpgme_set_passphrase_cb(ctx, cb, (void *)handle); +} + +unsigned int key_revoked(gpgme_key_t k) { + return 
k->revoked;
+}
+
+unsigned int key_expired(gpgme_key_t k) {
+	return k->expired;
+}
+
+unsigned int key_disabled(gpgme_key_t k) {
+	return k->disabled;
+}
+
+unsigned int key_invalid(gpgme_key_t k) {
+	return k->invalid;
+}
+
+unsigned int key_can_encrypt(gpgme_key_t k) {
+	return k->can_encrypt;
+}
+
+unsigned int key_can_sign(gpgme_key_t k) {
+	return k->can_sign;
+}
+
+unsigned int key_can_certify(gpgme_key_t k) {
+	return k->can_certify;
+}
+
+unsigned int key_secret(gpgme_key_t k) {
+	return k->secret;
+}
+
+unsigned int key_can_authenticate(gpgme_key_t k) {
+	return k->can_authenticate;
+}
+
+unsigned int key_is_qualified(gpgme_key_t k) {
+	return k->is_qualified;
+}
+
+unsigned int signature_wrong_key_usage(gpgme_signature_t s) {
+	return s->wrong_key_usage;
+}
+
+unsigned int signature_pka_trust(gpgme_signature_t s) {
+	return s->pka_trust;
+}
+
+unsigned int signature_chain_model(gpgme_signature_t s) {
+	return s->chain_model;
+}
+
+unsigned int subkey_revoked(gpgme_subkey_t k) {
+	return k->revoked;
+}
+
+unsigned int subkey_expired(gpgme_subkey_t k) {
+	return k->expired;
+}
+
+unsigned int subkey_disabled(gpgme_subkey_t k) {
+	return k->disabled;
+}
+
+unsigned int subkey_invalid(gpgme_subkey_t k) {
+	return k->invalid;
+}
+
+unsigned int subkey_secret(gpgme_subkey_t k) {
+	return k->secret;
+}
+
+unsigned int uid_revoked(gpgme_user_id_t u) {
+	return u->revoked;
+}
+
+unsigned int uid_invalid(gpgme_user_id_t u) {
+	return u->invalid;
+}
diff --git a/vendor/src/github.com/mtrmac/gpgme/go_gpgme.h b/vendor/src/github.com/mtrmac/gpgme/go_gpgme.h
new file mode 100644
index 00000000..a3678b12
--- /dev/null
+++ b/vendor/src/github.com/mtrmac/gpgme/go_gpgme.h
@@ -0,0 +1,37 @@
+#ifndef GO_GPGME_H
+#define GO_GPGME_H
+
+#define _FILE_OFFSET_BITS 64
+#include <gpgme.h>
+
+#include <stdint.h>
+
+extern ssize_t gogpgme_readfunc(void *handle, void *buffer, size_t size);
+extern ssize_t gogpgme_writefunc(void *handle, void *buffer, size_t size);
+extern off_t gogpgme_seekfunc(void *handle, off_t offset, int whence);
+extern gpgme_error_t gogpgme_passfunc(void *hook, char *uid_hint, char *passphrase_info, int prev_was_bad, int fd);
+extern gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle);
+extern void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t handle);
+
+extern unsigned int key_revoked(gpgme_key_t k);
+extern unsigned int key_expired(gpgme_key_t k);
+extern unsigned int key_disabled(gpgme_key_t k);
+extern unsigned int key_invalid(gpgme_key_t k);
+extern unsigned int key_can_encrypt(gpgme_key_t k);
+extern unsigned int key_can_sign(gpgme_key_t k);
+extern unsigned int key_can_certify(gpgme_key_t k);
+extern unsigned int key_secret(gpgme_key_t k);
+extern unsigned int key_can_authenticate(gpgme_key_t k);
+extern unsigned int key_is_qualified(gpgme_key_t k);
+extern unsigned int signature_wrong_key_usage(gpgme_signature_t s);
+extern unsigned int signature_pka_trust(gpgme_signature_t s);
+extern unsigned int signature_chain_model(gpgme_signature_t s);
+extern unsigned int subkey_revoked(gpgme_subkey_t k);
+extern unsigned int subkey_expired(gpgme_subkey_t k);
+extern unsigned int subkey_disabled(gpgme_subkey_t k);
+extern unsigned int subkey_invalid(gpgme_subkey_t k);
+extern unsigned int subkey_secret(gpgme_subkey_t k);
+extern unsigned int uid_revoked(gpgme_user_id_t u);
+extern unsigned int uid_invalid(gpgme_user_id_t u);
+
+#endif
diff --git a/vendor/src/github.com/mtrmac/gpgme/gpgme.go
b/vendor/src/github.com/mtrmac/gpgme/gpgme.go
new file mode 100644
index 00000000..5f1793ea
--- /dev/null
+++ b/vendor/src/github.com/mtrmac/gpgme/gpgme.go
@@ -0,0 +1,740 @@
+// Package gpgme provides a Go wrapper for the GPGME library
+package gpgme
+
+// #cgo LDFLAGS: -lgpgme -lassuan -lgpg-error
+// #cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64
+// #include <stdlib.h>
+// #include <gpgme.h>
+// #include "go_gpgme.h"
+import "C"
+
+import (
+	"io"
+	"os"
+	"runtime"
+	"time"
+	"unsafe"
+)
+
+var Version string
+
+func init() {
+	Version = C.GoString(C.gpgme_check_version(nil))
+}
+
+// Callback is the function that is called when a passphrase is required
+type Callback func(uidHint string, prevWasBad bool, f *os.File) error
+
+//export gogpgme_passfunc
+func gogpgme_passfunc(hook unsafe.Pointer, uid_hint, passphrase_info *C.char, prev_was_bad, fd C.int) C.gpgme_error_t {
+	c := callbackLookup(uintptr(hook)).(*Context)
+	go_uid_hint := C.GoString(uid_hint)
+	f := os.NewFile(uintptr(fd), go_uid_hint)
+	defer f.Close()
+	err := c.callback(go_uid_hint, prev_was_bad != 0, f)
+	if err != nil {
+		return C.GPG_ERR_CANCELED
+	}
+	return 0
+}
+
+type Protocol int
+
+const (
+	ProtocolOpenPGP  Protocol = C.GPGME_PROTOCOL_OpenPGP
+	ProtocolCMS      Protocol = C.GPGME_PROTOCOL_CMS
+	ProtocolGPGConf  Protocol = C.GPGME_PROTOCOL_GPGCONF
+	ProtocolAssuan   Protocol = C.GPGME_PROTOCOL_ASSUAN
+	ProtocolG13      Protocol = C.GPGME_PROTOCOL_G13
+	ProtocolUIServer Protocol = C.GPGME_PROTOCOL_UISERVER
+	// ProtocolSpawn Protocol = C.GPGME_PROTOCOL_SPAWN // Unavailable in 1.4.3
+	ProtocolDefault Protocol = C.GPGME_PROTOCOL_DEFAULT
+	ProtocolUnknown Protocol = C.GPGME_PROTOCOL_UNKNOWN
+)
+
+type PinEntryMode int
+
+// const ( // Unavailable in 1.3.2
+// 	PinEntryDefault  PinEntryMode = C.GPGME_PINENTRY_MODE_DEFAULT
+// 	PinEntryAsk      PinEntryMode = C.GPGME_PINENTRY_MODE_ASK
+// 	PinEntryCancel   PinEntryMode = C.GPGME_PINENTRY_MODE_CANCEL
+// 	PinEntryError    PinEntryMode = C.GPGME_PINENTRY_MODE_ERROR
+// 	PinEntryLoopback PinEntryMode = C.GPGME_PINENTRY_MODE_LOOPBACK
+// )
+
+type EncryptFlag uint
+
+const (
+	EncryptAlwaysTrust EncryptFlag = C.GPGME_ENCRYPT_ALWAYS_TRUST
+	EncryptNoEncryptTo EncryptFlag = C.GPGME_ENCRYPT_NO_ENCRYPT_TO
+	EncryptPrepare     EncryptFlag = C.GPGME_ENCRYPT_PREPARE
+	EncryptExceptSign  EncryptFlag = C.GPGME_ENCRYPT_EXPECT_SIGN
+	// EncryptNoCompress EncryptFlag = C.GPGME_ENCRYPT_NO_COMPRESS // Unavailable in 1.4.3
+)
+
+type HashAlgo int
+
+// const values for HashAlgo values should be added when necessary.
+
+type KeyListMode uint
+
+const (
+	KeyListModeLocal        KeyListMode = C.GPGME_KEYLIST_MODE_LOCAL
+	KeyListModeExtern       KeyListMode = C.GPGME_KEYLIST_MODE_EXTERN
+	KeyListModeSigs         KeyListMode = C.GPGME_KEYLIST_MODE_SIGS
+	KeyListModeSigNotations KeyListMode = C.GPGME_KEYLIST_MODE_SIG_NOTATIONS
+	// KeyListModeWithSecret KeyListMode = C.GPGME_KEYLIST_MODE_WITH_SECRET // Unavailable in 1.4.3
+	KeyListModeEphemeral    KeyListMode = C.GPGME_KEYLIST_MODE_EPHEMERAL
+	KeyListModeModeValidate KeyListMode = C.GPGME_KEYLIST_MODE_VALIDATE
+)
+
+type PubkeyAlgo int
+
+// const values for PubkeyAlgo values should be added when necessary.
+ +type SigMode int + +const ( + SigModeNormal SigMode = C.GPGME_SIG_MODE_NORMAL + SigModeDetach SigMode = C.GPGME_SIG_MODE_DETACH + SigModeClear SigMode = C.GPGME_SIG_MODE_CLEAR +) + +type SigSum int + +const ( + SigSumValid SigSum = C.GPGME_SIGSUM_VALID + SigSumGreen SigSum = C.GPGME_SIGSUM_GREEN + SigSumRed SigSum = C.GPGME_SIGSUM_RED + SigSumKeyRevoked SigSum = C.GPGME_SIGSUM_KEY_REVOKED + SigSumKeyExpired SigSum = C.GPGME_SIGSUM_KEY_EXPIRED + SigSumSigExpired SigSum = C.GPGME_SIGSUM_SIG_EXPIRED + SigSumKeyMissing SigSum = C.GPGME_SIGSUM_KEY_MISSING + SigSumCRLMissing SigSum = C.GPGME_SIGSUM_CRL_MISSING + SigSumCRLTooOld SigSum = C.GPGME_SIGSUM_CRL_TOO_OLD + SigSumBadPolicy SigSum = C.GPGME_SIGSUM_BAD_POLICY + SigSumSysError SigSum = C.GPGME_SIGSUM_SYS_ERROR +) + +type Validity int + +const ( + ValidityUnknown Validity = C.GPGME_VALIDITY_UNKNOWN + ValidityUndefined Validity = C.GPGME_VALIDITY_UNDEFINED + ValidityNever Validity = C.GPGME_VALIDITY_NEVER + ValidityMarginal Validity = C.GPGME_VALIDITY_MARGINAL + ValidityFull Validity = C.GPGME_VALIDITY_FULL + ValidityUltimate Validity = C.GPGME_VALIDITY_ULTIMATE +) + +type ErrorCode int + +const ( + ErrorNoError ErrorCode = C.GPG_ERR_NO_ERROR + ErrorEOF ErrorCode = C.GPG_ERR_EOF +) + +// Error is a wrapper for GPGME errors +type Error struct { + err C.gpgme_error_t +} + +func (e Error) Code() ErrorCode { + return ErrorCode(C.gpgme_err_code(e.err)) +} + +func (e Error) Error() string { + return C.GoString(C.gpgme_strerror(e.err)) +} + +func handleError(err C.gpgme_error_t) error { + e := Error{err: err} + if e.Code() == ErrorNoError { + return nil + } + return e +} + +func cbool(b bool) C.int { + if b { + return 1 + } + return 0 +} + +func EngineCheckVersion(p Protocol) error { + return handleError(C.gpgme_engine_check_version(C.gpgme_protocol_t(p))) +} + +type EngineInfo struct { + info C.gpgme_engine_info_t +} + +func (e *EngineInfo) Next() *EngineInfo { + if e.info.next == nil { + return nil + } + return &EngineInfo{info: e.info.next} +} + +func (e *EngineInfo) Protocol() Protocol { + return Protocol(e.info.protocol) +} + +func (e *EngineInfo) FileName() string { + return C.GoString(e.info.file_name) +} + +func (e *EngineInfo) Version() string { + return C.GoString(e.info.version) +} + +func (e *EngineInfo) RequiredVersion() string { + return C.GoString(e.info.req_version) +} + +func (e *EngineInfo) HomeDir() string { + return C.GoString(e.info.home_dir) +} + +func GetEngineInfo() (*EngineInfo, error) { + info := &EngineInfo{} + return info, handleError(C.gpgme_get_engine_info(&info.info)) +} + +func SetEngineInfo(proto Protocol, fileName, homeDir string) error { + var cfn, chome *C.char + if fileName != "" { + cfn = C.CString(fileName) + defer C.free(unsafe.Pointer(cfn)) + } + if homeDir != "" { + chome = C.CString(homeDir) + defer C.free(unsafe.Pointer(chome)) + } + return handleError(C.gpgme_set_engine_info(C.gpgme_protocol_t(proto), cfn, chome)) +} + +func FindKeys(pattern string, secretOnly bool) ([]*Key, error) { + var keys []*Key + ctx, err := New() + if err != nil { + return keys, err + } + defer ctx.Release() + if err := ctx.KeyListStart(pattern, secretOnly); err != nil { + return keys, err + } + defer ctx.KeyListEnd() + for ctx.KeyListNext() { + keys = append(keys, ctx.Key) + } + if ctx.KeyError != nil { + return keys, ctx.KeyError + } + return keys, nil +} + +func Decrypt(r io.Reader) (*Data, error) { + ctx, err := New() + if err != nil { + return nil, err + } + defer ctx.Release() + cipher, err := NewDataReader(r) + if err != 
nil { + return nil, err + } + defer cipher.Close() + plain, err := NewData() + if err != nil { + return nil, err + } + err = ctx.Decrypt(cipher, plain) + plain.Seek(0, SeekSet) + return plain, err +} + +type Context struct { + Key *Key + KeyError error + + callback Callback + cbc uintptr + + ctx C.gpgme_ctx_t +} + +func New() (*Context, error) { + c := &Context{} + err := C.gpgme_new(&c.ctx) + runtime.SetFinalizer(c, (*Context).Release) + return c, handleError(err) +} + +func (c *Context) Release() { + if c.ctx == nil { + return + } + if c.cbc > 0 { + callbackDelete(c.cbc) + } + C.gpgme_release(c.ctx) + c.ctx = nil +} + +func (c *Context) SetArmor(yes bool) { + C.gpgme_set_armor(c.ctx, cbool(yes)) +} + +func (c *Context) Armor() bool { + return C.gpgme_get_armor(c.ctx) != 0 +} + +func (c *Context) SetTextMode(yes bool) { + C.gpgme_set_textmode(c.ctx, cbool(yes)) +} + +func (c *Context) TextMode() bool { + return C.gpgme_get_textmode(c.ctx) != 0 +} + +func (c *Context) SetProtocol(p Protocol) error { + return handleError(C.gpgme_set_protocol(c.ctx, C.gpgme_protocol_t(p))) +} + +func (c *Context) Protocol() Protocol { + return Protocol(C.gpgme_get_protocol(c.ctx)) +} + +func (c *Context) SetKeyListMode(m KeyListMode) error { + return handleError(C.gpgme_set_keylist_mode(c.ctx, C.gpgme_keylist_mode_t(m))) +} + +func (c *Context) KeyListMode() KeyListMode { + return KeyListMode(C.gpgme_get_keylist_mode(c.ctx)) +} + +// Unavailable in 1.3.2: +// func (c *Context) SetPinEntryMode(m PinEntryMode) error { +// return handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m))) +// } + +// Unavailable in 1.3.2: +// func (c *Context) PinEntryMode() PinEntryMode { +// return PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx)) +// } + +func (c *Context) SetCallback(callback Callback) error { + var err error + c.callback = callback + if c.cbc > 0 { + callbackDelete(c.cbc) + } + if callback != nil { + cbc := callbackAdd(c) + c.cbc = cbc + _, err = C.gogpgme_set_passphrase_cb(c.ctx, C.gpgme_passphrase_cb_t(C.gogpgme_passfunc), C.uintptr_t(cbc)) + } else { + c.cbc = 0 + _, err = C.gogpgme_set_passphrase_cb(c.ctx, nil, 0) + } + return err +} + +func (c *Context) EngineInfo() *EngineInfo { + return &EngineInfo{info: C.gpgme_ctx_get_engine_info(c.ctx)} +} + +func (c *Context) SetEngineInfo(proto Protocol, fileName, homeDir string) error { + var cfn, chome *C.char + if fileName != "" { + cfn = C.CString(fileName) + defer C.free(unsafe.Pointer(cfn)) + } + if homeDir != "" { + chome = C.CString(homeDir) + defer C.free(unsafe.Pointer(chome)) + } + return handleError(C.gpgme_ctx_set_engine_info(c.ctx, C.gpgme_protocol_t(proto), cfn, chome)) +} + +func (c *Context) KeyListStart(pattern string, secretOnly bool) error { + cpattern := C.CString(pattern) + defer C.free(unsafe.Pointer(cpattern)) + err := C.gpgme_op_keylist_start(c.ctx, cpattern, cbool(secretOnly)) + return handleError(err) +} + +func (c *Context) KeyListNext() bool { + c.Key = newKey() + err := handleError(C.gpgme_op_keylist_next(c.ctx, &c.Key.k)) + if err != nil { + if e, ok := err.(Error); ok && e.Code() == ErrorEOF { + c.KeyError = nil + } else { + c.KeyError = err + } + return false + } + c.KeyError = nil + return true +} + +func (c *Context) KeyListEnd() error { + return handleError(C.gpgme_op_keylist_end(c.ctx)) +} + +func (c *Context) GetKey(fingerprint string, secret bool) (*Key, error) { + key := newKey() + cfpr := C.CString(fingerprint) + defer C.free(unsafe.Pointer(cfpr)) + return key, handleError(C.gpgme_get_key(c.ctx, cfpr, &key.k, 
cbool(secret))) +} + +func (c *Context) Decrypt(ciphertext, plaintext *Data) error { + return handleError(C.gpgme_op_decrypt(c.ctx, ciphertext.dh, plaintext.dh)) +} + +func (c *Context) DecryptVerify(ciphertext, plaintext *Data) error { + return handleError(C.gpgme_op_decrypt_verify(c.ctx, ciphertext.dh, plaintext.dh)) +} + +type Signature struct { + Summary SigSum + Fingerprint string + Status error + Timestamp time.Time + ExpTimestamp time.Time + WrongKeyUsage bool + PKATrust uint + ChainModel bool + Validity Validity + ValidityReason error + PubkeyAlgo PubkeyAlgo + HashAlgo HashAlgo +} + +func (c *Context) Verify(sig, signedText, plain *Data) (string, []Signature, error) { + var signedTextPtr, plainPtr C.gpgme_data_t = nil, nil + if signedText != nil { + signedTextPtr = signedText.dh + } + if plain != nil { + plainPtr = plain.dh + } + err := handleError(C.gpgme_op_verify(c.ctx, sig.dh, signedTextPtr, plainPtr)) + if err != nil { + return "", nil, err + } + res := C.gpgme_op_verify_result(c.ctx) + sigs := []Signature{} + for s := res.signatures; s != nil; s = s.next { + sig := Signature{ + Summary: SigSum(s.summary), + Fingerprint: C.GoString(s.fpr), + Status: handleError(s.status), + // s.notations not implemented + Timestamp: time.Unix(int64(s.timestamp), 0), + ExpTimestamp: time.Unix(int64(s.exp_timestamp), 0), + WrongKeyUsage: C.signature_wrong_key_usage(s) != 0, + PKATrust: uint(C.signature_pka_trust(s)), + ChainModel: C.signature_chain_model(s) != 0, + Validity: Validity(s.validity), + ValidityReason: handleError(s.validity_reason), + PubkeyAlgo: PubkeyAlgo(s.pubkey_algo), + HashAlgo: HashAlgo(s.hash_algo), + } + sigs = append(sigs, sig) + } + return C.GoString(res.file_name), sigs, nil +} + +func (c *Context) Encrypt(recipients []*Key, flags EncryptFlag, plaintext, ciphertext *Data) error { + size := unsafe.Sizeof(new(C.gpgme_key_t)) + recp := C.calloc(C.size_t(len(recipients)+1), C.size_t(size)) + defer C.free(recp) + for i := range recipients { + ptr := (*C.gpgme_key_t)(unsafe.Pointer(uintptr(recp) + size*uintptr(i))) + *ptr = recipients[i].k + } + err := C.gpgme_op_encrypt(c.ctx, (*C.gpgme_key_t)(recp), C.gpgme_encrypt_flags_t(flags), plaintext.dh, ciphertext.dh) + return handleError(err) +} + +func (c *Context) Sign(signers []*Key, plain, sig *Data, mode SigMode) error { + C.gpgme_signers_clear(c.ctx) + for _, k := range signers { + if err := handleError(C.gpgme_signers_add(c.ctx, k.k)); err != nil { + C.gpgme_signers_clear(c.ctx) + return err + } + } + return handleError(C.gpgme_op_sign(c.ctx, plain.dh, sig.dh, C.gpgme_sig_mode_t(mode))) +} + +// ImportStatusFlags describes the type of ImportStatus.Status. The C API in gpgme.h simply uses "unsigned". 
+type ImportStatusFlags uint + +const ( + ImportNew ImportStatusFlags = C.GPGME_IMPORT_NEW + ImportUID ImportStatusFlags = C.GPGME_IMPORT_UID + ImportSIG ImportStatusFlags = C.GPGME_IMPORT_SIG + ImportSubKey ImportStatusFlags = C.GPGME_IMPORT_SUBKEY + ImportSecret ImportStatusFlags = C.GPGME_IMPORT_SECRET +) + +type ImportStatus struct { + Fingerprint string + Result error + Status ImportStatusFlags +} + +type ImportResult struct { + Considered int + NoUserID int + Imported int + ImportedRSA int + Unchanged int + NewUserIDs int + NewSubKeys int + NewSignatures int + NewRevocations int + SecretRead int + SecretImported int + SecretUnchanged int + NotImported int + Imports []ImportStatus +} + +func (c *Context) Import(keyData *Data) (*ImportResult, error) { + err := handleError(C.gpgme_op_import(c.ctx, keyData.dh)) + if err != nil { + return nil, err + } + res := C.gpgme_op_import_result(c.ctx) + imports := []ImportStatus{} + for s := res.imports; s != nil; s = s.next { + imports = append(imports, ImportStatus{ + Fingerprint: C.GoString(s.fpr), + Result: handleError(s.result), + Status: ImportStatusFlags(s.status), + }) + } + return &ImportResult{ + Considered: int(res.considered), + NoUserID: int(res.no_user_id), + Imported: int(res.imported), + ImportedRSA: int(res.imported_rsa), + Unchanged: int(res.unchanged), + NewUserIDs: int(res.new_user_ids), + NewSubKeys: int(res.new_sub_keys), + NewSignatures: int(res.new_signatures), + NewRevocations: int(res.new_revocations), + SecretRead: int(res.secret_read), + SecretImported: int(res.secret_imported), + SecretUnchanged: int(res.secret_unchanged), + NotImported: int(res.not_imported), + Imports: imports, + }, nil +} + +type Key struct { + k C.gpgme_key_t +} + +func newKey() *Key { + k := &Key{} + runtime.SetFinalizer(k, (*Key).Release) + return k +} + +func (k *Key) Release() { + C.gpgme_key_release(k.k) + k.k = nil +} + +func (k *Key) Revoked() bool { + return C.key_revoked(k.k) != 0 +} + +func (k *Key) Expired() bool { + return C.key_expired(k.k) != 0 +} + +func (k *Key) Disabled() bool { + return C.key_disabled(k.k) != 0 +} + +func (k *Key) Invalid() bool { + return C.key_invalid(k.k) != 0 +} + +func (k *Key) CanEncrypt() bool { + return C.key_can_encrypt(k.k) != 0 +} + +func (k *Key) CanSign() bool { + return C.key_can_sign(k.k) != 0 +} + +func (k *Key) CanCertify() bool { + return C.key_can_certify(k.k) != 0 +} + +func (k *Key) Secret() bool { + return C.key_secret(k.k) != 0 +} + +func (k *Key) CanAuthenticate() bool { + return C.key_can_authenticate(k.k) != 0 +} + +func (k *Key) IsQualified() bool { + return C.key_is_qualified(k.k) != 0 +} + +func (k *Key) Protocol() Protocol { + return Protocol(k.k.protocol) +} + +func (k *Key) IssuerSerial() string { + return C.GoString(k.k.issuer_serial) +} + +func (k *Key) IssuerName() string { + return C.GoString(k.k.issuer_name) +} + +func (k *Key) ChainID() string { + return C.GoString(k.k.chain_id) +} + +func (k *Key) OwnerTrust() Validity { + return Validity(k.k.owner_trust) +} + +func (k *Key) SubKeys() *SubKey { + if k.k.subkeys == nil { + return nil + } + return &SubKey{k: k.k.subkeys, parent: k} +} + +func (k *Key) UserIDs() *UserID { + if k.k.uids == nil { + return nil + } + return &UserID{u: k.k.uids, parent: k} +} + +func (k *Key) KeyListMode() KeyListMode { + return KeyListMode(k.k.keylist_mode) +} + +type SubKey struct { + k C.gpgme_subkey_t + parent *Key // make sure the key is not released when we have a reference to a subkey +} + +func (k *SubKey) Next() *SubKey { + if k.k.next == nil 
{ + return nil + } + return &SubKey{k: k.k.next, parent: k.parent} +} + +func (k *SubKey) Revoked() bool { + return C.subkey_revoked(k.k) != 0 +} + +func (k *SubKey) Expired() bool { + return C.subkey_expired(k.k) != 0 +} + +func (k *SubKey) Disabled() bool { + return C.subkey_disabled(k.k) != 0 +} + +func (k *SubKey) Invalid() bool { + return C.subkey_invalid(k.k) != 0 +} + +func (k *SubKey) Secret() bool { + return C.subkey_secret(k.k) != 0 +} + +func (k *SubKey) KeyID() string { + return C.GoString(k.k.keyid) +} + +func (k *SubKey) Fingerprint() string { + return C.GoString(k.k.fpr) +} + +func (k *SubKey) Created() time.Time { + if k.k.timestamp <= 0 { + return time.Time{} + } + return time.Unix(int64(k.k.timestamp), 0) +} + +func (k *SubKey) Expires() time.Time { + if k.k.expires <= 0 { + return time.Time{} + } + return time.Unix(int64(k.k.expires), 0) +} + +func (k *SubKey) CardNumber() string { + return C.GoString(k.k.card_number) +} + +type UserID struct { + u C.gpgme_user_id_t + parent *Key // make sure the key is not released when we have a reference to a user ID +} + +func (u *UserID) Next() *UserID { + if u.u.next == nil { + return nil + } + return &UserID{u: u.u.next, parent: u.parent} +} + +func (u *UserID) Revoked() bool { + return C.uid_revoked(u.u) != 0 +} + +func (u *UserID) Invalid() bool { + return C.uid_invalid(u.u) != 0 +} + +func (u *UserID) Validity() Validity { + return Validity(u.u.validity) +} + +func (u *UserID) UID() string { + return C.GoString(u.u.uid) +} + +func (u *UserID) Name() string { + return C.GoString(u.u.name) +} + +func (u *UserID) Comment() string { + return C.GoString(u.u.comment) +} + +func (u *UserID) Email() string { + return C.GoString(u.u.email) +} + +// This is somewhat of a horrible hack. We need to unset GPG_AGENT_INFO so that gpgme does not pass --use-agent to GPG. +// os.Unsetenv should be enough, but that only calls the underlying C library (which gpgme uses) if cgo is involved +// - and cgo can't be used in tests. So, provide this helper for test initialization. +func unsetenvGPGAgentInfo() { + v := C.CString("GPG_AGENT_INFO") + defer C.free(unsafe.Pointer(v)) + C.unsetenv(v) +} diff --git a/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/config.go index ccf3af9f..d4238362 100644 --- a/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/config.go +++ b/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/config.go @@ -17,34 +17,34 @@ package v1 // ImageConfig defines the execution parameters which should be used as a base when running a container using an image. type ImageConfig struct { // User defines the username or UID which the process in the container should run as. - User string `json:"User"` + User string `json:"User,omitempty"` // Memory defines the memory limit. - Memory int64 `json:"Memory"` + Memory int64 `json:"Memory,omitempty"` // MemorySwap defines the total memory usage limit (memory + swap). - MemorySwap int64 `json:"MemorySwap"` + MemorySwap int64 `json:"MemorySwap,omitempty"` // CPUShares is the CPU shares (relative weight vs. other containers). - CPUShares int64 `json:"CpuShares"` + CPUShares int64 `json:"CpuShares,omitempty"` // ExposedPorts a set of ports to expose from a container running this image. - ExposedPorts map[string]struct{} `json:"ExposedPorts"` + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` // Env is a list of environment variables to be used in a container. 
- Env []string `json:"Env"` + Env []string `json:"Env,omitempty"` // Entrypoint defines a list of arguments to use as the command to execute when the container starts. - Entrypoint []string `json:"Entrypoint"` + Entrypoint []string `json:"Entrypoint,omitempty"` // Cmd defines the default arguments to the entrypoint of the container. - Cmd []string `json:"Cmd"` + Cmd []string `json:"Cmd,omitempty"` // Volumes is a set of directories which should be created as data volumes in a container running this image. - Volumes map[string]struct{} `json:"Volumes"` + Volumes map[string]struct{} `json:"Volumes,omitempty"` // WorkingDir sets the current working directory of the entrypoint process in the container. - WorkingDir string `json:"WorkingDir"` + WorkingDir string `json:"WorkingDir,omitempty"` } // RootFS describes a layer content addresses @@ -59,28 +59,28 @@ type RootFS struct { // History describes the history of a layer. type History struct { // Created is the creation time. - Created string `json:"created"` + Created string `json:"created,omitempty"` // CreatedBy is the command which created the layer. - CreatedBy string `json:"created_by"` + CreatedBy string `json:"created_by,omitempty"` // Author is the author of the build point. - Author string `json:"author"` + Author string `json:"author,omitempty"` // Comment is a custom message set when creating the layer. - Comment string `json:"comment"` + Comment string `json:"comment,omitempty"` // EmptyLayer is used to mark if the history item created a filesystem diff. - EmptyLayer bool `json:"empty_layer"` + EmptyLayer bool `json:"empty_layer,omitempty"` } // Image is the JSON structure which describes some basic information about the image. type Image struct { // Created defines an ISO-8601 formatted combined date and time at which the image was created. - Created string `json:"created"` + Created string `json:"created,omitempty"` // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. - Author string `json:"author"` + Author string `json:"author,omitempty"` // Architecture is the CPU architecture which the binaries in this image are built to run on. Architecture string `json:"architecture"` @@ -89,11 +89,11 @@ type Image struct { OS string `json:"os"` // Config defines the execution parameters which should be used as a base when running a container using the image. - Config ImageConfig `json:"config"` + Config ImageConfig `json:"config,omitempty"` // RootFS references the layer content addresses used by the image. RootFS RootFS `json:"rootfs"` // History describes the history of each layer. - History []History `json:"history"` + History []History `json:"history,omitempty"` } diff --git a/vendor/src/github.com/opencontainers/image-spec/specs-go/descriptor.go b/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go similarity index 88% rename from vendor/src/github.com/opencontainers/image-spec/specs-go/descriptor.go rename to vendor/src/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go index 7a206559..83dde966 100644 --- a/vendor/src/github.com/opencontainers/image-spec/specs-go/descriptor.go +++ b/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package specs +package v1 // Descriptor describes the disposition of targeted content. 
type Descriptor struct { @@ -24,4 +24,7 @@ type Descriptor struct { // Size specifies the size in bytes of the blob. Size int64 `json:"size"` + + // URLs specifies a list of URLs from which this object MAY be downloaded + URLs []string `json:"urls,omitempty"` } diff --git a/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/layout.go new file mode 100644 index 00000000..c44486bf --- /dev/null +++ b/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/layout.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "regexp" + +// ImageLayout is the structure in the "oci-layout" file, found in the root +// of an OCI Image-layout directory. +type ImageLayout struct { + Version string `json:"imageLayoutVersion"` +} + +var ( + // RefsRegexp matches requirement of image-layout 'refs' charset. + RefsRegexp = regexp.MustCompile(`^[a-zA-Z0-9-._]+$`) +) diff --git a/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/manifest.go index 10bde087..7d160437 100644 --- a/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/manifest.go +++ b/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -22,11 +22,11 @@ type Manifest struct { // Config references a configuration object for a container, by digest. // The referenced configuration object is a JSON blob that the runtime uses to set up the container. - Config specs.Descriptor `json:"config"` + Config Descriptor `json:"config"` // Layers is an indexed list of layers referenced by the manifest. - Layers []specs.Descriptor `json:"layers"` + Layers []Descriptor `json:"layers"` // Annotations contains arbitrary metadata for the manifest list. - Annotations map[string]string `json:"annotations"` + Annotations map[string]string `json:"annotations,omitempty"` } diff --git a/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/manifest_list.go b/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/manifest_list.go index 8c02ee28..d127ab68 100644 --- a/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/manifest_list.go +++ b/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/manifest_list.go @@ -44,7 +44,7 @@ type Platform struct { // ManifestDescriptor describes a platform specific manifest. type ManifestDescriptor struct { - specs.Descriptor + Descriptor // Platform describes the platform which the image in the manifest runs on. Platform Platform `json:"platform"` @@ -58,5 +58,5 @@ type ManifestList struct { Manifests []ManifestDescriptor `json:"manifests"` // Annotations contains arbitrary metadata for the manifest list. 
- Annotations map[string]string `json:"annotations"` + Annotations map[string]string `json:"annotations,omitempty"` } diff --git a/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go index 90199dd3..d6592f54 100644 --- a/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ b/vendor/src/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -25,11 +25,11 @@ const ( MediaTypeImageManifestList = "application/vnd.oci.image.manifest.list.v1+json" // MediaTypeImageLayer is the media type used for layers referenced by the manifest. - MediaTypeImageLayer = "application/vnd.oci.image.layer.tar+gzip" + MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar+gzip" // MediaTypeImageLayerNonDistributable is the media type for layers referenced by // the manifest but with distribution restrictions. - MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.tar+gzip" + MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" // MediaTypeImageConfig specifies the media type for the image configuration. MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" diff --git a/vendor/src/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/src/github.com/opencontainers/image-spec/specs-go/version.go index 77704299..48df9acc 100644 --- a/vendor/src/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/src/github.com/opencontainers/image-spec/specs-go/version.go @@ -18,14 +18,14 @@ import "fmt" const ( // VersionMajor is for an API incompatible changes - VersionMajor = 0 + VersionMajor = 1 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 3 + VersionMinor = 0 // VersionPatch is for backwards-compatible bug fixes VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-dev" + VersionDev = "-rc2-dev" ) // Version is the specification version that the package types support. diff --git a/vendor/src/github.com/opencontainers/image-spec/specs-go/versioned.go b/vendor/src/github.com/opencontainers/image-spec/specs-go/versioned.go index ca0bd412..d01a1a89 100644 --- a/vendor/src/github.com/opencontainers/image-spec/specs-go/versioned.go +++ b/vendor/src/github.com/opencontainers/image-spec/specs-go/versioned.go @@ -22,5 +22,5 @@ type Versioned struct { SchemaVersion int `json:"schemaVersion"` // MediaType is the media type of this schema. - MediaType string `json:"mediaType,omitempty"` + MediaType string `json:"mediaType"` } diff --git a/vendor/src/github.com/pborman/uuid/.travis.yml b/vendor/src/github.com/pborman/uuid/.travis.yml new file mode 100644 index 00000000..d8156a60 --- /dev/null +++ b/vendor/src/github.com/pborman/uuid/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4.3 + - 1.5.3 + - tip + +script: + - go test -v ./... diff --git a/vendor/src/github.com/pborman/uuid/CONTRIBUTING.md b/vendor/src/github.com/pborman/uuid/CONTRIBUTING.md new file mode 100644 index 00000000..04fdf09f --- /dev/null +++ b/vendor/src/github.com/pborman/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). 
+
+You may have already signed it for other Google projects.
diff --git a/vendor/src/github.com/pborman/uuid/CONTRIBUTORS b/vendor/src/github.com/pborman/uuid/CONTRIBUTORS
new file mode 100644
index 00000000..b382a04e
--- /dev/null
+++ b/vendor/src/github.com/pborman/uuid/CONTRIBUTORS
@@ -0,0 +1 @@
+Paul Borman
diff --git a/vendor/src/github.com/pborman/uuid/LICENSE b/vendor/src/github.com/pborman/uuid/LICENSE
new file mode 100644
index 00000000..5dc68268
--- /dev/null
+++ b/vendor/src/github.com/pborman/uuid/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/src/github.com/pborman/uuid/README.md b/vendor/src/github.com/pborman/uuid/README.md
new file mode 100644
index 00000000..f023d47c
--- /dev/null
+++ b/vendor/src/github.com/pborman/uuid/README.md
@@ -0,0 +1,13 @@
+This project was automatically exported from code.google.com/p/go-uuid
+
+# uuid ![build status](https://travis-ci.org/pborman/uuid.svg?branch=master)
+The uuid package generates and inspects UUIDs based on [RFC 4122](http://tools.ietf.org/html/rfc4122) and DCE 1.1: Authentication and Security Services.
+
+###### Install
+`go get github.com/pborman/uuid`
+
+###### Documentation
+[![GoDoc](https://godoc.org/github.com/pborman/uuid?status.svg)](http://godoc.org/github.com/pborman/uuid)
+
+Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here:
+http://godoc.org/github.com/pborman/uuid
diff --git a/vendor/src/github.com/pborman/uuid/dce.go b/vendor/src/github.com/pborman/uuid/dce.go
new file mode 100644
index 00000000..50a0f2d0
--- /dev/null
+++ b/vendor/src/github.com/pborman/uuid/dce.go
@@ -0,0 +1,84 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"fmt"
+	"os"
+)
+
+// A Domain represents a Version 2 domain
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
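+// NewDCESecurity encodes the chosen domain in octet 9 of the UUID it returns,
+// which is where the Domain accessor below reads it back from.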
+const (
+	Person = Domain(0)
+	Group  = Domain(1)
+	Org    = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the user's UID for the Person
+// domain and the user's GID for the Group. The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) UUID {
+	uuid := NewUUID()
+	if uuid != nil {
+		uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+		uuid[9] = byte(domain)
+		binary.BigEndian.PutUint32(uuid[0:], id)
+	}
+	return uuid
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+//  NewDCEPerson(Person, uint32(os.Getuid()))
+func NewDCEPerson() UUID {
+	return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+//  NewDCEGroup(Group, uint32(os.Getgid()))
+func NewDCEGroup() UUID {
+	return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID or false.
+func (uuid UUID) Domain() (Domain, bool) {
+	if v, _ := uuid.Version(); v != 2 {
+		return 0, false
+	}
+	return Domain(uuid[9]), true
+}
+
+// Id returns the id for a Version 2 UUID or false.
+func (uuid UUID) Id() (uint32, bool) {
+	if v, _ := uuid.Version(); v != 2 {
+		return 0, false
+	}
+	return binary.BigEndian.Uint32(uuid[0:4]), true
+}
+
+func (d Domain) String() string {
+	switch d {
+	case Person:
+		return "Person"
+	case Group:
+		return "Group"
+	case Org:
+		return "Org"
+	}
+	return fmt.Sprintf("Domain%d", int(d))
+}
diff --git a/vendor/src/github.com/pborman/uuid/doc.go b/vendor/src/github.com/pborman/uuid/doc.go
new file mode 100644
index 00000000..d8bd013e
--- /dev/null
+++ b/vendor/src/github.com/pborman/uuid/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The uuid package generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services.
+package uuid
diff --git a/vendor/src/github.com/pborman/uuid/hash.go b/vendor/src/github.com/pborman/uuid/hash.go
new file mode 100644
index 00000000..a0420c1e
--- /dev/null
+++ b/vendor/src/github.com/pborman/uuid/hash.go
@@ -0,0 +1,53 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"crypto/md5"
+	"crypto/sha1"
+	"hash"
+)
+
+// Well known Name Space IDs and UUIDs
+var (
+	NameSpace_DNS  = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+	NameSpace_URL  = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
+	NameSpace_OID  = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
+	NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+	NIL            = Parse("00000000-0000-0000-0000-000000000000")
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h. The hash should be at least 16 bytes in length. The
+// first 16 bytes of the hash are used to form the UUID. The version of the
+// UUID will be the lower 4 bits of version. NewHash is used to implement
+// NewMD5 and NewSHA1.
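+//
+// For example, a Version 5 (name-based, SHA-1) UUID for a DNS name can be
+// derived with (illustrative input; this is exactly what NewSHA1 wraps):
+//
+//  NewHash(sha1.New(), NameSpace_DNS, []byte("example.com"), 5)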
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space) + h.Write([]byte(data)) + s := h.Sum(nil) + uuid := make([]byte, 16) + copy(uuid, s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/src/github.com/pborman/uuid/json.go b/vendor/src/github.com/pborman/uuid/json.go new file mode 100644 index 00000000..9dda1dfb --- /dev/null +++ b/vendor/src/github.com/pborman/uuid/json.go @@ -0,0 +1,34 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "errors" + +func (u UUID) MarshalJSON() ([]byte, error) { + if len(u) != 16 { + return []byte(`""`), nil + } + var js [38]byte + js[0] = '"' + encodeHex(js[1:], u) + js[37] = '"' + return js[:], nil +} + +func (u *UUID) UnmarshalJSON(data []byte) error { + if string(data) == `""` { + return nil + } + if data[0] != '"' { + return errors.New("invalid UUID format") + } + data = data[1 : len(data)-1] + uu := Parse(string(data)) + if uu == nil { + return errors.New("invalid UUID format") + } + *u = uu + return nil +} diff --git a/vendor/src/github.com/pborman/uuid/node.go b/vendor/src/github.com/pborman/uuid/node.go new file mode 100644 index 00000000..42d60da8 --- /dev/null +++ b/vendor/src/github.com/pborman/uuid/node.go @@ -0,0 +1,117 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "net" + "sync" +) + +var ( + nodeMu sync.Mutex + interfaces []net.Interface // cached list of interfaces + ifname string // name of interface being used + nodeID []byte // hardware for version 1 UUIDs +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil && name != "" { + return false + } + } + + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + if setNodeID(ifs.HardwareAddr) { + ifname = ifs.Name + return true + } + } + } + + // We found no interfaces with a valid hardware address. 
If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + if nodeID == nil { + nodeID = make([]byte, 6) + } + randomBits(nodeID) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == nil { + setNodeInterface("") + } + nid := make([]byte, 6) + copy(nid, nodeID) + return nid +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + if setNodeID(id) { + ifname = "user" + return true + } + return false +} + +func setNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + if nodeID == nil { + nodeID = make([]byte, 6) + } + copy(nodeID, id) + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + if len(uuid) != 16 { + return nil + } + node := make([]byte, 6) + copy(node, uuid[10:]) + return node +} diff --git a/vendor/src/github.com/pborman/uuid/sql.go b/vendor/src/github.com/pborman/uuid/sql.go new file mode 100644 index 00000000..d015bfd1 --- /dev/null +++ b/vendor/src/github.com/pborman/uuid/sql.go @@ -0,0 +1,66 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "errors" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src.(type) { + case string: + // if an empty UUID comes from a table, we return a null UUID + if src.(string) == "" { + return nil + } + + // see uuid.Parse for required string format + parsed := Parse(src.(string)) + + if parsed == nil { + return errors.New("Scan: invalid UUID format") + } + + *uuid = parsed + case []byte: + b := src.([]byte) + + // if an empty UUID comes from a table, we return a null UUID + if len(b) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(b) == 16 { + *uuid = UUID(b) + } else { + u := Parse(string(b)) + + if u == nil { + return errors.New("Scan: invalid UUID format") + } + + *uuid = u + } + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/src/github.com/pborman/uuid/time.go b/vendor/src/github.com/pborman/uuid/time.go new file mode 100644 index 00000000..eedf2421 --- /dev/null +++ b/vendor/src/github.com/pborman/uuid/time.go @@ -0,0 +1,132 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package uuid
+
+import (
+	"encoding/binary"
+	"sync"
+	"time"
+)
+
+// A Time represents a time as the number of 100s of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+	lillian    = 2299160          // Julian day of 15 Oct 1582
+	unix       = 2440587          // Julian day of 1 Jan 1970
+	epoch      = unix - lillian   // Days between epochs
+	g1582      = epoch * 86400    // seconds between epochs
+	g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
+)
+
+var (
+	timeMu    sync.Mutex
+	lasttime  uint64 // last time we returned
+	clock_seq uint16 // clock sequence for this run
+
+	timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds since the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+	sec = int64(t - g1582ns100)
+	nsec = (sec % 10000000) * 100
+	sec /= 10000000
+	return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// clock sequence as well as adjusting the clock sequence as needed. An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	return getTime()
+}
+
+func getTime() (Time, uint16, error) {
+	t := timeNow()
+
+	// If we don't have a clock sequence already, set one.
+	if clock_seq == 0 {
+		setClockSequence(-1)
+	}
+	now := uint64(t.UnixNano()/100) + g1582ns100
+
+	// If time has gone backwards with this clock sequence then we
+	// increment the clock sequence
+	if now <= lasttime {
+		clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
+	}
+	lasttime = now
+	return Time(now), clock_seq, nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set. The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated. Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
+func ClockSequence() int {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	return clockSequence()
+}
+
+func clockSequence() int {
+	if clock_seq == 0 {
+		setClockSequence(-1)
+	}
+	return int(clock_seq & 0x3fff)
+}
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq.
+// Setting to -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+	if seq == -1 {
+		var b [2]byte
+		randomBits(b[:]) // clock sequence
+		seq = int(b[0])<<8 | int(b[1])
+	}
+	old_seq := clock_seq
+	clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+	if old_seq != clock_seq {
+		lasttime = 0
+	}
+}
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid. It returns false if uuid is not valid. The time is only well defined
+// for version 1 and 2 UUIDs.
+func (uuid UUID) Time() (Time, bool) {
+	if len(uuid) != 16 {
+		return 0, false
+	}
+	time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+	time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+	time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+	return Time(time), true
+}
+
+// ClockSequence returns the clock sequence encoded in uuid. It returns false
+// if uuid is not valid. The clock sequence is only well defined for version 1
+// and 2 UUIDs.
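+//
+// For example (illustrative):
+//
+//  seq, ok := NewUUID().ClockSequence() // ok is true for a valid UUID;
+//                                       // seq is the low 14 bits of octets 8-9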
+func (uuid UUID) ClockSequence() (int, bool) {
+	if len(uuid) != 16 {
+		return 0, false
+	}
+	return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true
+}
diff --git a/vendor/src/github.com/pborman/uuid/util.go b/vendor/src/github.com/pborman/uuid/util.go
new file mode 100644
index 00000000..fc8e052c
--- /dev/null
+++ b/vendor/src/github.com/pborman/uuid/util.go
@@ -0,0 +1,43 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+	if _, err := io.ReadFull(rander, b); err != nil {
+		panic(err.Error()) // rand should never fail
+	}
+}
+
+// xvalues returns the value of a byte as a hexadecimal digit or 255.
+var xvalues = [256]byte{
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts the first two hex bytes of x into a byte.
+func xtob(x string) (byte, bool) {
+	b1 := xvalues[x[0]]
+	b2 := xvalues[x[1]]
+	return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}
diff --git a/vendor/src/github.com/pborman/uuid/uuid.go b/vendor/src/github.com/pborman/uuid/uuid.go
new file mode 100644
index 00000000..7c643cf0
--- /dev/null
+++ b/vendor/src/github.com/pborman/uuid/uuid.go
@@ -0,0 +1,201 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// Array is a pass-by-value UUID that can be used as an efficient key in a map.
+type Array [16]byte
+
+// UUID converts uuid into a slice.
+func (uuid Array) UUID() UUID {
+	return uuid[:]
+}
+
+// String returns the string representation of uuid,
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
+func (uuid Array) String() string {
+	return uuid.UUID().String()
+}
+
+// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
+// 4122.
+type UUID []byte
+
+// A Version represents a UUID's version.
+type Version byte
+
+// A Variant represents a UUID's variant.
+type Variant byte
+
+// Constants returned by Variant.
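+// The variant is encoded in the top three bits of octet 8; see the
+// Variant method below.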
+const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// New returns a new random (version 4) UUID as a string. It is a convenience +// function for NewRandom().String(). +func New() string { + return NewRandom().String() +} + +// Parse decodes s into a UUID or returns nil. Both the UUID form of +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. +func Parse(s string) UUID { + if len(s) == 36+9 { + if strings.ToLower(s[:9]) != "urn:uuid:" { + return nil + } + s = s[9:] + } else if len(s) != 36 { + return nil + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return nil + } + var uuid [16]byte + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + if v, ok := xtob(s[x:]); !ok { + return nil + } else { + uuid[i] = v + } + } + return uuid[:] +} + +// Equal returns true if uuid1 and uuid2 are equal. +func Equal(uuid1, uuid2 UUID) bool { + return bytes.Equal(uuid1, uuid2) +} + +// Array returns an array representation of uuid that can be used as a map key. +// Array panics if uuid is not valid. +func (uuid UUID) Array() Array { + if len(uuid) != 16 { + panic("invalid uuid") + } + var a Array + copy(a[:], uuid) + return a +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + if len(uuid) != 16 { + return "" + } + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + if len(uuid) != 16 { + return "" + } + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst[:], uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. It returns Invalid if +// uuid is invalid. +func (uuid UUID) Variant() Variant { + if len(uuid) != 16 { + return Invalid + } + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. It returns false if uuid is not +// valid. +func (uuid UUID) Version() (Version, bool) { + if len(uuid) != 16 { + return 0, false + } + return Version(uuid[6] >> 4), true +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. 
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+	if r == nil {
+		rander = rand.Reader
+		return
+	}
+	rander = r
+}
diff --git a/vendor/src/github.com/pborman/uuid/version1.go b/vendor/src/github.com/pborman/uuid/version1.go
new file mode 100644
index 00000000..0127eacf
--- /dev/null
+++ b/vendor/src/github.com/pborman/uuid/version1.go
@@ -0,0 +1,41 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set NewUUID returns nil. If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns nil.
+func NewUUID() UUID {
+	if nodeID == nil {
+		SetNodeInterface("")
+	}
+
+	now, seq, err := GetTime()
+	if err != nil {
+		return nil
+	}
+
+	uuid := make([]byte, 16)
+
+	time_low := uint32(now & 0xffffffff)
+	time_mid := uint16((now >> 32) & 0xffff)
+	time_hi := uint16((now >> 48) & 0x0fff)
+	time_hi |= 0x1000 // Version 1
+
+	binary.BigEndian.PutUint32(uuid[0:], time_low)
+	binary.BigEndian.PutUint16(uuid[4:], time_mid)
+	binary.BigEndian.PutUint16(uuid[6:], time_hi)
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+	copy(uuid[10:], nodeID)
+
+	return uuid
+}
diff --git a/vendor/src/github.com/pborman/uuid/version4.go b/vendor/src/github.com/pborman/uuid/version4.go
new file mode 100644
index 00000000..b3d4a368
--- /dev/null
+++ b/vendor/src/github.com/pborman/uuid/version4.go
@@ -0,0 +1,25 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+// NewRandom returns a random (Version 4) UUID or panics.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+// Randomly generated UUIDs have 122 random bits. One's annual risk of being
+// hit by a meteorite is estimated to be one chance in 17 billion, that
+// means the probability is about 0.00000000006 (6 × 10⁻¹¹),
+// equivalent to the odds of creating a few tens of trillions of UUIDs in a
+// year and having one duplicate.
+func NewRandom() UUID {
+	uuid := make([]byte, 16)
+	randomBits([]byte(uuid))
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid
+}
diff --git a/vendor/src/gopkg.in/cheggaaa/pb.v1/.travis.yml b/vendor/src/gopkg.in/cheggaaa/pb.v1/.travis.yml
new file mode 100644
index 00000000..8ee0750d
--- /dev/null
+++ b/vendor/src/gopkg.in/cheggaaa/pb.v1/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+go:
+- 1.4.2
+sudo: false
+os:
+- linux
+- osx
diff --git a/vendor/src/gopkg.in/cheggaaa/pb.v1/LICENSE b/vendor/src/gopkg.in/cheggaaa/pb.v1/LICENSE
new file mode 100644
index 00000000..51197033
--- /dev/null
+++ b/vendor/src/gopkg.in/cheggaaa/pb.v1/LICENSE
@@ -0,0 +1,12 @@
+Copyright (c) 2012-2015, Sergey Cherepanov
+All rights reserved.
+ +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/src/gopkg.in/cheggaaa/pb.v1/README.md b/vendor/src/gopkg.in/cheggaaa/pb.v1/README.md new file mode 100644 index 00000000..31babb30 --- /dev/null +++ b/vendor/src/gopkg.in/cheggaaa/pb.v1/README.md @@ -0,0 +1,176 @@ +# Terminal progress bar for Go + +Simple progress bar for console programs. + + +## Installation + +``` +go get gopkg.in/cheggaaa/pb.v1 +``` + +## Usage + +```Go +package main + +import ( + "gopkg.in/cheggaaa/pb.v1" + "time" +) + +func main() { + count := 100000 + bar := pb.StartNew(count) + for i := 0; i < count; i++ { + bar.Increment() + time.Sleep(time.Millisecond) + } + bar.FinishPrint("The End!") +} + +``` + +Result will be like this: + +``` +> go run test.go +37158 / 100000 [================>_______________________________] 37.16% 1m11s +``` + +## Customization + +```Go +// create bar +bar := pb.New(count) + +// refresh info every second (default 200ms) +bar.SetRefreshRate(time.Second) + +// show percents (by default already true) +bar.ShowPercent = true + +// show bar (by default already true) +bar.ShowBar = true + +// no counters +bar.ShowCounters = false + +// show "time left" +bar.ShowTimeLeft = true + +// show average speed +bar.ShowSpeed = true + +// sets the width of the progress bar +bar.SetWidth(80) + +// sets the width of the progress bar, but if terminal size smaller will be ignored +bar.SetMaxWidth(80) + +// convert output to readable format (like KB, MB) +bar.SetUnits(pb.U_BYTES) + +// and start +bar.Start() +``` + +## Progress bar for IO Operations + +```go +// create and start bar +bar := pb.New(myDataLen).SetUnits(pb.U_BYTES) +bar.Start() + +// my io.Reader +r := myReader + +// my io.Writer +w := myWriter + +// create proxy reader +reader := bar.NewProxyReader(r) + +// and copy from pb reader +io.Copy(w, reader) + +``` + +```go +// create and start bar +bar := pb.New(myDataLen).SetUnits(pb.U_BYTES) +bar.Start() + +// my io.Reader +r := myReader + +// my io.Writer +w := myWriter + +// create multi writer +writer := io.MultiWriter(w, bar) + +// and copy +io.Copy(writer, r) + +bar.Finish() +``` + +## Custom 
Progress Bar Look-and-feel + +```go +bar.Format("<.- >") +``` + +## Multiple Progress Bars (experimental and unstable) + +Do not print to terminal while pool is active. + +```go +package main + +import ( + "math/rand" + "sync" + "time" + + "gopkg.in/cheggaaa/pb.v1" +) + +func main() { + // create bars + first := pb.New(200).Prefix("First ") + second := pb.New(200).Prefix("Second ") + third := pb.New(200).Prefix("Third ") + // start pool + pool, err := pb.StartPool(first, second, third) + if err != nil { + panic(err) + } + // update bars + wg := new(sync.WaitGroup) + for _, bar := range []*pb.ProgressBar{first, second, third} { + wg.Add(1) + go func(cb *pb.ProgressBar) { + for n := 0; n < 200; n++ { + cb.Increment() + time.Sleep(time.Millisecond * time.Duration(rand.Intn(100))) + } + cb.Finish() + wg.Done() + }(bar) + } + wg.Wait() + // close pool + pool.Stop() +} +``` + +The result will be as follows: + +``` +$ go run example/multiple.go +First 141 / 1000 [===============>---------------------------------------] 14.10 % 44s +Second 139 / 1000 [==============>---------------------------------------] 13.90 % 44s +Third 152 / 1000 [================>--------------------------------------] 15.20 % 40s +``` diff --git a/vendor/src/gopkg.in/cheggaaa/pb.v1/format.go b/vendor/src/gopkg.in/cheggaaa/pb.v1/format.go new file mode 100644 index 00000000..d5aeff79 --- /dev/null +++ b/vendor/src/gopkg.in/cheggaaa/pb.v1/format.go @@ -0,0 +1,87 @@ +package pb + +import ( + "fmt" + "strings" + "time" +) + +type Units int + +const ( + // U_NO are default units, they represent a simple value and are not formatted at all. + U_NO Units = iota + // U_BYTES units are formatted in a human readable way (b, Bb, Mb, ...) + U_BYTES + // U_DURATION units are formatted in a human readable way (3h14m15s) + U_DURATION +) + +func Format(i int64) *formatter { + return &formatter{n: i} +} + +type formatter struct { + n int64 + unit Units + width int + perSec bool +} + +func (f *formatter) Value(n int64) *formatter { + f.n = n + return f +} + +func (f *formatter) To(unit Units) *formatter { + f.unit = unit + return f +} + +func (f *formatter) Width(width int) *formatter { + f.width = width + return f +} + +func (f *formatter) PerSec() *formatter { + f.perSec = true + return f +} + +func (f *formatter) String() (out string) { + switch f.unit { + case U_BYTES: + out = formatBytes(f.n) + case U_DURATION: + d := time.Duration(f.n) + if d > time.Hour*24 { + out = fmt.Sprintf("%dd", d/24/time.Hour) + d -= (d / time.Hour / 24) * (time.Hour * 24) + } + out = fmt.Sprintf("%s%v", out, d) + default: + out = fmt.Sprintf(fmt.Sprintf("%%%dd", f.width), f.n) + } + if f.perSec { + out += "/s" + } + return +} + +// Convert bytes to human readable string. 
Like a 2 MB, 64.2 KB, 52 B +func formatBytes(i int64) (result string) { + switch { + case i > (1024 * 1024 * 1024 * 1024): + result = fmt.Sprintf("%.02f TB", float64(i)/1024/1024/1024/1024) + case i > (1024 * 1024 * 1024): + result = fmt.Sprintf("%.02f GB", float64(i)/1024/1024/1024) + case i > (1024 * 1024): + result = fmt.Sprintf("%.02f MB", float64(i)/1024/1024) + case i > 1024: + result = fmt.Sprintf("%.02f KB", float64(i)/1024) + default: + result = fmt.Sprintf("%d B", i) + } + result = strings.Trim(result, " ") + return +} diff --git a/vendor/src/gopkg.in/cheggaaa/pb.v1/pb.go b/vendor/src/gopkg.in/cheggaaa/pb.v1/pb.go new file mode 100644 index 00000000..f1f15bff --- /dev/null +++ b/vendor/src/gopkg.in/cheggaaa/pb.v1/pb.go @@ -0,0 +1,444 @@ +// Simple console progress bars +package pb + +import ( + "fmt" + "io" + "math" + "strings" + "sync" + "sync/atomic" + "time" + "unicode/utf8" +) + +// Current version +const Version = "1.0.6" + +const ( + // Default refresh rate - 200ms + DEFAULT_REFRESH_RATE = time.Millisecond * 200 + FORMAT = "[=>-]" +) + +// DEPRECATED +// variables for backward compatibility, from now do not work +// use pb.Format and pb.SetRefreshRate +var ( + DefaultRefreshRate = DEFAULT_REFRESH_RATE + BarStart, BarEnd, Empty, Current, CurrentN string +) + +// Create new progress bar object +func New(total int) *ProgressBar { + return New64(int64(total)) +} + +// Create new progress bar object using int64 as total +func New64(total int64) *ProgressBar { + pb := &ProgressBar{ + Total: total, + RefreshRate: DEFAULT_REFRESH_RATE, + ShowPercent: true, + ShowCounters: true, + ShowBar: true, + ShowTimeLeft: true, + ShowFinalTime: true, + Units: U_NO, + ManualUpdate: false, + finish: make(chan struct{}), + currentValue: -1, + mu: new(sync.Mutex), + } + return pb.Format(FORMAT) +} + +// Create new object and start +func StartNew(total int) *ProgressBar { + return New(total).Start() +} + +// Callback for custom output +// For example: +// bar.Callback = func(s string) { +// mySuperPrint(s) +// } +// +type Callback func(out string) + +type ProgressBar struct { + current int64 // current must be first member of struct (https://code.google.com/p/go/issues/detail?id=5278) + + Total int64 + RefreshRate time.Duration + ShowPercent, ShowCounters bool + ShowSpeed, ShowTimeLeft, ShowBar bool + ShowFinalTime bool + Output io.Writer + Callback Callback + NotPrint bool + Units Units + Width int + ForceWidth bool + ManualUpdate bool + AutoStat bool + + // Default width for the time box. + UnitsWidth int + TimeBoxWidth int + + finishOnce sync.Once //Guards isFinish + finish chan struct{} + isFinish bool + + startTime time.Time + startValue int64 + currentValue int64 + + prefix, postfix string + + mu *sync.Mutex + lastPrint string + + BarStart string + BarEnd string + Empty string + Current string + CurrentN string + + AlwaysUpdate bool +} + +// Start print +func (pb *ProgressBar) Start() *ProgressBar { + pb.startTime = time.Now() + pb.startValue = pb.current + if pb.Total == 0 { + pb.ShowTimeLeft = false + pb.ShowPercent = false + pb.AutoStat = false + } + if !pb.ManualUpdate { + pb.Update() // Initial printing of the bar before running the bar refresher. 
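+		// refresher re-renders the bar every RefreshRate until Finish
+		// closes the pb.finish channel.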
+ go pb.refresher() + } + return pb +} + +// Increment current value +func (pb *ProgressBar) Increment() int { + return pb.Add(1) +} + +// Get current value +func (pb *ProgressBar) Get() int64 { + c := atomic.LoadInt64(&pb.current) + return c +} + +// Set current value +func (pb *ProgressBar) Set(current int) *ProgressBar { + return pb.Set64(int64(current)) +} + +// Set64 sets the current value as int64 +func (pb *ProgressBar) Set64(current int64) *ProgressBar { + atomic.StoreInt64(&pb.current, current) + return pb +} + +// Add to current value +func (pb *ProgressBar) Add(add int) int { + return int(pb.Add64(int64(add))) +} + +func (pb *ProgressBar) Add64(add int64) int64 { + return atomic.AddInt64(&pb.current, add) +} + +// Set prefix string +func (pb *ProgressBar) Prefix(prefix string) *ProgressBar { + pb.prefix = prefix + return pb +} + +// Set postfix string +func (pb *ProgressBar) Postfix(postfix string) *ProgressBar { + pb.postfix = postfix + return pb +} + +// Set custom format for bar +// Example: bar.Format("[=>_]") +// Example: bar.Format("[\x00=\x00>\x00-\x00]") // \x00 is the delimiter +func (pb *ProgressBar) Format(format string) *ProgressBar { + var formatEntries []string + if len(format) == 5 { + formatEntries = strings.Split(format, "") + } else { + formatEntries = strings.Split(format, "\x00") + } + if len(formatEntries) == 5 { + pb.BarStart = formatEntries[0] + pb.BarEnd = formatEntries[4] + pb.Empty = formatEntries[3] + pb.Current = formatEntries[1] + pb.CurrentN = formatEntries[2] + } + return pb +} + +// Set bar refresh rate +func (pb *ProgressBar) SetRefreshRate(rate time.Duration) *ProgressBar { + pb.RefreshRate = rate + return pb +} + +// Set units +// bar.SetUnits(U_NO) - by default +// bar.SetUnits(U_BYTES) - for Mb, Kb, etc +func (pb *ProgressBar) SetUnits(units Units) *ProgressBar { + pb.Units = units + return pb +} + +// Set max width, if width is bigger than terminal width, will be ignored +func (pb *ProgressBar) SetMaxWidth(width int) *ProgressBar { + pb.Width = width + pb.ForceWidth = false + return pb +} + +// Set bar width +func (pb *ProgressBar) SetWidth(width int) *ProgressBar { + pb.Width = width + pb.ForceWidth = true + return pb +} + +// End print +func (pb *ProgressBar) Finish() { + //Protect multiple calls + pb.finishOnce.Do(func() { + close(pb.finish) + pb.write(atomic.LoadInt64(&pb.current)) + switch { + case pb.Output != nil: + fmt.Fprintln(pb.Output) + case !pb.NotPrint: + fmt.Println() + } + pb.isFinish = true + }) +} + +// End print and write string 'str' +func (pb *ProgressBar) FinishPrint(str string) { + pb.Finish() + if pb.Output != nil { + fmt.Fprintln(pb.Output, str) + } else { + fmt.Println(str) + } +} + +// implement io.Writer +func (pb *ProgressBar) Write(p []byte) (n int, err error) { + n = len(p) + pb.Add(n) + return +} + +// implement io.Reader +func (pb *ProgressBar) Read(p []byte) (n int, err error) { + n = len(p) + pb.Add(n) + return +} + +// Create new proxy reader over bar +// Takes io.Reader or io.ReadCloser +func (pb *ProgressBar) NewProxyReader(r io.Reader) *Reader { + return &Reader{r, pb} +} + +func (pb *ProgressBar) write(current int64) { + width := pb.GetWidth() + + var percentBox, countersBox, timeLeftBox, speedBox, barBox, end, out string + + // percents + if pb.ShowPercent { + var percent float64 + if pb.Total > 0 { + percent = float64(current) / (float64(pb.Total) / float64(100)) + } else { + percent = float64(current) / float64(100) + } + percentBox = fmt.Sprintf(" %6.02f%%", percent) + } + + // counters + if 
pb.ShowCounters { + current := Format(current).To(pb.Units).Width(pb.UnitsWidth) + if pb.Total > 0 { + total := Format(pb.Total).To(pb.Units).Width(pb.UnitsWidth) + countersBox = fmt.Sprintf(" %s / %s ", current, total) + } else { + countersBox = fmt.Sprintf(" %s / ? ", current) + } + } + + // time left + fromStart := time.Now().Sub(pb.startTime) + currentFromStart := current - pb.startValue + select { + case <-pb.finish: + if pb.ShowFinalTime { + var left time.Duration + left = (fromStart / time.Second) * time.Second + timeLeftBox = fmt.Sprintf(" %s", left.String()) + } + default: + if pb.ShowTimeLeft && currentFromStart > 0 { + perEntry := fromStart / time.Duration(currentFromStart) + var left time.Duration + if pb.Total > 0 { + left = time.Duration(pb.Total-currentFromStart) * perEntry + left = (left / time.Second) * time.Second + } else { + left = time.Duration(currentFromStart) * perEntry + left = (left / time.Second) * time.Second + } + timeLeft := Format(int64(left)).To(U_DURATION).String() + timeLeftBox = fmt.Sprintf(" %s", timeLeft) + } + } + + if len(timeLeftBox) < pb.TimeBoxWidth { + timeLeftBox = fmt.Sprintf("%s%s", strings.Repeat(" ", pb.TimeBoxWidth-len(timeLeftBox)), timeLeftBox) + } + + // speed + if pb.ShowSpeed && currentFromStart > 0 { + fromStart := time.Now().Sub(pb.startTime) + speed := float64(currentFromStart) / (float64(fromStart) / float64(time.Second)) + speedBox = " " + Format(int64(speed)).To(pb.Units).Width(pb.UnitsWidth).PerSec().String() + } + + barWidth := escapeAwareRuneCountInString(countersBox + pb.BarStart + pb.BarEnd + percentBox + timeLeftBox + speedBox + pb.prefix + pb.postfix) + // bar + if pb.ShowBar { + size := width - barWidth + if size > 0 { + if pb.Total > 0 { + curCount := int(math.Ceil((float64(current) / float64(pb.Total)) * float64(size))) + emptCount := size - curCount + barBox = pb.BarStart + if emptCount < 0 { + emptCount = 0 + } + if curCount > size { + curCount = size + } + if emptCount <= 0 { + barBox += strings.Repeat(pb.Current, curCount) + } else if curCount > 0 { + barBox += strings.Repeat(pb.Current, curCount-1) + pb.CurrentN + } + barBox += strings.Repeat(pb.Empty, emptCount) + pb.BarEnd + } else { + barBox = pb.BarStart + pos := size - int(current)%int(size) + if pos-1 > 0 { + barBox += strings.Repeat(pb.Empty, pos-1) + } + barBox += pb.Current + if size-pos-1 > 0 { + barBox += strings.Repeat(pb.Empty, size-pos-1) + } + barBox += pb.BarEnd + } + } + } + + // check len + out = pb.prefix + countersBox + barBox + percentBox + speedBox + timeLeftBox + pb.postfix + if escapeAwareRuneCountInString(out) < width { + end = strings.Repeat(" ", width-utf8.RuneCountInString(out)) + } + + // and print! + pb.mu.Lock() + pb.lastPrint = out + end + pb.mu.Unlock() + switch { + case pb.isFinish: + return + case pb.Output != nil: + fmt.Fprint(pb.Output, "\r"+out+end) + case pb.Callback != nil: + pb.Callback(out + end) + case !pb.NotPrint: + fmt.Print("\r" + out + end) + } +} + +// GetTerminalWidth - returns terminal width for all platforms. 
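+//
+// For example (illustrative), to keep a custom bar width within the terminal:
+//
+//  if w, err := GetTerminalWidth(); err == nil {
+//      bar.SetMaxWidth(w)
+//  }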
+func GetTerminalWidth() (int, error) { + return terminalWidth() +} + +func (pb *ProgressBar) GetWidth() int { + if pb.ForceWidth { + return pb.Width + } + + width := pb.Width + termWidth, _ := terminalWidth() + if width == 0 || termWidth <= width { + width = termWidth + } + + return width +} + +// Write the current state of the progressbar +func (pb *ProgressBar) Update() { + c := atomic.LoadInt64(&pb.current) + if pb.AlwaysUpdate || c != pb.currentValue { + pb.write(c) + pb.currentValue = c + } + if pb.AutoStat { + if c == 0 { + pb.startTime = time.Now() + pb.startValue = 0 + } else if c >= pb.Total && pb.isFinish != true { + pb.Finish() + } + } +} + +func (pb *ProgressBar) String() string { + return pb.lastPrint +} + +// Internal loop for refreshing the progressbar +func (pb *ProgressBar) refresher() { + for { + select { + case <-pb.finish: + return + case <-time.After(pb.RefreshRate): + pb.Update() + } + } +} + +type window struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} diff --git a/vendor/src/gopkg.in/cheggaaa/pb.v1/pb_appengine.go b/vendor/src/gopkg.in/cheggaaa/pb.v1/pb_appengine.go new file mode 100644 index 00000000..d85dbc3b --- /dev/null +++ b/vendor/src/gopkg.in/cheggaaa/pb.v1/pb_appengine.go @@ -0,0 +1,11 @@ +// +build appengine + +package pb + +import "errors" + +// terminalWidth returns width of the terminal, which is not supported +// and should always failed on appengine classic which is a sandboxed PaaS. +func terminalWidth() (int, error) { + return 0, errors.New("Not supported") +} diff --git a/vendor/src/gopkg.in/cheggaaa/pb.v1/pb_nix.go b/vendor/src/gopkg.in/cheggaaa/pb.v1/pb_nix.go new file mode 100644 index 00000000..c06097b4 --- /dev/null +++ b/vendor/src/gopkg.in/cheggaaa/pb.v1/pb_nix.go @@ -0,0 +1,8 @@ +// +build linux darwin freebsd netbsd openbsd dragonfly +// +build !appengine + +package pb + +import "syscall" + +const sysIoctl = syscall.SYS_IOCTL diff --git a/vendor/src/gopkg.in/cheggaaa/pb.v1/pb_solaris.go b/vendor/src/gopkg.in/cheggaaa/pb.v1/pb_solaris.go new file mode 100644 index 00000000..b7d461e1 --- /dev/null +++ b/vendor/src/gopkg.in/cheggaaa/pb.v1/pb_solaris.go @@ -0,0 +1,6 @@ +// +build solaris +// +build !appengine + +package pb + +const sysIoctl = 54 diff --git a/vendor/src/gopkg.in/cheggaaa/pb.v1/pb_win.go b/vendor/src/gopkg.in/cheggaaa/pb.v1/pb_win.go new file mode 100644 index 00000000..72f68283 --- /dev/null +++ b/vendor/src/gopkg.in/cheggaaa/pb.v1/pb_win.go @@ -0,0 +1,141 @@ +// +build windows + +package pb + +import ( + "errors" + "fmt" + "os" + "sync" + "syscall" + "unsafe" +) + +var tty = os.Stdin + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + + // GetConsoleScreenBufferInfo retrieves information about the + // specified console screen buffer. + // http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + + // GetConsoleMode retrieves the current input mode of a console's + // input buffer or the current output mode of a console screen buffer. + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx + getConsoleMode = kernel32.NewProc("GetConsoleMode") + + // SetConsoleMode sets the input mode of a console's input buffer + // or the output mode of a console screen buffer. 
+	// https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
+	setConsoleMode = kernel32.NewProc("SetConsoleMode")
+
+	// SetConsoleCursorPosition sets the cursor position in the
+	// specified console screen buffer.
+	// https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx
+	setConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
+)
+
+type (
+	// smallRect defines the coordinates of the upper left and lower right
+	// corners of a rectangle.
+	// See
+	// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311(v=vs.85).aspx
+	smallRect struct {
+		Left, Top, Right, Bottom int16
+	}
+
+	// coordinates defines the coordinates of a character cell in a console
+	// screen buffer. The origin of the coordinate system (0,0) is at the
+	// top left cell of the buffer.
+	// See
+	// http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119(v=vs.85).aspx
+	coordinates struct {
+		X, Y int16
+	}
+
+	word int16
+
+	// consoleScreenBufferInfo contains information about a console screen buffer.
+	// http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx
+	consoleScreenBufferInfo struct {
+		dwSize              coordinates
+		dwCursorPosition    coordinates
+		wAttributes         word
+		srWindow            smallRect
+		dwMaximumWindowSize coordinates
+	}
+)
+
+// terminalWidth returns the width of the terminal.
+func terminalWidth() (width int, err error) {
+	var info consoleScreenBufferInfo
+	_, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0)
+	if e != 0 {
+		return 0, error(e)
+	}
+	return int(info.dwSize.X) - 1, nil
+}
+
+func getCursorPos() (pos coordinates, err error) {
+	var info consoleScreenBufferInfo
+	_, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0)
+	if e != 0 {
+		return info.dwCursorPosition, error(e)
+	}
+	return info.dwCursorPosition, nil
+}
+
+func setCursorPos(pos coordinates) error {
+	_, _, e := syscall.Syscall(setConsoleCursorPosition.Addr(), 2, uintptr(syscall.Stdout), uintptr(uint32(uint16(pos.Y))<<16|uint32(uint16(pos.X))), 0)
+	if e != 0 {
+		return error(e)
+	}
+	return nil
+}
+
+var ErrPoolWasStarted = errors.New("Bar pool was started")
+
+var echoLocked bool
+var echoLockMutex sync.Mutex
+
+var oldState word
+
+func lockEcho() (quit chan int, err error) {
+	echoLockMutex.Lock()
+	defer echoLockMutex.Unlock()
+	if echoLocked {
+		err = ErrPoolWasStarted
+		return
+	}
+	echoLocked = true
+
+	if _, _, e := syscall.Syscall(getConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&oldState)), 0); e != 0 {
+		err = fmt.Errorf("Can't get terminal settings: %v", e)
+		return
+	}
+
+	newState := oldState
+	const ENABLE_ECHO_INPUT = 0x0004
+	const ENABLE_LINE_INPUT = 0x0002
+	newState = newState & (^(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT))
+	if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(newState), 0); e != 0 {
+		err = fmt.Errorf("Can't set terminal settings: %v", e)
+		return
+	}
+	return
+}
+
+func unlockEcho() (err error) {
+	echoLockMutex.Lock()
+	defer echoLockMutex.Unlock()
+	if !echoLocked {
+		return
+	}
+	echoLocked = false
+	if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(oldState), 0); e != 0 {
+		err = fmt.Errorf("Can't set terminal settings: %v", e)
+	}
+	return
+}
diff --git a/vendor/src/gopkg.in/cheggaaa/pb.v1/pb_x.go b/vendor/src/gopkg.in/cheggaaa/pb.v1/pb_x.go
new file mode 100644
index 00000000..12e6fe6e
--- /dev/null
+++ b/vendor/src/gopkg.in/cheggaaa/pb.v1/pb_x.go
@@ -0,0 +1,110 @@
+// +build linux darwin freebsd netbsd openbsd solaris dragonfly
+// +build !appengine
+
+package pb
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"os/signal"
+	"runtime"
+	"sync"
+	"syscall"
+	"unsafe"
+)
+
+const (
+	TIOCGWINSZ     = 0x5413
+	TIOCGWINSZ_OSX = 1074295912
+)
+
+var tty *os.File
+
+var ErrPoolWasStarted = errors.New("Bar pool was started")
+
+var echoLocked bool
+var echoLockMutex sync.Mutex
+
+func init() {
+	var err error
+	tty, err = os.Open("/dev/tty")
+	if err != nil {
+		tty = os.Stdin
+	}
+}
+
+// terminalWidth returns the width of the terminal.
+func terminalWidth() (int, error) {
+	w := new(window)
+	tio := syscall.TIOCGWINSZ
+	if runtime.GOOS == "darwin" {
+		tio = TIOCGWINSZ_OSX
+	}
+	res, _, err := syscall.Syscall(sysIoctl,
+		tty.Fd(),
+		uintptr(tio),
+		uintptr(unsafe.Pointer(w)),
+	)
+	if int(res) == -1 {
+		return 0, err
+	}
+	return int(w.Col), nil
+}
+
+var oldState syscall.Termios
+
+func lockEcho() (quit chan int, err error) {
+	echoLockMutex.Lock()
+	defer echoLockMutex.Unlock()
+	if echoLocked {
+		err = ErrPoolWasStarted
+		return
+	}
+	echoLocked = true
+
+	fd := tty.Fd()
+	if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 {
+		err = fmt.Errorf("Can't get terminal settings: %v", e)
+		return
+	}
+
+	newState := oldState
+	newState.Lflag &^= syscall.ECHO
+	newState.Lflag |= syscall.ICANON | syscall.ISIG
+	newState.Iflag |= syscall.ICRNL
+	if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); e != 0 {
+		err = fmt.Errorf("Can't set terminal settings: %v", e)
+		return
+	}
+	quit = make(chan int, 1)
+	go catchTerminate(quit)
+	return
+}
+
+func unlockEcho() (err error) {
+	echoLockMutex.Lock()
+	defer echoLockMutex.Unlock()
+	if !echoLocked {
+		return
+	}
+	echoLocked = false
+	fd := tty.Fd()
+	if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 {
+		err = fmt.Errorf("Can't set terminal settings: %v", e)
+	}
+	return
+}
+
+// catchTerminate listens for exit signals and restores the terminal state.
+func catchTerminate(quit chan int) {
+	sig := make(chan os.Signal, 1)
+	signal.Notify(sig, os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGKILL)
+	defer signal.Stop(sig)
+	select {
+	case <-quit:
+		unlockEcho()
+	case <-sig:
+		unlockEcho()
+	}
+}
diff --git a/vendor/src/gopkg.in/cheggaaa/pb.v1/pool.go b/vendor/src/gopkg.in/cheggaaa/pb.v1/pool.go
new file mode 100644
index 00000000..0b4a4afa
--- /dev/null
+++ b/vendor/src/gopkg.in/cheggaaa/pb.v1/pool.go
@@ -0,0 +1,76 @@
+// +build linux darwin freebsd netbsd openbsd solaris dragonfly windows
+
+package pb
+
+import (
+	"sync"
+	"time"
+)
+
+// StartPool creates and starts a new pool with the given bars.
+// You need to call pool.Stop() when you are done with it.
+func StartPool(pbs ...*ProgressBar) (pool *Pool, err error) {
+	pool = new(Pool)
+	if err = pool.start(); err != nil {
+		return
+	}
+	pool.Add(pbs...)
+	return
+}
+
+type Pool struct {
+	RefreshRate time.Duration
+	bars        []*ProgressBar
+	quit        chan int
+	finishOnce  sync.Once
+}
+
+// Add registers the given progress bars with the pool.
+func (p *Pool) Add(pbs ...*ProgressBar) {
+	for _, bar := range pbs {
+		bar.ManualUpdate = true
+		bar.NotPrint = true
+		bar.Start()
+		p.bars = append(p.bars, bar)
+	}
+}
+
+func (p *Pool) start() (err error) {
+	p.RefreshRate = DefaultRefreshRate
+	quit, err := lockEcho()
+	if err != nil {
+		return
+	}
+	p.quit = make(chan int)
+	go p.writer(quit)
+	return
+}
+
+func (p *Pool) writer(finish chan int) {
+	var first = true
+	for {
+		select {
+		case <-time.After(p.RefreshRate):
+			if p.print(first) {
+				p.print(false)
+				finish <- 1
+				return
+			}
+			first = false
+		case <-p.quit:
+			finish <- 1
+			return
+		}
+	}
+}
+
+// Stop restores the terminal state and closes the pool.
+func (p *Pool) Stop() error {
+	// Wait until one final refresh has passed.
+	time.Sleep(p.RefreshRate)
+
+	p.finishOnce.Do(func() {
+		close(p.quit)
+	})
+	return unlockEcho()
+}
diff --git a/vendor/src/gopkg.in/cheggaaa/pb.v1/pool_win.go b/vendor/src/gopkg.in/cheggaaa/pb.v1/pool_win.go
new file mode 100644
index 00000000..d7a5ace4
--- /dev/null
+++ b/vendor/src/gopkg.in/cheggaaa/pb.v1/pool_win.go
@@ -0,0 +1,35 @@
+// +build windows
+
+package pb
+
+import (
+	"fmt"
+	"log"
+)
+
+func (p *Pool) print(first bool) bool {
+	var out string
+	if !first {
+		coords, err := getCursorPos()
+		if err != nil {
+			log.Panic(err)
+		}
+		coords.Y -= int16(len(p.bars))
+		coords.X = 0
+
+		err = setCursorPos(coords)
+		if err != nil {
+			log.Panic(err)
+		}
+	}
+	isFinished := true
+	for _, bar := range p.bars {
+		if !bar.isFinish {
+			isFinished = false
+		}
+		bar.Update()
+		out += fmt.Sprintf("\r%s\n", bar.String())
+	}
+	fmt.Print(out)
+	return isFinished
+}
diff --git a/vendor/src/gopkg.in/cheggaaa/pb.v1/pool_x.go b/vendor/src/gopkg.in/cheggaaa/pb.v1/pool_x.go
new file mode 100644
index 00000000..d95b71d8
--- /dev/null
+++ b/vendor/src/gopkg.in/cheggaaa/pb.v1/pool_x.go
@@ -0,0 +1,22 @@
+// +build linux darwin freebsd netbsd openbsd solaris dragonfly
+
+package pb
+
+import "fmt"
+
+func (p *Pool) print(first bool) bool {
+	var out string
+	if !first {
+		out = fmt.Sprintf("\033[%dA", len(p.bars))
+	}
+	isFinished := true
+	for _, bar := range p.bars {
+		if !bar.isFinish {
+			isFinished = false
+		}
+		bar.Update()
+		out += fmt.Sprintf("\r%s\n", bar.String())
+	}
+	fmt.Print(out)
+	return isFinished
+}
diff --git a/vendor/src/gopkg.in/cheggaaa/pb.v1/reader.go b/vendor/src/gopkg.in/cheggaaa/pb.v1/reader.go
new file mode 100644
index 00000000..9f3148b5
--- /dev/null
+++ b/vendor/src/gopkg.in/cheggaaa/pb.v1/reader.go
@@ -0,0 +1,25 @@
+package pb
+
+import (
+	"io"
+)
+
+// Reader is a proxy reader that implements io.Reader and advances the bar as data is read.
+type Reader struct {
+	io.Reader
+	bar *ProgressBar
+}
+
+func (r *Reader) Read(p []byte) (n int, err error) {
+	n, err = r.Reader.Read(p)
+	r.bar.Add(n)
+	return
+}
+
+// Close closes the wrapped reader when it implements io.Closer
+func (r *Reader) Close() (err error) {
+	if closer, ok := r.Reader.(io.Closer); ok {
+		return closer.Close()
+	}
+	return
+}
diff --git a/vendor/src/gopkg.in/cheggaaa/pb.v1/runecount.go b/vendor/src/gopkg.in/cheggaaa/pb.v1/runecount.go
new file mode 100644
index 00000000..d52edd36
--- /dev/null
+++ b/vendor/src/gopkg.in/cheggaaa/pb.v1/runecount.go
@@ -0,0 +1,17 @@
+package pb
+
+import (
+	"github.com/mattn/go-runewidth"
+	"regexp"
+)
+
+// Finds the control character sequences (like colors)
+var ctrlFinder = regexp.MustCompile("\x1b\x5b[0-9]+\x6d")
+
+func escapeAwareRuneCountInString(s string) int {
+	n := runewidth.StringWidth(s)
+	for _, sm := range ctrlFinder.FindAllString(s, -1) {
+		n -= runewidth.StringWidth(sm) // subtract the width counted above, not the byte length
+	}
+	return n
+}
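
An aside on runecount.go above: the bar's layout math has to measure printable width, because color themes embed ANSI escape sequences whose bytes occupy no terminal cells. A minimal stand-alone illustration (not part of the patch, using only the standard library):

    // Illustrative only: byte length vs. visible width of a colored string.
    package main

    import "fmt"

    func main() {
    	colored := "\x1b[32mdone\x1b[0m" // "done" wrapped in ANSI green/reset codes
    	fmt.Println(len(colored))        // 13: counts the escape bytes too
    	// Only the 4 cells of "done" are drawn, which is why pb sizes the bar
    	// with escapeAwareRuneCountInString instead of len or a plain rune count.
    }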
diff --git a/vendor/src/gopkg.in/cheggaaa/pb.v1/termios_bsd.go b/vendor/src/gopkg.in/cheggaaa/pb.v1/termios_bsd.go
new file mode 100644
index 00000000..517ea8ed
--- /dev/null
+++ b/vendor/src/gopkg.in/cheggaaa/pb.v1/termios_bsd.go
@@ -0,0 +1,9 @@
+// +build darwin freebsd netbsd openbsd dragonfly
+// +build !appengine
+
+package pb
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+const ioctlWriteTermios = syscall.TIOCSETA
diff --git a/vendor/src/gopkg.in/cheggaaa/pb.v1/termios_nix.go b/vendor/src/gopkg.in/cheggaaa/pb.v1/termios_nix.go
new file mode 100644
index 00000000..ebb3fe87
--- /dev/null
+++ b/vendor/src/gopkg.in/cheggaaa/pb.v1/termios_nix.go
@@ -0,0 +1,7 @@
+// +build linux solaris
+// +build !appengine
+
+package pb
+
+const ioctlReadTermios = 0x5401  // syscall.TCGETS
+const ioctlWriteTermios = 0x5402 // syscall.TCSETS
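
As a usage sketch (hypothetical, not part of this patch): the proxy Reader advances a bar as bytes are read, and StartPool/Stop manage repainting and terminal echo, so a caller such as the updated image pull path only has to wire a bar to its stream. The sketch assumes the vendored pb.go also carries pb's usual NewProxyReader helper and that a real terminal is attached (the pool manipulates termios/console state); everything in main() is made up for illustration:

    // Hypothetical example, not vendored code.
    package main

    import (
    	"io"
    	"io/ioutil"
    	"strings"

    	pb "gopkg.in/cheggaaa/pb.v1"
    )

    func main() {
    	// Stand-in for a layer blob being pulled: 1 MiB of data.
    	blob := strings.NewReader(strings.Repeat("x", 1<<20))

    	// One bar per blob, counting bytes.
    	bar := pb.New(blob.Len()).SetUnits(pb.U_BYTES)

    	// The pool locks terminal echo and repaints registered bars on
    	// every refresh tick (pool.go above).
    	pool, err := pb.StartPool(bar)
    	if err != nil {
    		panic(err)
    	}

    	// The proxy Reader (reader.go above) calls bar.Add for every Read,
    	// so the copy itself needs no explicit progress updates.
    	if _, err := io.Copy(ioutil.Discard, bar.NewProxyReader(blob)); err != nil {
    		panic(err)
    	}

    	bar.Finish()
    	pool.Stop() // restores the terminal state via unlockEcho
    }

Note the two repaint strategies in the files above: on Unix the pool moves the cursor up with the "\033[%dA" escape (pool_x.go), while on Windows it repositions the cursor through SetConsoleCursorPosition (pool_win.go).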