diff --git a/.dockerignore b/.dockerignore index b84c81eb2..a5b1aab59 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,5 +1,6 @@ ./ci/ conf/stack +conf/stack/** screenshots tools test/data/registry @@ -23,3 +24,5 @@ coverage .npm-debug.log test/__pycache__ __pycache__ +**/__pycache__ +static/build/** \ No newline at end of file diff --git a/.gitignore b/.gitignore index 92c6f1dea..06ba0a6cf 100644 --- a/.gitignore +++ b/.gitignore @@ -11,7 +11,6 @@ static/fonts static/build stack_local test/data/registry/ -typings GIT_HEAD .idea .python-version @@ -24,3 +23,4 @@ htmlcov .npm-debug.log Dockerfile-e build/ +.vscode diff --git a/.gitlab-ci.jsonnet b/.gitlab-ci.jsonnet new file mode 100644 index 000000000..f669df0a1 --- /dev/null +++ b/.gitlab-ci.jsonnet @@ -0,0 +1,99 @@ +local utils = import '.gitlab-ci/utils.libsonnet'; +local vars = import '.gitlab-ci/vars.libsonnet'; +local mergeJob = utils.ci.mergeJob; +local images = vars.images; +local baseJob = (import '.gitlab-ci/base_jobs.libsonnet')(vars); + +local stages_list = [ + // gitlab-ci stages + 'docker_base', + 'docker_build', + 'unit_tests', + 'integration', + 'docker_release', + 'teardown', +]; + +local stages = utils.set(stages_list); + +// List CI jobs +local jobs = { + // Helpers + local onlyMaster = { + only: ['master', 'tags'], + }, + local onlyBranch = { + only: ['branches'], + }, + + 'container-base-build': baseJob.dockerBuild + onlyMaster { + // ! Only master/tags + // Update the base container + stage: stages.docker_base, + script: [ + 'docker build --cache-from quay.io/quay/quay-base:latest' + + ' -t %s -f quay-base.dockerfile .' % images.base.name, + 'docker push %s' % images.base.name, + ], + }, + + 'container-build': baseJob.dockerBuild { + // Build and push the quay container. + // Docker Tag is the branch/tag name + stage: stages.docker_build, + script: [ + 'docker build -t %s -f Dockerfile .' % images.quayci.name, + 'docker push %s' % images.quayci.name], + }, + + 'container-release': baseJob.dockerBuild + onlyMaster { + // ! 
Only master/tags + // push the container to the 'prod' repository + local repo_with_sha = images.release.name, + stage: stages.docker_release, + script: [ + 'docker pull %s' % images.quayci.name, + 'docker tag %s %s' % [images.quayci.name, repo_with_sha], + 'docker push %s' % [repo_with_sha], # @TODO(ant31) add signing + ], + }, + + // Unit-tests + local unittest_stage = baseJob.QuayTest { + stage: stages.unit_tests }, + 'unit-tests': unittest_stage { + script: [ + 'py.test --timeout=7200 --verbose --show-count ./ --color=no -x'] }, + + 'registry-tests': unittest_stage { + script: [ + 'py.test --timeout=7200 --verbose --show-count ./test/registry_tests.py --color=no -x'] }, + + // UI tests + 'karma-tests': unittest_stage { + script: [ + 'curl -Ss https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add -', + 'echo "deb http://dl.google.com/linux/chrome/deb/ stable main" > /etc/apt/sources.list.d/google.list', + 'apt-get update -yqqq', + 'apt-get install -y google-chrome-stable', + 'yarn test' + ] }, + + // Unit-tests with real databases + local db_stage = { stage: stages.unit_tests }, + local dbname = 'quay', + postgres: db_stage + baseJob.dbTest('postgresql', + image='postgres:9.6', + env={ POSTGRES_PASSWORD: dbname, POSTGRES_USER: dbname }), + + mysql: db_stage + baseJob.dbTest('mysql+pymysql', + image='mysql:latest', + env={ [key]: dbname for key in ['MYSQL_ROOT_PASSWORD', 'MYSQL_DATABASE', + 'MYSQL_USER', 'MYSQL_PASSWORD'] }), + +}; + +{ + stages: stages_list, + variables: vars.global, +} + jobs diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 000000000..ca92ee270 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,155 @@ +# Generated from .gitlab-ci.jsonnet +# DO NOT EDIT THIS FILE BY HAND -- YOUR CHANGES WILL BE OVERWRITTEN +--- +container-base-build: + before_script: + - docker login -u $DOCKER_USER -p $DOCKER_PASS quay.io + image: docker:git + only: + - master + - tags + script: + - docker build --cache-from quay.io/quay/quay-base:latest -t quay.io/quay/quay-base:latest -f quay-base.dockerfile . + - docker push quay.io/quay/quay-base:latest + stage: docker_base + tags: + - kubernetes + variables: + DOCKER_DRIVER: overlay + DOCKER_HOST: tcp://docker-host.gitlab-runner.svc.cluster.local:2375 +container-build: + before_script: + - docker login -u $DOCKER_USER -p $DOCKER_PASS quay.io + image: docker:git + script: + - docker build -t quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG} -f Dockerfile . 
+ - docker push quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG} + stage: docker_build + tags: + - kubernetes + variables: + DOCKER_DRIVER: overlay + DOCKER_HOST: tcp://docker-host.gitlab-runner.svc.cluster.local:2375 +container-release: + before_script: + - docker login -u $DOCKER_USER -p $DOCKER_PASS quay.io + image: docker:git + only: + - master + - tags + script: + - docker pull quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG} + - docker tag quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG} quay.io/quay/quay:${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHA} + - docker push quay.io/quay/quay:${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHA} + stage: docker_release + tags: + - kubernetes + variables: + DOCKER_DRIVER: overlay + DOCKER_HOST: tcp://docker-host.gitlab-runner.svc.cluster.local:2375 +karma-tests: + before_script: + - cd $QUAYDIR + - source $QUAYDIR/venv/bin/activate + image: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG} + script: + - curl -Ss https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - + - echo "deb http://dl.google.com/linux/chrome/deb/ stable main" > /etc/apt/sources.list.d/google.list + - apt-get update -yqqq + - apt-get install -y google-chrome-stable + - yarn test + stage: unit_tests + tags: + - kubernetes + variables: + GIT_STRATEGY: none + PYTHONPATH: . + QUAYDIR: /quay-registry + TEST: 'true' +mysql: + before_script: + - cd $QUAYDIR + - source $QUAYDIR/venv/bin/activate + image: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG} + script: + - sleep 30 + - alembic upgrade head + - PYTHONPATH="." TEST="true" py.test --timeout=7200 --verbose --show-count ./ --color=no --ignore=endpoints/appr/test/ -x + services: + - mysql:latest + stage: unit_tests + tags: + - kubernetes + variables: + GIT_STRATEGY: none + MYSQL_DATABASE: quay + MYSQL_PASSWORD: quay + MYSQL_ROOT_PASSWORD: quay + MYSQL_USER: quay + PYTHONPATH: . + QUAYDIR: /quay-registry + SKIP_DB_SCHEMA: 'true' + TEST: 'true' + TEST_DATABASE_URI: mysql+pymysql://quay:quay@localhost/quay +postgres: + before_script: + - cd $QUAYDIR + - source $QUAYDIR/venv/bin/activate + image: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG} + script: + - sleep 30 + - alembic upgrade head + - PYTHONPATH="." TEST="true" py.test --timeout=7200 --verbose --show-count ./ --color=no --ignore=endpoints/appr/test/ -x + services: + - postgres:9.6 + stage: unit_tests + tags: + - kubernetes + variables: + GIT_STRATEGY: none + POSTGRES_PASSWORD: quay + POSTGRES_USER: quay + PYTHONPATH: . + QUAYDIR: /quay-registry + SKIP_DB_SCHEMA: 'true' + TEST: 'true' + TEST_DATABASE_URI: postgresql://quay:quay@localhost/quay +registry-tests: + before_script: + - cd $QUAYDIR + - source $QUAYDIR/venv/bin/activate + image: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG} + script: + - py.test --timeout=7200 --verbose --show-count ./test/registry_tests.py --color=no -x + stage: unit_tests + tags: + - kubernetes + variables: + GIT_STRATEGY: none + PYTHONPATH: . + QUAYDIR: /quay-registry + TEST: 'true' +stages: +- docker_base +- docker_build +- unit_tests +- integration +- docker_release +- teardown +unit-tests: + before_script: + - cd $QUAYDIR + - source $QUAYDIR/venv/bin/activate + image: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG} + script: + - py.test --timeout=7200 --verbose --show-count ./ --color=no -x + stage: unit_tests + tags: + - kubernetes + variables: + GIT_STRATEGY: none + PYTHONPATH: . 
+ QUAYDIR: /quay-registry + TEST: 'true' +variables: + FAILFASTCI_NAMESPACE: quay diff --git a/.gitlab-ci/base_jobs.libsonnet b/.gitlab-ci/base_jobs.libsonnet new file mode 100644 index 000000000..d90a3eef1 --- /dev/null +++ b/.gitlab-ci/base_jobs.libsonnet @@ -0,0 +1,50 @@ +function(vars={}) + { + dockerBuild: { + // base job to manage containers (build / push) + variables: { + DOCKER_DRIVER: "overlay", + DOCKER_HOST: "tcp://docker-host.gitlab-runner.svc.cluster.local:2375" + }, + + image: "docker:git", + before_script: [ + "docker login -u $DOCKER_USER -p $DOCKER_PASS quay.io", + ], + + tags: [ + "kubernetes", + ], + }, + + QuayTest: { + // base job to test the container + image: vars.images.quayci.name, + variables: { + TEST: "true", + PYTHONPATH: ".", + QUAYDIR: "/quay-registry", + GIT_STRATEGY: "none", + }, + before_script: [ + "cd $QUAYDIR", + "source $QUAYDIR/venv/bin/activate", + ], + tags: [ + "kubernetes", + ], + }, + + dbTest(scheme, image, env):: self.QuayTest { + variables+: { + SKIP_DB_SCHEMA: 'true', + TEST_DATABASE_URI: '%s://quay:quay@localhost/quay' % scheme, + } + env, + services: [image], + script: [ + "sleep 30", + "alembic upgrade head", + 'PYTHONPATH="." TEST="true" py.test --timeout=7200 --verbose --show-count ./ --color=no --ignore=endpoints/appr/test/ -x', + ], + }, + } diff --git a/.gitlab-ci/utils.libsonnet b/.gitlab-ci/utils.libsonnet new file mode 100644 index 000000000..73801c928 --- /dev/null +++ b/.gitlab-ci/utils.libsonnet @@ -0,0 +1,66 @@ +{ + local topSelf = self, + # Generate a sequence array from 1 to i + seq(i):: ( + [x for x in std.range(1, i)] + ), + + objectFieldsHidden(obj):: ( + std.setDiff(std.objectFieldsAll(obj), std.objectFields(obj)) + ), + + objectFlatten(obj):: ( + // Merge 1 level dict depth into toplevel + local visible = { [k]: obj[j][k] + for j in std.objectFieldsAll(obj) + for k in std.objectFieldsAll(obj[j]) }; + + visible + ), + + compact(array):: ( + [x for x in array if x != null] + ), + + objectValues(obj):: ( + local fields = std.objectFields(obj); + [obj[key] for key in fields] + ), + + objectMap(func, obj):: ( + local fields = std.objectFields(obj); + { [key]: func(obj[key]) for key in fields } + ), + + capitalize(str):: ( + std.char(std.codepoint(str[0]) - 32) + str[1:] + ), + + test: self.capitalize("test"), + + set(array):: + { [key]: key for key in array }, + + containerName(repo, tag):: "%s:%s" % [repo, tag], + + ci: { + + mergeJob(base_job, jobs, stage=null):: { + [job_name]: base_job + jobs[job_name] + + if stage != null then { stage: stage } else {} + for job_name in std.objectFields(jobs) + }, + + only(key):: ( + if key == "master" + then { only: ['master', 'tags'] } + else { only: ['branches'] } + ), + + setManual(key, values):: ( + if std.objectHas(topSelf.set(values), key) + then { when: 'manual' } + else { only: ['branches'] } + ), + }, +} diff --git a/.gitlab-ci/vars.libsonnet b/.gitlab-ci/vars.libsonnet new file mode 100644 index 000000000..b47e2cd9f --- /dev/null +++ b/.gitlab-ci/vars.libsonnet @@ -0,0 +1,27 @@ +local utils = import "utils.libsonnet"; + +{ + global: { + // .gitlab-ci.yaml top `variables` key + FAILFASTCI_NAMESPACE: "quay", + }, + + // internal variables + images: { + // Quay initial image, used in the FROM clause + base: { repo: "quay.io/quay/quay-base", tag: "latest", + name: utils.containerName(self.repo, self.tag), + }, + + // @TODO(ant31) release should use quay/quay + // release is a copy of the quayci image to the 'prod' repository + release: { repo: "quay.io/quay/quay", + tag: 
"${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHA}", + name: utils.containerName(self.repo, self.tag), + }, + + quayci: { repo: "quay.io/quay/quay-ci", tag: "${CI_COMMIT_REF_SLUG}", + name: utils.containerName(self.repo, self.tag), + }, + }, +} diff --git a/CHANGELOG.md b/CHANGELOG.md index fd41424aa..943dc5779 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,26 @@ +### v2.4.0 + +- Added: Kubernetes Applications Support +- Added: Full-page search UI (#2529) +- Added: Always generate V2 manifests for tag operations in UI (#2608) +- Added: Option to enable public repositories in v2 catalog API (#2654) +- Added: Disable repository notifications after 3 failures (#2652) +- Added: Remove requirement for flash for copy button in UI (#2667) + +- Fixed: Upgrade support for Markdown (#2624) +- Fixed: Kubernetes secret generation with secrets with CAPITAL names (#2640) +- Fixed: Content-Length reporting on HEAD requests (#2616) +- Fixed: Use configured email address as the sender in email notifications (#2635) +- Fixed: Better peformance on permissions lookup (#2628) +- Fixed: Disable federated login for new users if user creation is disabled (#2623) +- Fixed: Show build logs timestamps by default (#2647) +- Fixed: Custom TLS certificates tooling in superuser panel under Kubernetes (#2646, #2663) +- Fixed: Disable debug logs in superuser panel when under multiple instances (#2663) +- Fixed: External Notification Modal UI bug (#2650) +- Fixed: Security worker thrashing when security scanner not available +- Fixed: Torrent validation in superuser config panel (#2694) +- Fixed: Expensive database call in build badges (#2688) + ### v2.3.4 - Added: Always show tag expiration options in superuser panel diff --git a/Dockerfile b/Dockerfile index f2ead3c14..be7fba93a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,60 +1,10 @@ # vim:ft=dockerfile -FROM phusion/baseimage:0.9.19 +FROM quay.io/quay/quay-base:latest -ENV DEBIAN_FRONTEND noninteractive -ENV HOME /root +WORKDIR $QUAYDIR -# This is so we don't break http golang/go#17066 -# When Ubuntu has nginx >= 1.11.0 we can switch back. 
-RUN add-apt-repository ppa:nginx/development
-
-# Add Yarn repository until it is officially added to Ubuntu
-RUN curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add -
-RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list
-
-# Install system packages
-RUN apt-get update && apt-get upgrade -y # 27APR2017
-RUN apt-get install -y \
-    dnsmasq \
-    g++ \
-    gdb \
-    gdebi-core \
-    git \
-    jpegoptim \
-    libevent-2.0.5 \
-    libevent-dev \
-    libffi-dev \
-    libfreetype6-dev \
-    libgpgme11 \
-    libgpgme11-dev \
-    libjpeg62 \
-    libjpeg62-dev \
-    libjpeg8 \
-    libldap-2.4-2 \
-    libldap2-dev \
-    libmagic1 \
-    libpq-dev \
-    libpq5 \
-    libsasl2-dev \
-    libsasl2-modules \
-    monit \
-    nginx \
-    nodejs \
-    optipng \
-    openssl \
-    python-dbg \
-    python-dev \
-    python-pip \
-    python-virtualenv \
-    yarn=0.22.0-1 \
-    w3m
-
-# Install python dependencies
-ADD requirements.txt requirements.txt
-RUN virtualenv --distribute venv
-RUN venv/bin/pip install -r requirements.txt # 07SEP2016
-RUN venv/bin/pip freeze
+COPY requirements.txt requirements-tests.txt ./
 
 # Check python dependencies for the GPL
 # Due to the following bug, pip results must be piped to a file before grepping:
@@ -63,131 +13,43 @@ RUN cat requirements.txt | grep -v "^-e" | awk -F'==' '{print $1}' | xargs venv/
     test -z $(cat pipinfo.txt | grep GPL | grep -v LGPL) && \
     rm pipinfo.txt
 
-# Install cfssl
-RUN mkdir /gocode
-ENV GOPATH /gocode
-RUN curl -O https://storage.googleapis.com/golang/go1.6.linux-amd64.tar.gz && \
-    tar -xvf go1.6.linux-amd64.tar.gz && \
-    mv go /usr/local && \
-    rm -rf go1.6.linux-amd64.tar.gz && \
-    /usr/local/go/bin/go get -u github.com/cloudflare/cfssl/cmd/cfssl && \
-    /usr/local/go/bin/go get -u github.com/cloudflare/cfssl/cmd/cfssljson && \
-    cp /gocode/bin/cfssljson /bin/cfssljson && \
-    cp /gocode/bin/cfssl /bin/cfssl && \
-    rm -rf /gocode && rm -rf /usr/local/go
-
-# Install jwtproxy
-RUN curl -L -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.1/jwtproxy-linux-x64
-RUN chmod +x /usr/local/bin/jwtproxy
-
-# Install prometheus-aggregator
-RUN curl -L -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator
-RUN chmod +x /usr/local/bin/prometheus-aggregator
+RUN virtualenv --distribute venv \
+    && venv/bin/pip install -r requirements.txt \
+    && venv/bin/pip install -r requirements-tests.txt \
+    && venv/bin/pip freeze
 
 # Install front-end dependencies
-RUN ln -s /usr/bin/nodejs /usr/bin/node
-ADD package.json package.json
-ADD tsconfig.json tsconfig.json
-ADD webpack.config.js webpack.config.js
-ADD typings.json typings.json
-ADD yarn.lock yarn.lock
-RUN yarn install --ignore-engines
+# JS dependencies
+COPY yarn.lock ./
+RUN yarn install --ignore-engines
 
-# Add static files
-ADD static static
+# JS compile
+COPY static static
+COPY package.json tsconfig.json webpack.config.js tslint.json ./
+RUN yarn build \
+    && jpegoptim static/img/**/*.jpg \
+    && optipng -clobber -quiet static/img/**/*.png
 
-# Run Webpack
-RUN node_modules/.bin/webpack --progress
-
-# Run front-end tests
-ARG RUN_TESTS=true
-ENV RUN_TESTS ${RUN_TESTS}
-
-ADD karma.conf.js karma.conf.js
-RUN if [ "$RUN_TESTS" = true ]; then \
-    yarn test; \
-    fi
-
-# Install Grunt and Grunt depenencies
-RUN yarn global add grunt-cli
-ADD grunt grunt
-RUN cd grunt && yarn install
-
-# Run Grunt
-RUN cd grunt && grunt
-
-# Optimize our images
-ADD static/img static/img
-RUN jpegoptim static/img/**/*.jpg
-RUN optipng -clobber -quiet static/img/**/*.png
-
-RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev libgpgme11-dev nodejs jpegoptim optipng w3m
-RUN apt-get autoremove -y
-RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+COPY . .
 
 # Set up the init system
-ADD conf/init/copy_config_files.sh /etc/my_init.d/
-ADD conf/init/doupdatelimits.sh /etc/my_init.d/
-ADD conf/init/copy_syslog_config.sh /etc/my_init.d/
-ADD conf/init/certs_create.sh /etc/my_init.d/
-ADD conf/init/certs_install.sh /etc/my_init.d/
-ADD conf/init/nginx_conf_create.sh /etc/my_init.d/
-ADD conf/init/runmigration.sh /etc/my_init.d/
-ADD conf/init/syslog-ng.conf /etc/syslog-ng/
-ADD conf/init/zz_boot.sh /etc/my_init.d/
-ADD conf/init/service/ /etc/service/
-RUN rm -rf /etc/service/syslog-forwarder
+RUN mkdir -p /etc/my_init.d /etc/syslog-ng /usr/local/bin /etc/monit static/fonts static/ldn /usr/local/nginx/logs/ \
+    && cp $QUAYCONF/init/*.sh /etc/my_init.d/ \
+    && cp $QUAYCONF/init/syslog-ng.conf /etc/syslog-ng/ \
+    && cp -r $QUAYCONF/init/service/* /etc/service \
+    && cp $QUAYCONF/kill-buildmanager.sh /usr/local/bin/kill-buildmanager.sh \
+    && cp $QUAYCONF/monitrc /etc/monit/monitrc \
+    && chmod 0600 /etc/monit/monitrc \
+    && cp $QUAYCONF/init/logrotate.conf /etc/logrotate.conf \
+    && cp .git/HEAD GIT_HEAD \
+    && rm -rf /etc/service/syslog-forwarder
 
-ADD conf/kill-buildmanager.sh /usr/local/bin/kill-buildmanager.sh
-ADD conf/monitrc /etc/monit/monitrc
-RUN chmod 0600 /etc/monit/monitrc
-
-# remove after phusion/baseimage-docker#338 is fixed
-ADD conf/init/logrotate.conf /etc/logrotate.conf
-
-# Download any external libs.
-RUN mkdir static/fonts static/ldn
-ADD external_libraries.py external_libraries.py
-RUN venv/bin/python -m external_libraries
-RUN mkdir -p /usr/local/nginx/logs/
-
-# TODO(ssewell): only works on a detached head, make work with ref
-ADD .git/HEAD GIT_HEAD
-
-# Add all of the files!
-ADD . .
-RUN pyclean .
+RUN ln -s $QUAYCONF /conf
 
 # Cleanup any NPM-related stuff.
-RUN rm -rf /root/.npm
-RUN rm -rf /.npm
-RUN rm -rf /usr/local/lib/node_modules
-RUN rm -rf /root/node_modules
-RUN rm -rf /node_modules
-RUN rm -rf /grunt
-RUN rm package.json yarn.lock
-
-# Run the tests
-ENV RUN_ACI_TESTS False
-ADD requirements-tests.txt requirements-tests.txt
-
-RUN if [ "$RUN_TESTS" = true ]; then \
-    venv/bin/pip install -r requirements-tests.txt ;\
-    fi
-
-
-RUN if [ "$RUN_TESTS" = true ]; then \
-    TEST=true PYTHONPATH="." venv/bin/py.test --timeout=7200 --verbose \
-    --show-count -x --color=no ./; \
-    fi
-
-RUN if [ "$RUN_TESTS" = true ]; then \
-    TEST=true PYTHONPATH="." venv/bin/py.test --timeout=7200 --verbose \
-    --show-count -x --color=no test/registry_tests.py ; \
-    fi
-
-RUN PYTHONPATH=.
venv/bin/alembic heads | grep -E '^[0-9a-f]+ \(head\)$' > ALEMBIC_HEAD - -VOLUME ["/conf/stack", "/var/log", "/datastorage", "/tmp", "/conf/etcd"] - -EXPOSE 443 8443 80 +# RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev libgpgme11-dev nodejs jpegoptim optipng w3m \ +# && apt-get autoremove -y \ +# && apt-get clean +# && rm -rf /root/.npm /.npm /usr/local/lib/node_modules /usr/share/yarn/node_modules \ +# /root/node_modules /node_modules /grunt +RUN PYTHONPATH=$QUAYPATH venv/bin/alembic heads | grep -E '^[0-9a-f]+ \(head\)$' > ALEMBIC_HEAD diff --git a/Dockerfile.old b/Dockerfile.old new file mode 100644 index 000000000..191a5cafe --- /dev/null +++ b/Dockerfile.old @@ -0,0 +1,189 @@ +# vim:ft=dockerfile + +FROM phusion/baseimage:0.9.19 + +ENV DEBIAN_FRONTEND noninteractive +ENV HOME /root +ENV QUAYCONF /quay/conf +ENV QUAYDIR /quay +ENV QUAYPATH "." + +RUN mkdir $QUAYDIR +WORKDIR $QUAYDIR + +# This is so we don't break http golang/go#17066 +# When Ubuntu has nginx >= 1.11.0 we can switch back. +RUN add-apt-repository ppa:nginx/development + +# Add Yarn repository until it is officially added to Ubuntu +RUN curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - +RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list + +# Install system packages +RUN apt-get update && apt-get upgrade -y # 26MAY2017 +RUN apt-get install -y \ + dnsmasq \ + g++ \ + gdb \ + gdebi-core \ + git \ + jpegoptim \ + libevent-2.0.5 \ + libevent-dev \ + libffi-dev \ + libfreetype6-dev \ + libgpgme11 \ + libgpgme11-dev \ + libjpeg62 \ + libjpeg62-dev \ + libjpeg8 \ + libldap-2.4-2 \ + libldap2-dev \ + libmagic1 \ + libpq-dev \ + libpq5 \ + libsasl2-dev \ + libsasl2-modules \ + monit \ + nginx \ + nodejs \ + optipng \ + openssl \ + python-dbg \ + python-dev \ + python-pip \ + python-virtualenv \ + yarn=0.22.0-1 \ + w3m + +# Install python dependencies + +ADD requirements.txt requirements.txt +RUN virtualenv --distribute venv +RUN venv/bin/pip install -r requirements.txt # 07SEP2016 +RUN venv/bin/pip freeze + +# Check python dependencies for the GPL +# Due to the following bug, pip results must be piped to a file before grepping: +# https://github.com/pypa/pip/pull/3304 +RUN cat requirements.txt | grep -v "^-e" | awk -F'==' '{print $1}' | xargs venv/bin/pip --disable-pip-version-check show > pipinfo.txt && \ + test -z $(cat pipinfo.txt | grep GPL | grep -v LGPL) && \ + rm pipinfo.txt + +# Install cfssl +RUN mkdir /gocode +ENV GOPATH /gocode +RUN curl -O https://storage.googleapis.com/golang/go1.6.linux-amd64.tar.gz && \ + tar -xvf go1.6.linux-amd64.tar.gz && \ + mv go /usr/local && \ + rm -rf go1.6.linux-amd64.tar.gz && \ + /usr/local/go/bin/go get -u github.com/cloudflare/cfssl/cmd/cfssl && \ + /usr/local/go/bin/go get -u github.com/cloudflare/cfssl/cmd/cfssljson && \ + cp /gocode/bin/cfssljson /bin/cfssljson && \ + cp /gocode/bin/cfssl /bin/cfssl && \ + rm -rf /gocode && rm -rf /usr/local/go + +# Install jwtproxy +RUN curl -L -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.1/jwtproxy-linux-x64 +RUN chmod +x /usr/local/bin/jwtproxy + +# Install prometheus-aggregator +RUN curl -L -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator +RUN chmod +x /usr/local/bin/prometheus-aggregator + +# Install front-end dependencies +RUN ln -s /usr/bin/nodejs /usr/bin/node +ADD 
package.json package.json +ADD tsconfig.json tsconfig.json +ADD webpack.config.js webpack.config.js +ADD yarn.lock yarn.lock +RUN yarn install --ignore-engines + +# Add static files +ADD static static + +# Run Webpack +RUN yarn build + +# Optimize our images +ADD static/img static/img +RUN jpegoptim static/img/**/*.jpg +RUN optipng -clobber -quiet static/img/**/*.png + +RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev libgpgme11-dev nodejs jpegoptim optipng w3m +RUN apt-get autoremove -y +RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +# Set up the init system +ADD conf/init/copy_config_files.sh /etc/my_init.d/ +ADD conf/init/doupdatelimits.sh /etc/my_init.d/ +ADD conf/init/copy_syslog_config.sh /etc/my_init.d/ +ADD conf/init/certs_create.sh /etc/my_init.d/ +ADD conf/init/certs_install.sh /etc/my_init.d/ +ADD conf/init/nginx_conf_create.sh /etc/my_init.d/ +ADD conf/init/runmigration.sh /etc/my_init.d/ +ADD conf/init/syslog-ng.conf /etc/syslog-ng/ +ADD conf/init/zz_boot.sh /etc/my_init.d/ +ADD conf/init/service/ /etc/service/ +RUN rm -rf /etc/service/syslog-forwarder + +ADD conf/kill-buildmanager.sh /usr/local/bin/kill-buildmanager.sh +ADD conf/monitrc /etc/monit/monitrc +RUN chmod 0600 /etc/monit/monitrc + +# remove after phusion/baseimage-docker#338 is fixed +ADD conf/init/logrotate.conf /etc/logrotate.conf + +# TODO(ssewell): only works on a detached head, make work with ref +ADD .git/HEAD GIT_HEAD + +# Add all of the files! +ADD . . +RUN mkdir static/fonts static/ldn + +# Download any external libs. +RUN venv/bin/python -m external_libraries +RUN mkdir -p /usr/local/nginx/logs/ + + +RUN pyclean . + +# Cleanup any NPM-related stuff. +RUN rm -rf /root/.npm +RUN rm -rf .npm +RUN rm -rf /usr/local/lib/node_modules +RUN rm -rf /usr/share/yarn/node_modules +RUN rm -rf /root/node_modules +RUN rm -rf node_modules +RUN rm -rf grunt +RUN rm package.json yarn.lock + +# Run the tests +ARG RUN_TESTS=true +ENV RUN_TESTS ${RUN_TESTS} + +ENV RUN_ACI_TESTS False +ADD requirements-tests.txt requirements-tests.txt + +RUN if [ "$RUN_TESTS" = true ]; then \ + venv/bin/pip install -r requirements-tests.txt ;\ + fi + +RUN if [ "$RUN_TESTS" = true ]; then \ + TEST=true PYTHONPATH="." venv/bin/py.test --timeout=7200 --verbose \ + --show-count -x --color=no ./ && rm -rf /var/tmp/; \ + fi + +RUN if [ "$RUN_TESTS" = true ]; then \ + TEST=true PYTHONPATH="." venv/bin/py.test --timeout=7200 --verbose \ + --show-count -x --color=no test/registry_tests.py && rm -rf /var/tmp/;\ + + fi + +RUN rm -rf /root/.cache + +RUN PYTHONPATH=. venv/bin/alembic heads | grep -E '^[0-9a-f]+ \(head\)$' > ALEMBIC_HEAD + +VOLUME ["/conf/stack", "/var/log", "/datastorage", "/tmp", "/conf/etcd"] + +EXPOSE 443 8443 80 diff --git a/README.md b/README.md index fbabe96f8..eb2cf11ad 100644 --- a/README.md +++ b/README.md @@ -55,12 +55,13 @@ High-level features include: 2. [Local Scripts](#local-scripts) 3. [Development inside Docker](#development-inside-docker) 4. [Adding a Python Dependency](#adding-a-python-dependency) - 5. [Running the Build System](#running-the-build-system) - 6. [To run individual tests](#to-run-individual-tests) + 5. [Adding a Yarn Dependency](#adding-a-yarn-dependency) + 6. [Running the Build System](#running-the-build-system) + 7. [To run individual tests](#to-run-individual-tests) 1. [Pytest](#pytest) 2. [Tox](#tox) - 7. [Running Migrations](#running-migrations) - 8. 
[How to run a build with tests for a push or merge](#how-to-run-a-build-with-tests-for-a-push-or-merge)
 4. **[Documentation](#documentation)**
    1. [Architecture at a Glance](#architecture-at-a-glance)
    2. [Terminology](#terminology)
@@ -95,6 +96,7 @@ docker-machine create -d virtualbox default
 eval "$(pyenv virtualenv-init -)"
 eval "$(pyenv init -)"
 eval $(/usr/local/bin/docker-machine env default)
+export PYTHONPATH="."
 
 # Some installs don't have /usr/include, required for finding SASL header files
 # This command might fail because of the rootfs is read-only. Refer to the following:
@@ -206,6 +208,23 @@ pip freeze > requirements.txt
 pyenv uninstall quay-deps
 ```
 
+### Adding a Yarn Dependency
+
+We use [Yarn](https://yarnpkg.com/) for frontend dependency management. The `yarn.lock` file ensures
+that we get consistent version installs using the `yarn install` command. However, new dependencies
+should be added using `yarn add <package>`. This will add an entry to `package.json` and `yarn.lock`.
+
+Occasionally there will be merge conflicts with `yarn.lock`. To resolve them, use the following (taken
+from [here](https://github.com/yarnpkg/yarn/issues/1776#issuecomment-269539948)).
+
+```sh
+git rebase origin/master
+git checkout origin/master -- yarn.lock
+yarn install
+git add yarn.lock
+git rebase --continue
+```
+
 ### Running the Build System
 
 TODO
diff --git a/_init.py b/_init.py
new file mode 100644
index 000000000..804323555
--- /dev/null
+++ b/_init.py
@@ -0,0 +1,35 @@
+import os
+import re
+import subprocess
+
+
+ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
+CONF_DIR = os.getenv("QUAYCONF", os.path.join(ROOT_DIR, "conf/"))
+STATIC_DIR = os.path.join(ROOT_DIR, 'static/')
+STATIC_LDN_DIR = os.path.join(STATIC_DIR, 'ldn/')
+STATIC_FONTS_DIR = os.path.join(STATIC_DIR, 'fonts/')
+TEMPLATE_DIR = os.path.join(ROOT_DIR, 'templates/')
+
+
+def _get_version_number_changelog():
+  try:
+    with open(os.path.join(ROOT_DIR, 'CHANGELOG.md')) as f:
+      return re.search(r'(v[0-9]+\.[0-9]+\.[0-9]+)', f.readline()).group(0)
+  except IOError:
+    return ''
+
+
+def _get_git_sha():
+  if os.path.exists("GIT_HEAD"):
+    with open(os.path.join(ROOT_DIR, "GIT_HEAD")) as f:
+      return f.read()
+  else:
+    try:
+      return subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()[0:8]
+    except (OSError, subprocess.CalledProcessError):
+      pass
+  return "unknown"
+
+
+__version__ = _get_version_number_changelog()
+__gitrev__ = _get_git_sha()
diff --git a/app.py b/app.py
index 0bc3da2d7..87ae5e234 100644
--- a/app.py
+++ b/app.py
@@ -1,3 +1,4 @@
+import hashlib
 import json
 import logging
 import os
@@ -13,7 +14,8 @@ from jwkest.jwk import RSAKey
 from werkzeug.routing import BaseConverter
 
 import features
-
+from _init import CONF_DIR
+from auth.auth_context import get_authenticated_user
 from avatars.avatars import Avatar
 from buildman.manager.buildcanceller import BuildCanceller
 from data import database
@@ -31,6 +33,7 @@ from oauth.services.github import GithubOAuthService
 from oauth.services.gitlab import GitLabOAuthService
 from oauth.loginmanager import OAuthLoginManager
 from storage import Storage
+from util.log import filter_logs
 from util import get_app_url
 from util.saas.analytics import Analytics
 from util.saas.useranalytics import UserAnalytics
@@ -49,9 +52,10 @@ from util.tufmetadata.api import TUFMetadataAPI
 from util.security.instancekeys import InstanceKeys
 from
util.security.signing import Signer -OVERRIDE_CONFIG_DIRECTORY = 'conf/stack/' -OVERRIDE_CONFIG_YAML_FILENAME = 'conf/stack/config.yaml' -OVERRIDE_CONFIG_PY_FILENAME = 'conf/stack/config.py' + +OVERRIDE_CONFIG_DIRECTORY = os.path.join(CONF_DIR, 'stack/') +OVERRIDE_CONFIG_YAML_FILENAME = os.path.join(CONF_DIR, 'stack/config.yaml') +OVERRIDE_CONFIG_PY_FILENAME = os.path.join(CONF_DIR, 'stack/config.py') OVERRIDE_CONFIG_KEY = 'QUAY_OVERRIDE_CONFIG' @@ -102,6 +106,10 @@ if (app.config['PREFERRED_URL_SCHEME'] == 'https' and # Load features from config. features.import_features(app.config) +CONFIG_DIGEST = hashlib.sha256(json.dumps(app.config, default=str)).hexdigest()[0:8] + +logger.debug("Loaded config", extra={"config": app.config}) + class RequestWithId(Request): request_gen = staticmethod(urn_generator(['request'])) @@ -114,26 +122,60 @@ class RequestWithId(Request): @app.before_request def _request_start(): logger.debug('Starting request: %s', request.path) + logger.info("request-start", extra={"request_id": request.request_id}) + + +DEFAULT_FILTER = lambda x: '[FILTERED]' +FILTERED_VALUES = [ + {'key': ['password'], 'fn': DEFAULT_FILTER}, + {'key': ['user', 'password'], 'fn': DEFAULT_FILTER}, + {'key': ['blob'], 'fn': lambda x: x[0:8]} +] @app.after_request -def _request_end(r): +def _request_end(resp): + jsonbody = request.get_json(force=True, silent=True) + values = request.values.to_dict() + + if jsonbody and not isinstance(jsonbody, dict): + jsonbody = {'_parsererror': jsonbody} + + if isinstance(values, dict): + filter_logs(values, FILTERED_VALUES) + + extra = { + "endpoint": request.endpoint, + "request_id" : request.request_id, + "remote_addr": request.remote_addr, + "http_method": request.method, + "original_url": request.url, + "path": request.path, + "parameters": values, + "json_body": jsonbody, + "confsha": CONFIG_DIGEST, + } + + if request.user_agent is not None: + extra["user-agent"] = request.user_agent.string + + user = get_authenticated_user() + + if user: + extra['user'] = {'email': user.email, + 'uuid': user.uuid, + 'org': user.organization, + 'robot': user.robot} + + logger.info("request-end", extra=extra) + logger.debug('Ending request: %s', request.path) - return r + return resp -class InjectingFilter(logging.Filter): - def filter(self, record): - if _request_ctx_stack.top is not None: - record.msg = '[%s] %s' % (request.request_id, record.msg) - return True root_logger = logging.getLogger() -# Add the request id filter to all handlers of the root logger -for handler in root_logger.handlers: - handler.addFilter(InjectingFilter()) - app.request_class = RequestWithId # Register custom converters. 
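The `_request_end` hook added to app.py above masks sensitive request parameters before they reach the structured `request-end` log record: each `FILTERED_VALUES` entry pairs a key path with a masking function, which `util.log.filter_logs` applies in place. A minimal sketch of that masking pass, assuming the helper simply walks each key path into the parameter dict (the real implementation lives in `util/log.py` and may differ):

```python
# Illustrative sketch only -- not the actual util.log.filter_logs.
def filter_logs(values, filtered_fields):
  for field in filtered_fields:
    node = values
    # Walk every key in the path except the last; bail out if one is absent.
    for key in field['key'][:-1]:
      if not isinstance(node, dict) or key not in node:
        node = None
        break
      node = node[key]
    last = field['key'][-1]
    if isinstance(node, dict) and last in node:
      # Mask in place: '[FILTERED]' for passwords, an 8-char prefix for blobs.
      node[last] = field['fn'](node[last])

# Example: {'user': {'password': 'hunter2'}} becomes
# {'user': {'password': '[FILTERED]'}} under the DEFAULT_FILTER rule.
```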
diff --git a/application.py b/application.py index 810109d1d..4d5f578a2 100644 --- a/application.py +++ b/application.py @@ -1,6 +1,8 @@ +import os import logging import logging.config +from util.log import logfile_path from app import app as application @@ -12,5 +14,5 @@ import secscan if __name__ == '__main__': - logging.config.fileConfig('conf/logging_debug.conf', disable_existing_loggers=False) + logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False) application.run(port=5000, debug=True, threaded=True, host='0.0.0.0') diff --git a/auth/registry_jwt_auth.py b/auth/registry_jwt_auth.py index a9e912e8b..4208a8462 100644 --- a/auth/registry_jwt_auth.py +++ b/auth/registry_jwt_auth.py @@ -7,10 +7,10 @@ from flask import request, url_for from flask_principal import identity_changed, Identity from app import app, get_app_url, instance_keys -from .auth_context import set_grant_context, get_grant_context -from .permissions import repository_read_grant, repository_write_grant, repository_admin_grant -from util.names import parse_namespace_repository +from auth.auth_context import (set_grant_context, get_grant_context) +from auth.permissions import repository_read_grant, repository_write_grant, repository_admin_grant from util.http import abort +from util.names import parse_namespace_repository from util.security.registry_jwt import (ANONYMOUS_SUB, decode_bearer_header, InvalidBearerTokenException) from data import model @@ -18,8 +18,10 @@ from data import model logger = logging.getLogger(__name__) + CONTEXT_KINDS = ['user', 'token', 'oauth'] + ACCESS_SCHEMA = { 'type': 'array', 'description': 'List of access granted to the subject', diff --git a/boot.py b/boot.py old mode 100644 new mode 100755 index 4fd826425..545db0ed4 --- a/boot.py +++ b/boot.py @@ -13,6 +13,7 @@ from app import app from data.model.release import set_region_release from util.config.database import sync_database_with_config from util.generatepresharedkey import generate_key +from _init import CONF_DIR @lru_cache(maxsize=1) @@ -42,7 +43,7 @@ def setup_jwt_proxy(): """ Creates a service key for quay to use in the jwtproxy and generates the JWT proxy configuration. """ - if os.path.exists('conf/jwtproxy_conf.yaml'): + if os.path.exists(os.path.join(CONF_DIR, 'jwtproxy_conf.yaml')): # Proxy is already setup. return @@ -65,16 +66,17 @@ def setup_jwt_proxy(): registry = audience + '/keys' security_issuer = app.config.get('SECURITY_SCANNER_ISSUER_NAME', 'security_scanner') - with open("conf/jwtproxy_conf.yaml.jnj") as f: + with open(os.path.join(CONF_DIR, 'jwtproxy_conf.yaml.jnj')) as f: template = Template(f.read()) rendered = template.render( + conf_dir=CONF_DIR, audience=audience, registry=registry, key_id=quay_key_id, security_issuer=security_issuer, ) - with open('conf/jwtproxy_conf.yaml', 'w') as f: + with open(os.path.join(CONF_DIR, 'jwtproxy_conf.yaml'), 'w') as f: f.write(rendered) diff --git a/buildman/manager/executor.py b/buildman/manager/executor.py index 585267c05..4bcff9697 100644 --- a/buildman/manager/executor.py +++ b/buildman/manager/executor.py @@ -19,7 +19,7 @@ from buildman.asyncutil import AsyncWrapper from container_cloud_config import CloudConfigContext from app import metric_queue, app from util.metrics.metricqueue import duration_collector_async - +from _init import ROOT_DIR logger = logging.getLogger(__name__) @@ -29,7 +29,7 @@ ONE_HOUR = 60*60 _TAG_RETRY_COUNT = 3 # Number of times to retry adding tags. _TAG_RETRY_SLEEP = 2 # Number of seconds to wait between tag retries. 
-ENV = Environment(loader=FileSystemLoader('buildman/templates')) +ENV = Environment(loader=FileSystemLoader(os.path.join(ROOT_DIR, "buildman/templates"))) TEMPLATE = ENV.get_template('cloudconfig.yaml') CloudConfigContext().populate_jinja_environment(ENV) diff --git a/conf/gunicorn_local.py b/conf/gunicorn_local.py index f95d85cc1..bf312a5ef 100644 --- a/conf/gunicorn_local.py +++ b/conf/gunicorn_local.py @@ -1,10 +1,16 @@ +import sys +import os +sys.path.append(os.path.join(os.path.dirname(__file__), "../")) + +from util.log import logfile_path from Crypto import Random + +logconfig = logfile_path(debug=True) bind = '0.0.0.0:5000' workers = 2 worker_class = 'gevent' daemon = False -logconfig = 'conf/logging_debug.conf' pythonpath = '.' preload_app = True diff --git a/conf/gunicorn_registry.py b/conf/gunicorn_registry.py index 9d7f080c1..dce4c7ac8 100644 --- a/conf/gunicorn_registry.py +++ b/conf/gunicorn_registry.py @@ -1,12 +1,19 @@ +import sys +import os +sys.path.append(os.path.join(os.path.dirname(__file__), "../")) + +from util.log import logfile_path from Crypto import Random + +logconfig = logfile_path(debug=False) bind = 'unix:/tmp/gunicorn_registry.sock' workers = 8 worker_class = 'gevent' -logconfig = 'conf/logging.conf' pythonpath = '.' preload_app = True + def post_fork(server, worker): # Reset the Random library to ensure it won't raise the "PID check failed." error after # gunicorn forks. diff --git a/conf/gunicorn_secscan.py b/conf/gunicorn_secscan.py index 4b16b4399..04b7cdce9 100644 --- a/conf/gunicorn_secscan.py +++ b/conf/gunicorn_secscan.py @@ -1,12 +1,19 @@ +import sys +import os +sys.path.append(os.path.join(os.path.dirname(__file__), "../")) + +from util.log import logfile_path from Crypto import Random + +logconfig = logfile_path(debug=False) bind = 'unix:/tmp/gunicorn_secscan.sock' workers = 2 worker_class = 'gevent' -logconfig = 'conf/logging.conf' pythonpath = '.' preload_app = True + def post_fork(server, worker): # Reset the Random library to ensure it won't raise the "PID check failed." error after # gunicorn forks. diff --git a/conf/gunicorn_verbs.py b/conf/gunicorn_verbs.py index ad432ee2a..9f2c5aef1 100644 --- a/conf/gunicorn_verbs.py +++ b/conf/gunicorn_verbs.py @@ -1,12 +1,20 @@ +import sys +import os +sys.path.append(os.path.join(os.path.dirname(__file__), "../")) + +from util.log import logfile_path from Crypto import Random + +logconfig = logfile_path(debug=False) + bind = 'unix:/tmp/gunicorn_verbs.sock' workers = 4 -logconfig = 'conf/logging.conf' pythonpath = '.' preload_app = True timeout = 2000 # Because sync workers + def post_fork(server, worker): # Reset the Random library to ensure it won't raise the "PID check failed." error after # gunicorn forks. diff --git a/conf/gunicorn_web.py b/conf/gunicorn_web.py index b6a5ddbcd..411ae1190 100644 --- a/conf/gunicorn_web.py +++ b/conf/gunicorn_web.py @@ -1,9 +1,16 @@ +import sys +import os +sys.path.append(os.path.join(os.path.dirname(__file__), "../")) + +from util.log import logfile_path from Crypto import Random + +logconfig = logfile_path(debug=False) + bind = 'unix:/tmp/gunicorn_web.sock' workers = 2 worker_class = 'gevent' -logconfig = 'conf/logging.conf' pythonpath = '.' preload_app = True diff --git a/conf/init/certs_create.sh b/conf/init/certs_create.sh index d03b9da4d..888803580 100755 --- a/conf/init/certs_create.sh +++ b/conf/init/certs_create.sh @@ -1,8 +1,10 @@ #! 
/bin/bash set -e - +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} +cd ${QUAYDIR:-"/"} # Create certs for jwtproxy to mitm outgoing TLS connections echo '{"CN":"CA","key":{"algo":"rsa","size":2048}}' | cfssl gencert -initca - | cfssljson -bare mitm -cp mitm-key.pem /conf/mitm.key -cp mitm.pem /conf/mitm.cert +cp mitm-key.pem $QUAYCONF/mitm.key +cp mitm.pem $QUAYCONF/mitm.cert cp mitm.pem /usr/local/share/ca-certificates/mitm.crt diff --git a/conf/init/certs_install.sh b/conf/init/certs_install.sh index 9f440d8a6..0db7a5c4d 100755 --- a/conf/init/certs_install.sh +++ b/conf/init/certs_install.sh @@ -1,27 +1,39 @@ #! /bin/bash set -e +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} + +cd ${QUAYDIR:-"/"} # Add the custom LDAP certificate -if [ -e /conf/stack/ldap.crt ] +if [ -e $QUAYCONF/stack/ldap.crt ] then - cp /conf/stack/ldap.crt /usr/local/share/ca-certificates/ldap.crt + cp $QUAYCONF/stack/ldap.crt /usr/local/share/ca-certificates/ldap.crt fi # Add extra trusted certificates (as a directory) -if [ -d /conf/stack/extra_ca_certs ]; then - if test "$(ls -A "/conf/stack/extra_ca_certs")"; then - echo "Installing extra certificates found in /conf/stack/extra_ca_certs directory" - cp /conf/stack/extra_ca_certs/* /usr/local/share/ca-certificates/ - cat /conf/stack/extra_ca_certs/* >> /venv/lib/python2.7/site-packages/requests/cacert.pem +if [ -d $QUAYCONF/stack/extra_ca_certs ]; then + if test "$(ls -A "$QUAYCONF/stack/extra_ca_certs")"; then + echo "Installing extra certificates found in $QUAYCONF/stack/extra_ca_certs directory" + cp $QUAYCONF/stack/extra_ca_certs/* /usr/local/share/ca-certificates/ + cat $QUAYCONF/stack/extra_ca_certs/* >> venv/lib/python2.7/site-packages/requests/cacert.pem fi fi # Add extra trusted certificates (as a file) -if [ -f /conf/stack/extra_ca_certs ]; then - echo "Installing extra certificates found in /conf/stack/extra_ca_certs file" - csplit -z -f /usr/local/share/ca-certificates/extra-ca- /conf/stack/extra_ca_certs '/-----BEGIN CERTIFICATE-----/' '{*}' - cat /conf/stack/extra_ca_certs >> /venv/lib/python2.7/site-packages/requests/cacert.pem +if [ -f $QUAYCONF/stack/extra_ca_certs ]; then + echo "Installing extra certificates found in $QUAYCONF/stack/extra_ca_certs file" + csplit -z -f /usr/local/share/ca-certificates/extra-ca- $QUAYCONF/stack/extra_ca_certs '/-----BEGIN CERTIFICATE-----/' '{*}' + cat $QUAYCONF/stack/extra_ca_certs >> venv/lib/python2.7/site-packages/requests/cacert.pem fi +# Add extra trusted certificates (prefixed) +for f in $(find $QUAYCONF/stack/ -maxdepth 1 -type f -name "extra_ca*") +do + echo "Installing extra cert $f" + cp "$f" /usr/local/share/ca-certificates/ + cat "$f" >> venv/lib/python2.7/site-packages/requests/cacert.pem +done + # Update all CA certificates. update-ca-certificates diff --git a/conf/init/copy_config_files.sh b/conf/init/copy_config_files.sh index ceeebdb37..0aef306ef 100755 --- a/conf/init/copy_config_files.sh +++ b/conf/init/copy_config_files.sh @@ -1,11 +1,16 @@ #! 
/bin/sh +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} -if [ -e /conf/stack/robots.txt ] +cd ${QUAYDIR:-"/"} + + +if [ -e $QUAYCONF/stack/robots.txt ] then - cp /conf/stack/robots.txt /templates/robots.txt + cp $QUAYCONF/stack/robots.txt $QUAYPATH/templates/robots.txt fi -if [ -e /conf/stack/favicon.ico ] +if [ -e $QUAYCONF/stack/favicon.ico ] then - cp /conf/stack/favicon.ico /static/favicon.ico + cp $QUAYCONF/stack/favicon.ico $QUAYPATH/static/favicon.ico fi \ No newline at end of file diff --git a/conf/init/copy_syslog_config.sh b/conf/init/copy_syslog_config.sh index 7acd62b6b..853bed35f 100755 --- a/conf/init/copy_syslog_config.sh +++ b/conf/init/copy_syslog_config.sh @@ -1,6 +1,10 @@ #! /bin/sh +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} -if [ -e /conf/stack/syslog-ng-extra.conf ] +cd ${QUAYDIR:-"/"} + +if [ -e $QUAYCONF/stack/syslog-ng-extra.conf ] then - cp /conf/stack/syslog-ng-extra.conf /etc/syslog-ng/conf.d/ + cp $QUAYCONF/stack/syslog-ng-extra.conf /etc/syslog-ng/conf.d/ fi diff --git a/conf/init/nginx_conf_create.py b/conf/init/nginx_conf_create.py new file mode 100644 index 000000000..94826f59c --- /dev/null +++ b/conf/init/nginx_conf_create.py @@ -0,0 +1,51 @@ +import os +import os.path + +import yaml +import jinja2 + +QUAYPATH = os.getenv("QUAYPATH", ".") +QUAYDIR = os.getenv("QUAYDIR", "/") +QUAYCONF_DIR = os.getenv("QUAYCONF", os.path.join(QUAYDIR, QUAYPATH, "conf")) +STATIC_DIR = os.path.join(QUAYDIR, 'static/') + +def write_config(filename, **kwargs): + with open(filename + ".jnj") as f: + template = jinja2.Template(f.read()) + rendered = template.render(kwargs) + + with open(filename, 'w') as f: + f.write(rendered) + + +def generate_nginx_config(): + """ + Generates nginx config from the app config + """ + use_https = os.path.exists(os.path.join(QUAYCONF_DIR, 'stack/ssl.key')) + write_config(os.path.join(QUAYCONF_DIR, 'nginx/nginx.conf'), use_https=use_https) + + +def generate_server_config(config): + """ + Generates server config from the app config + """ + config = config or {} + tuf_server = config.get('TUF_SERVER', None) + tuf_host = config.get('TUF_HOST', None) + signing_enabled = config.get('FEATURE_SIGNING', False) + maximum_layer_size = config.get('MAXIMUM_LAYER_SIZE', '20G') + + write_config( + os.path.join(QUAYCONF_DIR, 'nginx/server-base.conf'), tuf_server=tuf_server, tuf_host=tuf_host, + signing_enabled=signing_enabled, maximum_layer_size=maximum_layer_size, static_dir=STATIC_DIR) + + +if __name__ == "__main__": + if os.path.exists(os.path.join(QUAYCONF_DIR, 'stack/config.yaml')): + with open(os.path.join(QUAYCONF_DIR, 'stack/config.yaml'), 'r') as f: + config = yaml.load(f) + else: + config = None + generate_server_config(config) + generate_nginx_config() diff --git a/conf/init/nginx_conf_create.sh b/conf/init/nginx_conf_create.sh index c023f5e4f..670d14c04 100755 --- a/conf/init/nginx_conf_create.sh +++ b/conf/init/nginx_conf_create.sh @@ -1,51 +1,8 @@ -#!/venv/bin/python +#!/bin/bash -import os.path +QUAYDIR=${QUAYDIR:-"/"} +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} -import yaml -import jinja2 - - -def write_config(filename, **kwargs): - with open(filename + ".jnj") as f: - template = jinja2.Template(f.read()) - rendered = template.render(kwargs) - - with open(filename, 'w') as f: - f.write(rendered) - - -def generate_nginx_config(): - """ - Generates nginx config from the app config - """ - use_https = os.path.exists('conf/stack/ssl.key') - write_config('conf/nginx/nginx.conf', - 
use_https=use_https) - - -def generate_server_config(config): - """ - Generates server config from the app config - """ - config = config or {} - tuf_server = config.get('TUF_SERVER', None) - tuf_host = config.get('TUF_HOST', None) - signing_enabled = config.get('FEATURE_SIGNING', False) - maximum_layer_size = config.get('MAXIMUM_LAYER_SIZE', '20G') - - write_config('conf/nginx/server-base.conf', - tuf_server=tuf_server, - tuf_host=tuf_host, - signing_enabled=signing_enabled, - maximum_layer_size=maximum_layer_size) - - -if __name__ == "__main__": - if os.path.exists('conf/stack/config.yaml'): - with open('conf/stack/config.yaml', 'r') as f: - config = yaml.load(f) - else: - config = None - generate_server_config(config) - generate_nginx_config() +cd $QUAYDIR +venv/bin/python $QUAYCONF/init/nginx_conf_create.py \ No newline at end of file diff --git a/conf/init/runmigration.sh b/conf/init/runmigration.sh index 8b006b745..01c722839 100755 --- a/conf/init/runmigration.sh +++ b/conf/init/runmigration.sh @@ -1,5 +1,6 @@ -#! /bin/bash +#!/bin/bash set -e +cd ${QUAYDIR:-"/"} # Run the database migration -PYTHONPATH=. venv/bin/alembic upgrade head \ No newline at end of file +PYTHONPATH=${QUAYPATH:-"."} venv/bin/alembic upgrade head \ No newline at end of file diff --git a/conf/init/service/blobuploadcleanupworker/run b/conf/init/service/blobuploadcleanupworker/run index 5f6f273ce..29759be69 100755 --- a/conf/init/service/blobuploadcleanupworker/run +++ b/conf/init/service/blobuploadcleanupworker/run @@ -2,7 +2,9 @@ echo 'Starting Blob upload cleanup worker' -cd / -venv/bin/python -m workers.blobuploadcleanupworker 2>&1 +QUAYPATH=${QUAYPATH:-"."} + +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.blobuploadcleanupworker.blobuploadcleanupworker 2>&1 echo 'Blob upload cleanup exited' \ No newline at end of file diff --git a/conf/init/service/buildlogsarchiver/run b/conf/init/service/buildlogsarchiver/run index df3d4b05f..f6b69f584 100755 --- a/conf/init/service/buildlogsarchiver/run +++ b/conf/init/service/buildlogsarchiver/run @@ -2,7 +2,8 @@ echo 'Starting build logs archiver worker' -cd / -venv/bin/python -m workers.buildlogsarchiver 2>&1 +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.buildlogsarchiver 2>&1 echo 'Diffs worker exited' \ No newline at end of file diff --git a/conf/init/service/buildmanager/run b/conf/init/service/buildmanager/run index c33ca96be..10eda0054 100755 --- a/conf/init/service/buildmanager/run +++ b/conf/init/service/buildmanager/run @@ -6,7 +6,9 @@ echo 'Starting internal build manager' monit # Run the build manager. 
-cd / +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +export PYTHONPATH=$QUAYPATH exec venv/bin/python -m buildman.builder 2>&1 echo 'Internal build manager exited' \ No newline at end of file diff --git a/conf/init/service/chunkcleanupworker/run b/conf/init/service/chunkcleanupworker/run index 57c9c5aa8..a16307d5a 100755 --- a/conf/init/service/chunkcleanupworker/run +++ b/conf/init/service/chunkcleanupworker/run @@ -2,7 +2,8 @@ echo 'Starting chunk cleanup worker' -cd / -venv/bin/python -m workers.chunkcleanupworker 2>&1 +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.chunkcleanupworker 2>&1 echo 'Chunk cleanup worker exited' \ No newline at end of file diff --git a/conf/init/service/gcworker/run b/conf/init/service/gcworker/run index 6a843d4b8..1f892342a 100755 --- a/conf/init/service/gcworker/run +++ b/conf/init/service/gcworker/run @@ -2,7 +2,8 @@ echo 'Starting GC worker' -cd / -venv/bin/python -m workers.gcworker 2>&1 +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.gc.gcworker 2>&1 -echo 'Repository GC exited' \ No newline at end of file +echo 'Repository GC exited' diff --git a/conf/init/service/globalpromstats/run b/conf/init/service/globalpromstats/run index fab1258fc..cd31b67ea 100755 --- a/conf/init/service/globalpromstats/run +++ b/conf/init/service/globalpromstats/run @@ -2,7 +2,8 @@ echo 'Starting global prometheus stats worker' -cd / -venv/bin/python -m workers.globalpromstats +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.globalpromstats echo 'Global prometheus stats exited' diff --git a/conf/init/service/gunicorn_registry/run b/conf/init/service/gunicorn_registry/run index 3c88fd0e3..3b38ea155 100755 --- a/conf/init/service/gunicorn_registry/run +++ b/conf/init/service/gunicorn_registry/run @@ -2,7 +2,10 @@ echo 'Starting gunicon' -cd / -nice -n 10 venv/bin/gunicorn -c conf/gunicorn_registry.py registry:application +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} + +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH nice -n 10 venv/bin/gunicorn -c $QUAYCONF/gunicorn_registry.py registry:application echo 'Gunicorn exited' \ No newline at end of file diff --git a/conf/init/service/gunicorn_secscan/run b/conf/init/service/gunicorn_secscan/run index d78ebedcd..23f24bf7d 100755 --- a/conf/init/service/gunicorn_secscan/run +++ b/conf/init/service/gunicorn_secscan/run @@ -2,7 +2,10 @@ echo 'Starting gunicon' -cd / -venv/bin/gunicorn -c conf/gunicorn_secscan.py secscan:application +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} + +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/gunicorn -c $QUAYCONF/gunicorn_secscan.py secscan:application echo 'Gunicorn exited' \ No newline at end of file diff --git a/conf/init/service/gunicorn_verbs/run b/conf/init/service/gunicorn_verbs/run index d76a7adcf..eb7d7e35e 100755 --- a/conf/init/service/gunicorn_verbs/run +++ b/conf/init/service/gunicorn_verbs/run @@ -2,7 +2,10 @@ echo 'Starting gunicon' -cd / -nice -n 10 venv/bin/gunicorn -c conf/gunicorn_verbs.py verbs:application +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} + +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH nice -n 10 venv/bin/gunicorn -c $QUAYCONF/gunicorn_verbs.py verbs:application echo 'Gunicorn exited' \ No newline at end of file diff --git a/conf/init/service/gunicorn_web/run b/conf/init/service/gunicorn_web/run index 86d107618..76ed8edde 100755 --- a/conf/init/service/gunicorn_web/run +++ 
b/conf/init/service/gunicorn_web/run @@ -2,7 +2,10 @@ echo 'Starting gunicon' -cd / -venv/bin/gunicorn -c conf/gunicorn_web.py web:application +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} + +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/gunicorn -c $QUAYCONF/gunicorn_web.py web:application echo 'Gunicorn exited' \ No newline at end of file diff --git a/conf/init/service/jwtproxy/run b/conf/init/service/jwtproxy/run index 263169cde..7c77b1cd7 100755 --- a/conf/init/service/jwtproxy/run +++ b/conf/init/service/jwtproxy/run @@ -1,12 +1,16 @@ #! /bin/bash -cd / -if [ -f conf/jwtproxy_conf.yaml ]; +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} + +if [ -f $QUAYCONF/jwtproxy_conf.yaml ]; then echo 'Starting jwtproxy' - /usr/local/bin/jwtproxy --config conf/jwtproxy_conf.yaml + /usr/local/bin/jwtproxy --config $QUAYCONF/jwtproxy_conf.yaml rm /tmp/jwtproxy_secscan.sock echo 'Jwtproxy exited' else - sleep 1 + sleep 1 fi diff --git a/conf/init/service/logrotateworker/run b/conf/init/service/logrotateworker/run index a99aa6ad3..57ffad5ff 100755 --- a/conf/init/service/logrotateworker/run +++ b/conf/init/service/logrotateworker/run @@ -2,7 +2,8 @@ echo 'Starting log rotation worker' -cd / -venv/bin/python -m workers.logrotateworker +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.logrotateworker echo 'Log rotation worker exited' diff --git a/conf/init/service/nginx/run b/conf/init/service/nginx/run index feb4f872d..85e4511f9 100755 --- a/conf/init/service/nginx/run +++ b/conf/init/service/nginx/run @@ -2,6 +2,11 @@ echo 'Starting nginx' -/usr/sbin/nginx -c /conf/nginx/nginx.conf +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} + +/usr/sbin/nginx -c $QUAYCONF/nginx/nginx.conf echo 'Nginx exited' diff --git a/conf/init/service/notificationworker/run b/conf/init/service/notificationworker/run index b149d9f34..0da5cf4ad 100755 --- a/conf/init/service/notificationworker/run +++ b/conf/init/service/notificationworker/run @@ -2,7 +2,9 @@ echo 'Starting notification worker' -cd / -venv/bin/python -m workers.notificationworker +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} + +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.notificationworker echo 'Notification worker exited' \ No newline at end of file diff --git a/conf/init/service/queuecleanupworker/run b/conf/init/service/queuecleanupworker/run index ba04d5019..96bdc88d5 100755 --- a/conf/init/service/queuecleanupworker/run +++ b/conf/init/service/queuecleanupworker/run @@ -2,7 +2,8 @@ echo 'Starting Queue cleanup worker' -cd / -venv/bin/python -m workers.queuecleanupworker 2>&1 +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.queuecleanupworker 2>&1 echo 'Repository Queue cleanup exited' \ No newline at end of file diff --git a/conf/init/service/repositoryactioncounter/run b/conf/init/service/repositoryactioncounter/run index 08e0e3164..d0aa9a748 100755 --- a/conf/init/service/repositoryactioncounter/run +++ b/conf/init/service/repositoryactioncounter/run @@ -2,7 +2,8 @@ echo 'Starting repository action count worker' -cd / -venv/bin/python -m workers.repositoryactioncounter 2>&1 +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.repositoryactioncounter 2>&1 echo 'Repository action worker exited' \ No newline at end of file diff --git 
a/conf/init/service/security_notification_worker/run b/conf/init/service/security_notification_worker/run index 83c94e686..d1dd24a07 100755 --- a/conf/init/service/security_notification_worker/run +++ b/conf/init/service/security_notification_worker/run @@ -2,7 +2,8 @@ echo 'Starting security scanner notification worker' -cd / -venv/bin/python -m workers.security_notification_worker 2>&1 +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.security_notification_worker 2>&1 echo 'Security scanner notification worker exited' diff --git a/conf/init/service/securityworker/run b/conf/init/service/securityworker/run index c40f9aa4b..4498cf00a 100755 --- a/conf/init/service/securityworker/run +++ b/conf/init/service/securityworker/run @@ -2,7 +2,8 @@ echo 'Starting security scanner worker' -cd / -venv/bin/python -m workers.securityworker 2>&1 +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.securityworker.securityworker 2>&1 echo 'Security scanner worker exited' diff --git a/conf/init/service/service_key_worker/run b/conf/init/service/service_key_worker/run index 20b578c24..470913439 100755 --- a/conf/init/service/service_key_worker/run +++ b/conf/init/service/service_key_worker/run @@ -2,7 +2,8 @@ echo 'Starting service key worker' -cd / -venv/bin/python -m workers.service_key_worker 2>&1 +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.service_key_worker 2>&1 echo 'Service key worker exited' diff --git a/conf/init/service/storagereplication/run b/conf/init/service/storagereplication/run index ed62731f8..1773070c6 100755 --- a/conf/init/service/storagereplication/run +++ b/conf/init/service/storagereplication/run @@ -2,7 +2,8 @@ echo 'Starting storage replication worker' -cd / -venv/bin/python -m workers.storagereplication 2>&1 +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.storagereplication 2>&1 echo 'Repository storage replication exited' \ No newline at end of file diff --git a/conf/init/service/teamsyncworker/run b/conf/init/service/teamsyncworker/run index 9f68ae897..17b45459f 100755 --- a/conf/init/service/teamsyncworker/run +++ b/conf/init/service/teamsyncworker/run @@ -2,7 +2,8 @@ echo 'Starting team synchronization worker' -cd / -venv/bin/python -m workers.teamsyncworker 2>&1 +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.teamsyncworker 2>&1 echo 'Team synchronization worker exited' \ No newline at end of file diff --git a/conf/init/zz_boot.sh b/conf/init/zz_boot.sh index ab760266b..abc20d3ca 100755 --- a/conf/init/zz_boot.sh +++ b/conf/init/zz_boot.sh @@ -1,3 +1,4 @@ #!/bin/bash +cd ${QUAYDIR:-"/"} -/venv/bin/python /boot.py +venv/bin/python ${QUAYPATH:-"."}/boot.py diff --git a/conf/jwtproxy_conf.yaml.jnj b/conf/jwtproxy_conf.yaml.jnj index 05f162400..7a626e8f5 100644 --- a/conf/jwtproxy_conf.yaml.jnj +++ b/conf/jwtproxy_conf.yaml.jnj @@ -2,8 +2,8 @@ jwtproxy: signer_proxy: enabled: true listen_addr: :8080 - ca_key_file: /conf/mitm.key - ca_crt_file: /conf/mitm.cert + ca_key_file: {{ conf_dir }}/mitm.key + ca_crt_file: {{ conf_dir }}/mitm.cert signer: issuer: quay @@ -13,7 +13,7 @@ jwtproxy: type: preshared options: key_id: {{ key_id }} - private_key_path: /conf/quay.pem + private_key_path: {{ conf_dir }}/quay.pem verifier_proxies: - enabled: true listen_addr: unix:/tmp/jwtproxy_secscan.sock diff --git a/conf/logging.conf b/conf/logging.conf index 
317803a24..e38521b66 100644 --- a/conf/logging.conf +++ b/conf/logging.conf @@ -1,11 +1,11 @@ [loggers] -keys=root +keys=root,gunicorn.error,gunicorn.access [handlers] keys=console [formatters] -keys=generic +keys=generic,json [logger_root] level=INFO @@ -19,3 +19,18 @@ args=(sys.stdout, ) [formatter_generic] format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s class=logging.Formatter + +[formatter_json] +class=loghandler.JsonFormatter + +[logger_gunicorn.error] +level=ERROR +handlers=console +propagate=0 +qualname=gunicorn.error + +[logger_gunicorn.access] +handlers=console +propagate=0 +qualname=gunicorn.access +level=DEBUG diff --git a/conf/logging_debug.conf b/conf/logging_debug.conf index 3413f3035..0609a8e58 100644 --- a/conf/logging_debug.conf +++ b/conf/logging_debug.conf @@ -1,11 +1,11 @@ [loggers] -keys=root,boto +keys=root,boto,gunicorn.error,gunicorn.access [handlers] keys=console [formatters] -keys=generic +keys=generic,json [logger_root] level=DEBUG @@ -16,11 +16,26 @@ level=INFO handlers=console qualname=boto +[logger_gunicorn.access] +handlers=console +propagate=0 +qualname=gunicorn.access +level=DEBUG + [handler_console] class=StreamHandler formatter=generic args=(sys.stdout, ) +[logger_gunicorn.error] +level=ERROR +handlers=console +propagate=0 +qualname=gunicorn.error + [formatter_generic] format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s class=logging.Formatter + +[formatter_json] +class=loghandler.JsonFormatter diff --git a/conf/logging_debug_json.conf b/conf/logging_debug_json.conf new file mode 100644 index 000000000..8f0d51c64 --- /dev/null +++ b/conf/logging_debug_json.conf @@ -0,0 +1,41 @@ +[loggers] +keys=root,boto,gunicorn.error,gunicorn.access + +[handlers] +keys=console + +[formatters] +keys=generic,json + +[logger_root] +level=DEBUG +handlers=console + +[logger_boto] +level=INFO +handlers=console +qualname=boto + +[logger_gunicorn.access] +handlers=console +propagate=0 +qualname=gunicorn.access +level=DEBUG + +[handler_console] +class=StreamHandler +formatter=json +args=(sys.stdout, ) + +[logger_gunicorn.error] +level=ERROR +handlers=console +propagate=0 +qualname=gunicorn.error + +[formatter_generic] +format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s +class=logging.Formatter + +[formatter_json] +class=loghandler.JsonFormatter diff --git a/conf/logging_json.conf b/conf/logging_json.conf new file mode 100644 index 000000000..4d9536380 --- /dev/null +++ b/conf/logging_json.conf @@ -0,0 +1,36 @@ +[loggers] +keys=root,gunicorn.error,gunicorn.access + +[handlers] +keys=console + +[formatters] +keys=json,generic + +[logger_root] +level=INFO +handlers=console + +[handler_console] +class=StreamHandler +formatter=json +args=(sys.stdout, ) + +[formatter_generic] +format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s +class=logging.Formatter + +[formatter_json] +class=loghandler.JsonFormatter + +[logger_gunicorn.error] +level=ERROR +handlers=console +propagate=0 +qualname=gunicorn.error + +[logger_gunicorn.access] +handlers=console +propagate=0 +qualname=gunicorn.access +level=DEBUG diff --git a/conf/nginx/server-base.conf.jnj b/conf/nginx/server-base.conf.jnj index 7e12b0053..9115f04b1 100644 --- a/conf/nginx/server-base.conf.jnj +++ b/conf/nginx/server-base.conf.jnj @@ -166,11 +166,11 @@ location /c1/ { location /static/ { # checks for static file, if not found proxy to app - alias /static/; + alias {{static_dir}}; error_page 404 /404; } -error_page 502 /static/502.html; +error_page 502 
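The logging configs above route gunicorn.error and gunicorn.access through the shared console handler and introduce a json formatter backed by loghandler.JsonFormatter, a module that is not part of this diff. A plausible minimal shape for it, assuming it just serializes the standard record fields (illustrative, not the real implementation):

    import json
    import logging

    class JsonFormatter(logging.Formatter):
        # Assumed shape of loghandler.JsonFormatter: one JSON object per log line,
        # carrying the same fields as the generic text formatter.
        def format(self, record):
            return json.dumps({
                'time': self.formatTime(record),
                'level': record.levelname,
                'logger': record.name,
                'pid': record.process,
                'message': record.getMessage(),
            })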
{{static_dir}}/502.html; location ~ ^/b1/controller(/?)(.*) { proxy_pass http://build_manager_controller_server/$2; diff --git a/config.py b/config.py index 8ca7ea7d6..671e174bd 100644 --- a/config.py +++ b/config.py @@ -3,6 +3,8 @@ from uuid import uuid4 import os.path import requests +from _init import ROOT_DIR, CONF_DIR + def build_requests_session(): sess = requests.Session() @@ -45,7 +47,7 @@ class ImmutableConfig(object): # Status tag config STATUS_TAGS = {} for tag_name in ['building', 'failed', 'none', 'ready', 'cancelled']: - tag_path = os.path.join('buildstatus', tag_name + '.svg') + tag_path = os.path.join(ROOT_DIR, 'buildstatus', tag_name + '.svg') with open(tag_path) as tag_svg: STATUS_TAGS[tag_name] = tag_svg.read() @@ -263,6 +265,10 @@ class DefaultConfig(ImmutableConfig): # Feature Flag: Whether to enable support for App repositories. FEATURE_APP_REGISTRY = False + # Feature Flag: If set to true, the _catalog endpoint returns public repositories. Otherwise, + # only private repositories can be returned. + FEATURE_PUBLIC_CATALOG = False + # The namespace to use for library repositories. # Note: This must remain 'library' until Docker removes their hard-coded namespace for libraries. # See: https://github.com/docker/docker/blob/master/registry/session.go#L320 @@ -296,7 +302,7 @@ class DefaultConfig(ImmutableConfig): # System logs. SYSTEM_LOGS_PATH = "/var/log/" SYSTEM_LOGS_FILE = "/var/log/syslog" - SYSTEM_SERVICES_PATH = "conf/init/service/" + SYSTEM_SERVICES_PATH = os.path.join(CONF_DIR, "init/service/") # Allow registry pulls when unable to write to the audit log ALLOW_PULLS_WITHOUT_STRICT_LOGGING = False @@ -349,7 +355,7 @@ class DefaultConfig(ImmutableConfig): SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS = [] # The indexing engine version running inside the security scanner. - SECURITY_SCANNER_ENGINE_VERSION_TARGET = 2 + SECURITY_SCANNER_ENGINE_VERSION_TARGET = 3 # The version of the API to use for the security scanner. SECURITY_SCANNER_API_VERSION = 'v1' @@ -400,11 +406,11 @@ class DefaultConfig(ImmutableConfig): INSTANCE_SERVICE_KEY_SERVICE = 'quay' # The location of the key ID file generated for this instance. - INSTANCE_SERVICE_KEY_KID_LOCATION = 'conf/quay.kid' + INSTANCE_SERVICE_KEY_KID_LOCATION = os.path.join(CONF_DIR, 'quay.kid') # The location of the private key generated for this instance. # NOTE: If changed, jwtproxy_conf.yaml.jnj must also be updated. - INSTANCE_SERVICE_KEY_LOCATION = 'conf/quay.pem' + INSTANCE_SERVICE_KEY_LOCATION = os.path.join(CONF_DIR, 'quay.pem') # This instance's service key expiration in minutes. 
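The config.py changes below anchor STATUS_TAGS, SYSTEM_SERVICES_PATH, and the instance service key paths on ROOT_DIR and CONF_DIR from the new _init module, so nothing assumes the process was started from /. The _init module itself is not shown in this diff; a plausible sketch, assuming it derives ROOT_DIR from the package location and honors the same QUAYCONF override as the run scripts:

    # _init.py (assumed shape; not part of this diff)
    import os

    ROOT_DIR = os.path.dirname(os.path.abspath(__file__))  # the Quay checkout
    CONF_DIR = os.environ.get('QUAYCONF', os.path.join(ROOT_DIR, 'conf'))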
INSTANCE_SERVICE_KEY_EXPIRATION = 120 diff --git a/data/database.py b/data/database.py index 457b9f77a..56d62e621 100644 --- a/data/database.py +++ b/data/database.py @@ -1006,6 +1006,7 @@ class RepositoryNotification(BaseModel): title = CharField(null=True) config_json = TextField() event_config_json = TextField(default='{}') + number_of_failures = IntegerField(default=0) class RepositoryAuthorizedEmail(BaseModel): diff --git a/data/interfaces/appr.py b/data/interfaces/appr.py index 03d7a1077..a8772ce18 100644 --- a/data/interfaces/appr.py +++ b/data/interfaces/appr.py @@ -10,8 +10,11 @@ from six import add_metaclass from app import storage, authentication from data import model, oci_model from data.database import Tag, Manifest, MediaType, Blob, Repository, Channel +from util.audit import track_and_log +from util.morecollections import AttrDict from util.names import parse_robot_username + class BlobDescriptor(namedtuple('Blob', ['mediaType', 'size', 'digest', 'urls'])): """ BlobDescriptor describes a blob with its mediatype, size and digest. A BlobDescriptor is used to retrieves the actual blob. @@ -55,10 +58,6 @@ class AppRegistryDataInterface(object): """ Interface that represents all data store interactions required by a App Registry. """ - @abstractmethod - def _application(self, package_name): - pass - @abstractmethod def list_applications(self, namespace=None, media_type=None, search=None, username=None, with_channels=False): @@ -175,6 +174,11 @@ class AppRegistryDataInterface(object): Raises: ChannelNotFound, PackageNotFound """ + @abstractmethod + def log_action(self, event_name, namespace_name, repo_name=None, analytics_name=None, + analytics_sample=1, **kwargs): + """ Logs an action to the audit log. """ + def _split_package_name(package): """ Returns the namespace and package-name """ @@ -200,6 +204,22 @@ class OCIAppModel(AppRegistryDataInterface): raise_package_not_found(package) return repo + def log_action(self, event_name, namespace_name, repo_name=None, analytics_name=None, + analytics_sample=1, metadata=None): + metadata = {} if metadata is None else metadata + + repo = None + if repo_name is not None: + db_repo = model.repository.get_repository(namespace_name, repo_name, + kind_filter='application') + repo = AttrDict({ + 'id': db_repo.id, + 'name': db_repo.name, + 'namespace_name': db_repo.namespace_user.username, + }) + track_and_log(event_name, repo, analytics_name=analytics_name, + analytics_sample=analytics_sample, **metadata) + def list_applications(self, namespace=None, media_type=None, search=None, username=None, with_channels=False): """ Lists all repositories that contain applications, with optional filtering to a specific @@ -248,7 +268,7 @@ class OCIAppModel(AppRegistryDataInterface): def create_application(self, package_name, visibility, owner): """ Create a new app repository, owner is the user who creates it """ ns, name = _split_package_name(package_name) - model.repository.create_repository(ns, name, owner, visibility, "application") + model.repository.create_repository(ns, name, owner, visibility, 'application') def application_exists(self, package_name): """ Create a new app repository, owner is the user who creates it """ diff --git a/data/migrations/versions/dc4af11a5f90_add_notification_number_of_failures_.py b/data/migrations/versions/dc4af11a5f90_add_notification_number_of_failures_.py new file mode 100644 index 000000000..ffde9d687 --- /dev/null +++ b/data/migrations/versions/dc4af11a5f90_add_notification_number_of_failures_.py @@ -0,0 +1,32 @@ 
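OCIAppModel.log_action above wraps the repository row in util.morecollections.AttrDict before handing it to track_and_log, because the audit helper expects attribute-style access on the repo object. The AttrDict implementation is not in this diff; it presumably amounts to something like (a sketch under that assumption):

    class AttrDict(dict):
        # Assumed shape: a dict whose keys are also readable as attributes,
        # so track_and_log can use repo.id / repo.name on a plain mapping.
        def __getattr__(self, key):
            try:
                return self[key]
            except KeyError:
                raise AttributeError(key)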
+"""add notification number of failures column + +Revision ID: dc4af11a5f90 +Revises: 53e2ac668296 +Create Date: 2017-05-16 17:24:02.630365 + +""" + +# revision identifiers, used by Alembic. +revision = 'dc4af11a5f90' +down_revision = '53e2ac668296' + +import sqlalchemy as sa +from alembic import op + + +def upgrade(tables): + op.add_column('repositorynotification', sa.Column('number_of_failures', + sa.Integer(), + nullable=False, + server_default='0')) + op.bulk_insert(tables.logentrykind, [ + {'name': 'reset_repo_notification'}, + ]) + + +def downgrade(tables): + op.drop_column('repositorynotification', 'number_of_failures') + op.execute(tables + .logentrykind + .delete() + .where(tables.logentrykind.c.name == op.inline_literal('reset_repo_notification'))) diff --git a/data/model/_basequery.py b/data/model/_basequery.py index 4a7c44a0d..28b0b1952 100644 --- a/data/model/_basequery.py +++ b/data/model/_basequery.py @@ -54,9 +54,13 @@ def get_public_repo_visibility(): return Visibility.get(name='public') -@lru_cache(maxsize=3) def _lookup_team_role(name): - return TeamRole.get(name=name) + return _lookup_team_roles()[name] + + +@lru_cache(maxsize=1) +def _lookup_team_roles(): + return {role.name:role for role in TeamRole.select()} def filter_to_repos_for_user(query, username=None, namespace=None, repo_kind='image', diff --git a/data/model/blob.py b/data/model/blob.py index cd830d6cb..0a3f1a39f 100644 --- a/data/model/blob.py +++ b/data/model/blob.py @@ -88,6 +88,17 @@ def get_stale_blob_upload(stale_timespan): return None +def get_blob_upload_by_uuid(upload_uuid): + """ Loads the upload with the given UUID, if any. """ + try: + return (BlobUpload + .select() + .where(BlobUpload.uuid == upload_uuid) + .get()) + except BlobUpload.DoesNotExist: + return None + + def get_blob_upload(namespace, repo_name, upload_uuid): """ Load the upload which is already in progress. 
""" diff --git a/data/model/label.py b/data/model/label.py index 467eca86f..aef0a9449 100644 --- a/data/model/label.py +++ b/data/model/label.py @@ -62,7 +62,7 @@ def create_manifest_label(tag_manifest, key, value, source_type_name, media_type media_type_id = _get_media_type_id(media_type_name) if media_type_id is None: - raise InvalidMediaTypeException + raise InvalidMediaTypeException() source_type_id = _get_label_source_type_id(source_type_name) diff --git a/data/model/notification.py b/data/model/notification.py index 194e2975b..3769d52cd 100644 --- a/data/model/notification.py +++ b/data/model/notification.py @@ -1,9 +1,9 @@ import json -from data.model import InvalidNotificationException, db_transaction from data.database import (Notification, NotificationKind, User, Team, TeamMember, TeamRole, RepositoryNotification, ExternalNotificationEvent, Repository, - ExternalNotificationMethod, Namespace) + ExternalNotificationMethod, Namespace, db_for_update) +from data.model import InvalidNotificationException, db_transaction def create_notification(kind_name, target, metadata={}, lookup_path=None): @@ -125,6 +125,30 @@ def delete_matching_notifications(target, kind_name, **kwargs): notification.delete_instance() +def increment_notification_failure_count(notification_id): + """ This increments the number of failures by one """ + RepositoryNotification.update(number_of_failures=RepositoryNotification.number_of_failures + 1).where( + RepositoryNotification.id == notification_id).execute() + + +def reset_notification_number_of_failures(namespace_name, repository_name, uuid): + """ This resets the number of failures for a repo notification to 0 """ + try: + notification = RepositoryNotification.select().where(RepositoryNotification.uuid == uuid).get() + if (notification.repository.namespace_user.username != namespace_name or + notification.repository.name != repository_name): + raise InvalidNotificationException('No repository notification found with uuid: %s' % uuid) + reset_number_of_failures_to_zero(notification.id) + return notification + except RepositoryNotification.DoesNotExist: + return None + + +def reset_number_of_failures_to_zero(notification_id): + """ This resets the number of failures for a repo notification to 0 """ + RepositoryNotification.update(number_of_failures=0).where(RepositoryNotification.id == notification_id).execute() + + def create_repo_notification(repo, event_name, method_name, method_config, event_config, title=None): event = ExternalNotificationEvent.get(ExternalNotificationEvent.name == event_name) method = ExternalNotificationMethod.get(ExternalNotificationMethod.name == method_name) @@ -134,23 +158,34 @@ def create_repo_notification(repo, event_name, method_name, method_config, event event_config_json=json.dumps(event_config)) +def _base_get_notification(uuid): + """ This is a base query for get statements """ + return (RepositoryNotification + .select(RepositoryNotification, Repository, Namespace) + .join(Repository) + .join(Namespace, on=(Repository.namespace_user == Namespace.id)) + .where(RepositoryNotification.uuid == uuid)) + + +def get_enabled_notification(uuid): + """ This returns a notification with less than 3 failures """ + try: + return _base_get_notification(uuid).where(RepositoryNotification.number_of_failures < 3).get() + except RepositoryNotification.DoesNotExist: + raise InvalidNotificationException('No repository notification found with uuid: %s' % uuid) + + def get_repo_notification(uuid): try: - return (RepositoryNotification - 
.select(RepositoryNotification, Repository, Namespace) - .join(Repository) - .join(Namespace, on=(Repository.namespace_user == Namespace.id)) - .where(RepositoryNotification.uuid == uuid) - .get()) + return _base_get_notification(uuid).get() except RepositoryNotification.DoesNotExist: - raise InvalidNotificationException('No repository notification found with id: %s' % uuid) + raise InvalidNotificationException('No repository notification found with uuid: %s' % uuid) def delete_repo_notification(namespace_name, repository_name, uuid): found = get_repo_notification(uuid) - if (found.repository.namespace_user.username != namespace_name or - found.repository.name != repository_name): - raise InvalidNotificationException('No repository notifiation found with id: %s' % uuid) + if found.repository.namespace_user.username != namespace_name or found.repository.name != repository_name: + raise InvalidNotificationException('No repository notification found with uuid: %s' % uuid) found.delete_instance() return found diff --git a/data/model/organization.py b/data/model/organization.py index 429d2566d..463604915 100644 --- a/data/model/organization.py +++ b/data/model/organization.py @@ -106,15 +106,28 @@ def remove_organization_member(org, user_obj): TeamMember.delete().where(TeamMember.id << members).execute() -def get_organization_member_set(orgname): +def get_organization_member_set(org, include_robots=False, users_filter=None): + """ Returns the set of all member usernames under the given organization, with optional + filtering by robots and/or by a specific set of User objects. + """ Org = User.alias() org_users = (User .select(User.username) .join(TeamMember) .join(Team) - .join(Org, on=(Org.id == Team.organization)) - .where(Org.username == orgname) + .where(Team.organization == org) .distinct()) + + if not include_robots: + org_users = org_users.where(User.robot == False) + + if users_filter is not None: + ids_list = [u.id for u in users_filter if u is not None] + if not ids_list: + return set() + + org_users = org_users.where(User.id << ids_list) + return {user.username for user in org_users} diff --git a/data/model/repository.py b/data/model/repository.py index e94b8b4fa..2cd2b6976 100644 --- a/data/model/repository.py +++ b/data/model/repository.py @@ -181,45 +181,59 @@ def garbage_collect_repo(repo, extra_candidate_set=None): logger.debug('No candidate images for GC for repo: %s', repo.id) return True - candidates_orphans = list(candidate_orphan_image_set) + all_images_removed = set() + all_storage_id_whitelist = set() + all_unreferenced_candidates = set() - with db_transaction(): - Candidate = Image.alias() - Tagged = Image.alias() - ancestor_superset = Tagged.ancestors ** db_concat_func(Candidate.ancestors, Candidate.id, '/%') + # Remove any images directly referenced by tags, to prune the working set. + direct_referenced = (RepositoryTag + .select(RepositoryTag.image) + .where(RepositoryTag.repository == repo.id, + RepositoryTag.image << list(candidate_orphan_image_set))) + candidate_orphan_image_set.difference_update([t.image_id for t in direct_referenced]) - # We are going to compute all images which are being referenced in two ways: - # First, we will find all images which have their ancestor paths appear in - # another image. Secondly, we union in all of the candidate images which are - # directly referenced by a tag. This can be used in a subquery to directly - # find which candidates are being referenced without any client side - # computation or extra round trips.
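Taken together, the notification changes above add a simple circuit breaker: each delivery failure bumps number_of_failures, get_enabled_notification stops returning a notification once it reaches 3 failures, and the reset helpers (exposed through a new API endpoint later in this diff) re-arm it. A sketch of the intended worker-side flow, assuming a send callable; the delivery code itself is not in this diff:

    from data import model

    def deliver(notification_uuid, send):
        try:
            # Raises InvalidNotificationException once 3+ failures have accumulated.
            notification = model.notification.get_enabled_notification(notification_uuid)
        except model.InvalidNotificationException:
            return

        try:
            send(notification)
        except Exception:
            # The third consecutive failure effectively disables the notification
            # until reset_notification_number_of_failures() is called.
            model.notification.increment_notification_failure_count(notification.id)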
- ancestor_referenced = (Candidate - .select(Candidate.id) - .join(Tagged, on=ancestor_superset) - .join(RepositoryTag, on=(Tagged.id == RepositoryTag.image)) + # Iteratively try to remove images from the database. The only images we can remove are those + # that are not referenced by tags AND not the parents of other images. We continue removing images + # until no changes are found. + iteration = 0 + making_progress = True + while candidate_orphan_image_set and making_progress: + iteration = iteration + 1 + logger.debug('Starting iteration #%s for GC of repository %s with candidates: %s', iteration, + repo.id, candidate_orphan_image_set) + candidates_orphans = list(candidate_orphan_image_set) + + with db_transaction(): + # Any image directly referenced by a tag that still exists, cannot be GCed. + direct_referenced = (RepositoryTag + .select(RepositoryTag.image) .where(RepositoryTag.repository == repo.id, - Candidate.id << candidates_orphans)) + RepositoryTag.image << candidates_orphans)) - direct_referenced = (RepositoryTag - .select(RepositoryTag.image) - .where(RepositoryTag.repository == repo.id, - RepositoryTag.image << candidates_orphans)) + # Any image which is the parent of another image, cannot be GCed. + parent_referenced = (Image + .select(Image.parent) + .where(Image.repository == repo.id, + Image.parent << candidates_orphans)) - referenced_candidates = (direct_referenced | ancestor_referenced) + referenced_candidates = (direct_referenced | parent_referenced) - # We desire a few pieces of information from the database from the following - # query: all of the image ids which are associated with this repository, - # and the storages which are associated with those images. - unreferenced_candidates = (Image - .select(Image.id, Image.docker_image_id, - ImageStorage.id, ImageStorage.uuid) - .join(ImageStorage) - .where(Image.id << candidates_orphans, - ~(Image.id << referenced_candidates))) + # We desire a few pieces of information from the database from the following + # query: all of the image ids which are associated with this repository, + # and the storages which are associated with those images. + unreferenced_candidates = (Image + .select(Image.id, Image.docker_image_id, + ImageStorage.id, ImageStorage.uuid) + .join(ImageStorage) + .where(Image.id << candidates_orphans, + ~(Image.id << referenced_candidates))) + + image_ids_to_remove = [candidate.id for candidate in unreferenced_candidates] + making_progress = bool(len(image_ids_to_remove)) + if len(image_ids_to_remove) == 0: + # No more candidates to remove. + break - image_ids_to_remove = [candidate.id for candidate in unreferenced_candidates] - if len(image_ids_to_remove) > 0: logger.info('Cleaning up unreferenced images: %s', image_ids_to_remove) storage_id_whitelist = set([candidate.storage_id for candidate in unreferenced_candidates]) @@ -249,15 +263,22 @@ def garbage_collect_repo(repo, extra_candidate_set=None): logger.info('Could not GC images %s; will try again soon', image_ids_to_remove) return False + # Add the images to the removed set and remove them from the candidate set. + all_images_removed.update(image_ids_to_remove) + all_storage_id_whitelist.update(storage_id_whitelist) + all_unreferenced_candidates.update(unreferenced_candidates) + + candidate_orphan_image_set.difference_update(image_ids_to_remove) + # If any images were removed, GC any orphaned storages. 
- if len(image_ids_to_remove) > 0: - logger.info('Garbage collecting storage for images: %s', image_ids_to_remove) - storage_ids_removed = set(storage.garbage_collect_storage(storage_id_whitelist)) + if len(all_images_removed) > 0: + logger.info('Garbage collecting storage for images: %s', all_images_removed) + storage_ids_removed = set(storage.garbage_collect_storage(all_storage_id_whitelist)) # If any storages were removed and cleanup callbacks are registered, call them with # the images+storages removed. if storage_ids_removed and config.image_cleanup_callbacks: - image_storages_removed = [candidate for candidate in unreferenced_candidates + image_storages_removed = [candidate for candidate in all_unreferenced_candidates if candidate.storage_id in storage_ids_removed] for callback in config.image_cleanup_callbacks: callback(image_storages_removed) @@ -616,3 +637,14 @@ def list_popular_public_repos(action_count_threshold, time_span, repo_kind='imag .group_by(RepositoryActionCount.repository, Repository.name, Namespace.username) .having(fn.Sum(RepositoryActionCount.count) >= action_count_threshold) .tuples()) + + +def is_empty(namespace_name, repository_name): + """ Returns if the repository referenced by the given namespace and name is empty. If the repo + doesn't exist, returns True. + """ + try: + tag.list_repository_tags(namespace_name, repository_name).limit(1).get() + return False + except RepositoryTag.DoesNotExist: + return True diff --git a/data/model/test/test_gc.py b/data/model/test/test_gc.py new file mode 100644 index 000000000..975de1660 --- /dev/null +++ b/data/model/test/test_gc.py @@ -0,0 +1,622 @@ +import hashlib +import pytest +import time + +from mock import patch + +from app import storage +from contextlib import contextmanager +from playhouse.test_utils import assert_query_count + +from data import model, database +from data.database import Image, ImageStorage, DerivedStorageForImage, Label, TagManifestLabel, Blob +from test.fixtures import * + + +ADMIN_ACCESS_USER = 'devtable' +PUBLIC_USER = 'public' + +REPO = 'somerepo' + +def _set_tag_expiration_policy(namespace, expiration_s): + namespace_user = model.user.get_user(namespace) + model.user.change_user_tag_expiration(namespace_user, expiration_s) + + +@pytest.fixture() +def default_tag_policy(initialized_db): + _set_tag_expiration_policy(ADMIN_ACCESS_USER, 0) + _set_tag_expiration_policy(PUBLIC_USER, 0) + + +def create_image(docker_image_id, repository_obj, username): + preferred = storage.preferred_locations[0] + image = model.image.find_create_or_link_image(docker_image_id, repository_obj, username, {}, + preferred) + image.storage.uploading = False + image.storage.save() + + # Create derived images as well. + model.image.find_or_create_derived_storage(image, 'squash', preferred) + model.image.find_or_create_derived_storage(image, 'aci', preferred) + + # Add some torrent info. + try: + database.TorrentInfo.get(storage=image.storage) + except database.TorrentInfo.DoesNotExist: + model.storage.save_torrent_info(image.storage, 1, 'helloworld') + + # Add some additional placements to the image. 
+ for location_name in ['local_eu']: + location = database.ImageStorageLocation.get(name=location_name) + + try: + database.ImageStoragePlacement.get(location=location, storage=image.storage) + except: + continue + + database.ImageStoragePlacement.create(location=location, storage=image.storage) + + return image.storage + + +def create_repository(namespace=ADMIN_ACCESS_USER, name=REPO, **kwargs): + user = model.user.get_user(namespace) + repo = model.repository.create_repository(namespace, name, user) + + # Populate the repository with the tags. + image_map = {} + for tag_name in kwargs: + image_ids = kwargs[tag_name] + parent = None + + for image_id in image_ids: + if not image_id in image_map: + image_map[image_id] = create_image(image_id, repo, namespace) + + v1_metadata = { + 'id': image_id, + } + if parent is not None: + v1_metadata['parent'] = parent.docker_image_id + + # Set the ancestors for the image. + parent = model.image.set_image_metadata(image_id, namespace, name, '', '', '', v1_metadata, + parent=parent) + + # Set the tag for the image. + tag_manifest, _ = model.tag.store_tag_manifest(namespace, name, tag_name, image_ids[-1], + 'sha:someshahere', '{}') + + # Add some labels to the tag. + model.label.create_manifest_label(tag_manifest, 'foo', 'bar', 'manifest') + model.label.create_manifest_label(tag_manifest, 'meh', 'grah', 'manifest') + + return repo + + +def gc_now(repository): + assert model.repository.garbage_collect_repo(repository) + + +def delete_tag(repository, tag, perform_gc=True): + model.tag.delete_tag(repository.namespace_user.username, repository.name, tag) + if perform_gc: + assert model.repository.garbage_collect_repo(repository) + + +def move_tag(repository, tag, docker_image_id): + model.tag.create_or_update_tag(repository.namespace_user.username, repository.name, tag, + docker_image_id) + assert model.repository.garbage_collect_repo(repository) + + +def assert_not_deleted(repository, *args): + for docker_image_id in args: + assert model.image.get_image_by_id(repository.namespace_user.username, repository.name, + docker_image_id) + + +def assert_deleted(repository, *args): + for docker_image_id in args: + try: + # Verify the image is missing when accessed by the repository. + model.image.get_image_by_id(repository.namespace_user.username, repository.name, + docker_image_id) + except model.DataModelException: + return + + assert False, 'Expected image %s to be deleted' % docker_image_id + + +def _get_dangling_storage_count(): + storage_ids = set([current.id for current in ImageStorage.select()]) + referenced_by_image = set([image.storage_id for image in Image.select()]) + referenced_by_derived = set([derived.derivative_id + for derived in DerivedStorageForImage.select()]) + + return len(storage_ids - referenced_by_image - referenced_by_derived) + + +def _get_dangling_label_count(): + label_ids = set([current.id for current in Label.select()]) + referenced_by_manifest = set([mlabel.label_id for mlabel in TagManifestLabel.select()]) + return len(label_ids - referenced_by_manifest) + + +@contextmanager +def assert_gc_integrity(expect_storage_removed=True): + """ Specialized assertion for ensuring that GC cleans up all dangling storages + and labels, invokes the callback for images removed and doesn't invoke the + callback for images *not* removed. + """ + # Add a callback for when images are removed. + removed_image_storages = [] + model.config.register_image_cleanup_callback(removed_image_storages.extend) + + # Store the number of dangling storages and labels. 
+ existing_storage_count = _get_dangling_storage_count() + existing_label_count = _get_dangling_label_count() + yield + + # Ensure the number of dangling storages and labels has not changed. + updated_storage_count = _get_dangling_storage_count() + assert updated_storage_count == existing_storage_count + + updated_label_count = _get_dangling_label_count() + assert updated_label_count == existing_label_count + + # Ensure that for each call to the image+storage cleanup callback, the image and its + # storage is not found *anywhere* in the database. + for removed_image_and_storage in removed_image_storages: + with pytest.raises(Image.DoesNotExist): + Image.get(id=removed_image_and_storage.id) + + with pytest.raises(ImageStorage.DoesNotExist): + ImageStorage.get(id=removed_image_and_storage.storage_id) + + with pytest.raises(ImageStorage.DoesNotExist): + ImageStorage.get(uuid=removed_image_and_storage.storage.uuid) + + assert expect_storage_removed == bool(removed_image_storages) + + # Ensure all CAS storage is in the storage engine. + preferred = storage.preferred_locations[0] + for storage_row in ImageStorage.select(): + if storage_row.cas_path: + storage.get_content({preferred}, storage.blob_path(storage_row.content_checksum)) + + for blob_row in Blob.select(): + storage.get_content({preferred}, storage.blob_path(blob_row.digest)) + + +def test_has_garbage(default_tag_policy, initialized_db): + """ Remove all existing repositories, then add one without garbage, check, then add one with + garbage, and check again. + """ + # Delete all existing repos. + for repo in database.Repository.select().order_by(database.Repository.id): + assert model.repository.purge_repository(repo.namespace_user.username, repo.name) + + # Change the time machine expiration on the namespace. + (database.User + .update(removed_tag_expiration_s=1000000000) + .where(database.User.username == ADMIN_ACCESS_USER) + .execute()) + + # Create a repository without any garbage. + repository = create_repository(latest=['i1', 'i2', 'i3']) + + # Ensure that no repositories are returned by the has garbage check. + assert model.repository.find_repository_with_garbage(1000000000) is None + + # Delete a tag. + delete_tag(repository, 'latest', perform_gc=False) + + # There should still not be any repositories with garbage, due to time machine. + assert model.repository.find_repository_with_garbage(1000000000) is None + + # Change the time machine expiration on the namespace. + (database.User + .update(removed_tag_expiration_s=0) + .where(database.User.username == ADMIN_ACCESS_USER) + .execute()) + + # Now we should find the repository for GC. + repository = model.repository.find_repository_with_garbage(0) + assert repository is not None + assert repository.name == REPO + + # GC the repository. + assert model.repository.garbage_collect_repo(repository) + + # There should now be no repositories with garbage. + assert model.repository.find_repository_with_garbage(0) is None + + +def test_find_garbage_policy_functions(default_tag_policy, initialized_db): + with assert_query_count(1): + one_policy = model.repository.get_random_gc_policy() + all_policies = model.repository._get_gc_expiration_policies() + assert one_policy in all_policies + + +def test_one_tag(default_tag_policy, initialized_db): + """ Create a repository with a single tag, then remove that tag and verify that the repository + is now empty. 
""" + with assert_gc_integrity(): + repository = create_repository(latest=['i1', 'i2', 'i3']) + delete_tag(repository, 'latest') + assert_deleted(repository, 'i1', 'i2', 'i3') + + +def test_two_tags_unshared_images(default_tag_policy, initialized_db): + """ Repository has two tags with no shared images between them. """ + with assert_gc_integrity(): + repository = create_repository(latest=['i1', 'i2', 'i3'], other=['f1', 'f2']) + delete_tag(repository, 'latest') + assert_deleted(repository, 'i1', 'i2', 'i3') + assert_not_deleted(repository, 'f1', 'f2') + + +def test_two_tags_shared_images(default_tag_policy, initialized_db): + """ Repository has two tags with shared images. Deleting the tag should only remove the + unshared images. + """ + with assert_gc_integrity(): + repository = create_repository(latest=['i1', 'i2', 'i3'], other=['i1', 'f1']) + delete_tag(repository, 'latest') + assert_deleted(repository, 'i2', 'i3') + assert_not_deleted(repository, 'i1', 'f1') + + +def test_unrelated_repositories(default_tag_policy, initialized_db): + """ Two repositories with different images. Removing the tag from one leaves the other's + images intact. + """ + with assert_gc_integrity(): + repository1 = create_repository(latest=['i1', 'i2', 'i3'], name='repo1') + repository2 = create_repository(latest=['j1', 'j2', 'j3'], name='repo2') + + delete_tag(repository1, 'latest') + + assert_deleted(repository1, 'i1', 'i2', 'i3') + assert_not_deleted(repository2, 'j1', 'j2', 'j3') + + +def test_related_repositories(default_tag_policy, initialized_db): + """ Two repositories with shared images. Removing the tag from one leaves the other's + images intact. + """ + with assert_gc_integrity(): + repository1 = create_repository(latest=['i1', 'i2', 'i3'], name='repo1') + repository2 = create_repository(latest=['i1', 'i2', 'j1'], name='repo2') + + delete_tag(repository1, 'latest') + + assert_deleted(repository1, 'i3') + assert_not_deleted(repository2, 'i1', 'i2', 'j1') + + +def test_inaccessible_repositories(default_tag_policy, initialized_db): + """ Two repositories under different namespaces should result in the images being deleted + but not completely removed from the database. + """ + with assert_gc_integrity(): + repository1 = create_repository(namespace=ADMIN_ACCESS_USER, latest=['i1', 'i2', 'i3']) + repository2 = create_repository(namespace=PUBLIC_USER, latest=['i1', 'i2', 'i3']) + + delete_tag(repository1, 'latest') + assert_deleted(repository1, 'i1', 'i2', 'i3') + assert_not_deleted(repository2, 'i1', 'i2', 'i3') + + + +def test_many_multiple_shared_images(default_tag_policy, initialized_db): + """ Repository has multiple tags with shared images. Delete all but one tag. + """ + with assert_gc_integrity(): + repository = create_repository(latest=['i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8', 'j0'], + master=['i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8', 'j1']) + + # Delete tag latest. Should only delete j0, since it is not shared. + delete_tag(repository, 'latest') + + assert_deleted(repository, 'j0') + assert_not_deleted(repository, 'i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8', 'j1') + + # Delete tag master. Should delete the rest of the images. + delete_tag(repository, 'master') + + assert_deleted(repository, 'i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8', 'j1') + + +def test_multiple_shared_images(default_tag_policy, initialized_db): + """ Repository has multiple tags with shared images. Selectively deleting the tags, and + verifying at each step. 
+ """ + with assert_gc_integrity(): + repository = create_repository(latest=['i1', 'i2', 'i3'], other=['i1', 'f1', 'f2'], + third=['t1', 't2', 't3'], fourth=['i1', 'f1']) + + # Current state: + # latest -> i3->i2->i1 + # other -> f2->f1->i1 + # third -> t3->t2->t1 + # fourth -> f1->i1 + + # Delete tag other. Should delete f2, since it is not shared. + delete_tag(repository, 'other') + assert_deleted(repository, 'f2') + assert_not_deleted(repository, 'i1', 'i2', 'i3', 't1', 't2', 't3', 'f1') + + # Current state: + # latest -> i3->i2->i1 + # third -> t3->t2->t1 + # fourth -> f1->i1 + + # Move tag fourth to i3. This should remove f1 since it is no longer referenced. + move_tag(repository, 'fourth', 'i3') + assert_deleted(repository, 'f1') + assert_not_deleted(repository, 'i1', 'i2', 'i3', 't1', 't2', 't3') + + # Current state: + # latest -> i3->i2->i1 + # third -> t3->t2->t1 + # fourth -> i3->i2->i1 + + # Delete tag 'latest'. This should do nothing since fourth is on the same branch. + delete_tag(repository, 'latest') + assert_not_deleted(repository, 'i1', 'i2', 'i3', 't1', 't2', 't3') + + # Current state: + # third -> t3->t2->t1 + # fourth -> i3->i2->i1 + + # Delete tag 'third'. This should remove t1->t3. + delete_tag(repository, 'third') + assert_deleted(repository, 't1', 't2', 't3') + assert_not_deleted(repository, 'i1', 'i2', 'i3') + + # Current state: + # fourth -> i3->i2->i1 + + # Add tag to i1. + move_tag(repository, 'newtag', 'i1') + assert_not_deleted(repository, 'i1', 'i2', 'i3') + + # Current state: + # fourth -> i3->i2->i1 + # newtag -> i1 + + # Delete tag 'fourth'. This should remove i2 and i3. + delete_tag(repository, 'fourth') + assert_deleted(repository, 'i2', 'i3') + assert_not_deleted(repository, 'i1') + + # Current state: + # newtag -> i1 + + # Delete tag 'newtag'. This should remove the remaining image. + delete_tag(repository, 'newtag') + assert_deleted(repository, 'i1') + + # Current state: + # (Empty) + + +def test_empty_gc(default_tag_policy, initialized_db): + with assert_gc_integrity(expect_storage_removed=False): + repository = create_repository(latest=['i1', 'i2', 'i3'], other=['i1', 'f1', 'f2'], + third=['t1', 't2', 't3'], fourth=['i1', 'f1']) + + gc_now(repository) + assert_not_deleted(repository, 'i1', 'i2', 'i3', 't1', 't2', 't3', 'f1', 'f2') + + +def test_time_machine_no_gc(default_tag_policy, initialized_db): + """ Repository has two tags with shared images. Deleting the tag should not remove any images + """ + with assert_gc_integrity(expect_storage_removed=False): + repository = create_repository(latest=['i1', 'i2', 'i3'], other=['i1', 'f1']) + _set_tag_expiration_policy(repository.namespace_user.username, 60*60*24) + + delete_tag(repository, 'latest') + assert_not_deleted(repository, 'i2', 'i3') + assert_not_deleted(repository, 'i1', 'f1') + + +def test_time_machine_gc(default_tag_policy, initialized_db): + """ Repository has two tags with shared images. Deleting the second tag should cause the images + for the first deleted tag to gc. 
+ """ + with assert_gc_integrity(): + repository = create_repository(latest=['i1', 'i2', 'i3'], other=['i1', 'f1']) + + _set_tag_expiration_policy(repository.namespace_user.username, 1) + + delete_tag(repository, 'latest') + assert_not_deleted(repository, 'i2', 'i3') + assert_not_deleted(repository, 'i1', 'f1') + + time.sleep(2) + + # This will cause the images associated with latest to gc + delete_tag(repository, 'other') + assert_deleted(repository, 'i2', 'i3') + assert_not_deleted(repository, 'i1', 'f1') + + +def test_images_shared_storage(default_tag_policy, initialized_db): + """ Repository with two tags, both with the same shared storage. Deleting the first + tag should delete the first image, but *not* its storage. + """ + with assert_gc_integrity(expect_storage_removed=False): + repository = create_repository() + + # Add two tags, each with their own image, but with the same storage. + image_storage = model.storage.create_v1_storage(storage.preferred_locations[0]) + + first_image = Image.create(docker_image_id='i1', + repository=repository, storage=image_storage, + ancestors='/') + + second_image = Image.create(docker_image_id='i2', + repository=repository, storage=image_storage, + ancestors='/') + + model.tag.store_tag_manifest(repository.namespace_user.username, repository.name, + 'first', first_image.docker_image_id, + 'sha:someshahere', '{}') + + model.tag.store_tag_manifest(repository.namespace_user.username, repository.name, + 'second', second_image.docker_image_id, + 'sha:someshahere', '{}') + + # Delete the first tag. + delete_tag(repository, 'first') + assert_deleted(repository, 'i1') + assert_not_deleted(repository, 'i2') + + +def test_image_with_cas(default_tag_policy, initialized_db): + """ A repository with a tag pointing to an image backed by CAS. Deleting and GCing the tag + should result in the storage and its CAS data being removed. + """ + with assert_gc_integrity(expect_storage_removed=True): + repository = create_repository() + + # Create an image storage record under CAS. + content = 'hello world' + digest = 'sha256:' + hashlib.sha256(content).hexdigest() + preferred = storage.preferred_locations[0] + storage.put_content({preferred}, storage.blob_path(digest), content) + + image_storage = database.ImageStorage.create(content_checksum=digest, uploading=False) + location = database.ImageStorageLocation.get(name=preferred) + database.ImageStoragePlacement.create(location=location, storage=image_storage) + + # Ensure the CAS path exists. + assert storage.exists({preferred}, storage.blob_path(digest)) + + # Create the image and the tag. + first_image = Image.create(docker_image_id='i1', + repository=repository, storage=image_storage, + ancestors='/') + + model.tag.store_tag_manifest(repository.namespace_user.username, repository.name, + 'first', first_image.docker_image_id, + 'sha:someshahere1', '{}') + + assert_not_deleted(repository, 'i1') + + # Delete the tag. + delete_tag(repository, 'first') + assert_deleted(repository, 'i1') + + # Ensure the CAS path is gone. + assert not storage.exists({preferred}, storage.blob_path(digest)) + + +def test_images_shared_cas(default_tag_policy, initialized_db): + """ A repository, each two tags, pointing to the same image, which has image storage + with the same *CAS path*, but *distinct records*. Deleting the first tag should delete the + first image, and its storage, but not the file in storage, as it shares its CAS path. 
+ """ + with assert_gc_integrity(expect_storage_removed=True): + repository = create_repository() + + # Create two image storage records with the same content checksum. + content = 'hello world' + digest = 'sha256:' + hashlib.sha256(content).hexdigest() + preferred = storage.preferred_locations[0] + storage.put_content({preferred}, storage.blob_path(digest), content) + + is1 = database.ImageStorage.create(content_checksum=digest, uploading=False) + is2 = database.ImageStorage.create(content_checksum=digest, uploading=False) + + location = database.ImageStorageLocation.get(name=preferred) + + database.ImageStoragePlacement.create(location=location, storage=is1) + database.ImageStoragePlacement.create(location=location, storage=is2) + + # Ensure the CAS path exists. + assert storage.exists({preferred}, storage.blob_path(digest)) + + # Create two images in the repository, and two tags, each pointing to one of the storages. + first_image = Image.create(docker_image_id='i1', + repository=repository, storage=is1, + ancestors='/') + + second_image = Image.create(docker_image_id='i2', + repository=repository, storage=is2, + ancestors='/') + + model.tag.store_tag_manifest(repository.namespace_user.username, repository.name, + 'first', first_image.docker_image_id, + 'sha:someshahere1', '{}') + + model.tag.store_tag_manifest(repository.namespace_user.username, repository.name, + 'second', second_image.docker_image_id, + 'sha:someshahere2', '{}') + + assert_not_deleted(repository, 'i1', 'i2') + + # Delete the first tag. + delete_tag(repository, 'first') + assert_deleted(repository, 'i1') + assert_not_deleted(repository, 'i2') + + # Ensure the CAS path still exists. + assert storage.exists({preferred}, storage.blob_path(digest)) + + +def test_images_shared_cas_with_new_blob_table(default_tag_policy, initialized_db): + """ A repository with a tag and image that shares its CAS path with a record in the new Blob + table. Deleting the first tag should delete the first image, and its storage, but not the + file in storage, as it shares its CAS path with the blob row. + """ + with assert_gc_integrity(expect_storage_removed=True): + repository = create_repository() + + # Create two image storage records with the same content checksum. + content = 'hello world' + digest = 'sha256:' + hashlib.sha256(content).hexdigest() + preferred = storage.preferred_locations[0] + storage.put_content({preferred}, storage.blob_path(digest), content) + + media_type = database.MediaType.get(name='text/plain') + + is1 = database.ImageStorage.create(content_checksum=digest, uploading=False) + database.Blob.create(digest=digest, size=0, media_type=media_type) + + location = database.ImageStorageLocation.get(name=preferred) + database.ImageStoragePlacement.create(location=location, storage=is1) + + # Ensure the CAS path exists. + assert storage.exists({preferred}, storage.blob_path(digest)) + + # Create the image in the repository, and the tag. + first_image = Image.create(docker_image_id='i1', + repository=repository, storage=is1, + ancestors='/') + + model.tag.store_tag_manifest(repository.namespace_user.username, repository.name, + 'first', first_image.docker_image_id, + 'sha:someshahere1', '{}') + + assert_not_deleted(repository, 'i1') + + # Delete the tag. 
+ delete_tag(repository, 'first') + assert_deleted(repository, 'i1') + + # Ensure the CAS path still exists, as it is referenced by the Blob table + assert storage.exists({preferred}, storage.blob_path(digest)) + + +def test_purge_repo(app): + """ Test that app registers delete_metadata function on repository deletions """ + with patch('app.tuf_metadata_api') as mock_tuf: + model.repository.purge_repository("ns", "repo") + mock_tuf.delete_metadata.assert_called_with("ns", "repo") diff --git a/data/model/test/test_repository.py b/data/model/test/test_repository.py index ee6bd0165..b5c6f741b 100644 --- a/data/model/test/test_repository.py +++ b/data/model/test/test_repository.py @@ -2,7 +2,7 @@ import pytest from peewee import IntegrityError -from data.model.repository import create_repository, purge_repository +from data.model.repository import create_repository, purge_repository, is_empty from test.fixtures import * def test_duplicate_repository_different_kinds(initialized_db): @@ -12,3 +12,10 @@ def test_duplicate_repository_different_kinds(initialized_db): # Try to create an app repo with the same name, which should fail. with pytest.raises(IntegrityError): create_repository('devtable', 'somenewrepo', None, repo_kind='application') + + +def test_is_empty(initialized_db): + create_repository('devtable', 'somenewrepo', None, repo_kind='image') + + assert is_empty('devtable', 'somenewrepo') + assert not is_empty('devtable', 'simple') diff --git a/data/oci_model/package.py b/data/oci_model/package.py index c91affe7a..61eae8e20 100644 --- a/data/oci_model/package.py +++ b/data/oci_model/package.py @@ -16,6 +16,9 @@ def list_packages_query(namespace=None, media_type=None, search_query=None, user username=username, search_fields=fields, limit=50) + if not repositories: + return [] + repo_query = (Repository .select(Repository, Namespace.username) .join(Namespace, on=(Repository.namespace_user == Namespace.id)) diff --git a/data/users/federated.py b/data/users/federated.py index 074818d34..3c61a5837 100644 --- a/data/users/federated.py +++ b/data/users/federated.py @@ -10,6 +10,8 @@ logger = logging.getLogger(__name__) UserInformation = namedtuple('UserInformation', ['username', 'email', 'id']) +DISABLED_MESSAGE = 'User creation is disabled. Please contact your administrator to gain access.' + class FederatedUsers(object): """ Base class for all federated users systems. """ @@ -96,7 +98,10 @@ class FederatedUsers(object): def _get_and_link_federated_user_info(self, username, email): db_user = model.user.verify_federated_login(self._federated_service, username) if not db_user: - # We must create the user in our db + # We must create the user in our db. Check to see if this is allowed.
+ if not features.USER_CREATION: + return (None, DISABLED_MESSAGE) + valid_username = None for valid_username in generate_valid_usernames(username): if model.user.is_username_unique(valid_username): diff --git a/data/users/test/test_users.py b/data/users/test/test_users.py new file mode 100644 index 000000000..007332880 --- /dev/null +++ b/data/users/test/test_users.py @@ -0,0 +1,36 @@ +import pytest + +from mock import patch + +from data.database import model +from data.users.federated import DISABLED_MESSAGE +from test.test_ldap import mock_ldap +from test.test_keystone_auth import fake_keystone + +from test.fixtures import * + +@pytest.mark.parametrize('auth_system_builder, user1, user2', [ + (mock_ldap, ('someuser', 'somepass'), ('testy', 'password')), + (fake_keystone, ('cool.user', 'password'), ('some.neat.user', 'foobar')), +]) +def test_auth_createuser(auth_system_builder, user1, user2, config, app): + with auth_system_builder() as auth: + # Login as a user and ensure a row in the database is created for them. + user, err = auth.verify_and_link_user(*user1) + assert err is None + assert user + + federated_info = model.user.lookup_federated_login(user, auth.federated_service) + assert federated_info is not None + + # Disable user creation. + with patch('features.USER_CREATION', False): + # Ensure that the existing user can login. + user_again, err = auth.verify_and_link_user(*user1) + assert err is None + assert user_again.id == user.id + + # Ensure that a new user cannot. + new_user, err = auth.verify_and_link_user(*user2) + assert new_user is None + assert err == DISABLED_MESSAGE diff --git a/endpoints/api/__init__.py b/endpoints/api/__init__.py index 2ffd359be..93df3a0eb 100644 --- a/endpoints/api/__init__.py +++ b/endpoints/api/__init__.py @@ -387,23 +387,6 @@ def define_json_response(schema_name): return wrapper -def disallow_under_trust(func): - """ Disallows the decorated operation for repository when it has trust enabled. 
- """ - @wraps(func) - def wrapper(self, *args, **kwargs): - if features.SIGNING: - namespace = args[0] - repository = args[1] - - repo = model.repository.get_repository(namespace, repository) - if repo is not None and repo.trust_enabled: - raise InvalidRequest('Cannot call this method on a repostory with trust enabled') - - return func(self, *args, **kwargs) - return wrapper - - import endpoints.api.billing import endpoints.api.build import endpoints.api.discovery @@ -429,4 +412,3 @@ import endpoints.api.trigger import endpoints.api.user import endpoints.api.secscan import endpoints.api.signing - diff --git a/endpoints/api/build.py b/endpoints/api/build.py index 2f9a96375..7a20c2872 100644 --- a/endpoints/api/build.py +++ b/endpoints/api/build.py @@ -19,8 +19,7 @@ from data.buildlogs import BuildStatusRetrievalError from endpoints.api import (RepositoryParamResource, parse_args, query_param, nickname, resource, require_repo_read, require_repo_write, validate_json_request, ApiResource, internal_only, format_date, api, path_param, - require_repo_admin, abort, disallow_for_app_repositories, - disallow_under_trust) + require_repo_admin, abort, disallow_for_app_repositories) from endpoints.building import start_build, PreparedBuild, MaximumBuildsQueuedException from endpoints.exception import Unauthorized, NotFound, InvalidRequest from util.names import parse_robot_username @@ -226,7 +225,6 @@ class RepositoryBuildList(RepositoryParamResource): @require_repo_write @nickname('requestRepoBuild') @disallow_for_app_repositories - @disallow_under_trust @validate_json_request('RepositoryBuildRequest') def post(self, namespace, repository): """ Request that a repository be built and pushed from the specified input. """ @@ -363,7 +361,6 @@ class RepositoryBuildResource(RepositoryParamResource): @require_repo_admin @nickname('cancelRepoBuild') - @disallow_under_trust @disallow_for_app_repositories def delete(self, namespace, repository, build_uuid): """ Cancels a repository build. 
""" diff --git a/endpoints/api/globalmessages.py b/endpoints/api/globalmessages.py index b27683a17..d6b491d2f 100644 --- a/endpoints/api/globalmessages.py +++ b/endpoints/api/globalmessages.py @@ -58,6 +58,11 @@ class GlobalUserMessages(ApiResource): 'message': { 'type': 'object', 'description': 'A single message', + 'required': [ + 'content', + 'media_type', + 'severity', + ], 'properties': { 'content': { 'type': 'string', diff --git a/endpoints/api/manifest.py b/endpoints/api/manifest.py index e96283f7f..66716da3f 100644 --- a/endpoints/api/manifest.py +++ b/endpoints/api/manifest.py @@ -10,6 +10,7 @@ from endpoints.exception import NotFound from data import model from digest import digest_tools +from util.validation import VALID_LABEL_KEY_REGEX BASE_MANIFEST_ROUTE = '/v1/repository//manifest/' MANIFEST_DIGEST_ROUTE = BASE_MANIFEST_ROUTE.format(digest_tools.DIGEST_PATTERN) @@ -92,9 +93,17 @@ class RepositoryManifestLabels(RepositoryParamResource): if label_validator.has_reserved_prefix(label_data['key']): abort(400, message='Label has a reserved prefix') - label = model.label.create_manifest_label(tag_manifest, label_data['key'], - label_data['value'], 'api', - media_type_name=label_data['media_type']) + label = None + try: + label = model.label.create_manifest_label(tag_manifest, label_data['key'], + label_data['value'], 'api', + media_type_name=label_data['media_type']) + except model.InvalidLabelKeyException: + abort(400, message='Label is of an invalid format or missing please use %s format for labels'.format( + VALID_LABEL_KEY_REGEX)) + except model.InvalidMediaTypeException: + abort(400, message='Media type is invalid please use a valid media type of text/plain or application/json') + metadata = { 'id': label.uuid, 'key': label_data['key'], diff --git a/endpoints/api/permission.py b/endpoints/api/permission.py index 259e9c9d6..83ed3e687 100644 --- a/endpoints/api/permission.py +++ b/endpoints/api/permission.py @@ -73,6 +73,9 @@ class RepositoryUserPermissionList(RepositoryParamResource): # This repository isn't under an org pass + # Load the permissions. + repo_perms = model.user.get_all_repo_users(namespace, repository) + # Determine how to wrap the role(s). def wrapped_role_view(repo_perm): return wrap_role_view_user(role_view(repo_perm), repo_perm.user) @@ -80,20 +83,17 @@ class RepositoryUserPermissionList(RepositoryParamResource): role_view_func = wrapped_role_view if org: - org_members = model.organization.get_organization_member_set(namespace) + users_filter = {perm.user for perm in repo_perms} + org_members = model.organization.get_organization_member_set(org, users_filter=users_filter) current_func = role_view_func def wrapped_role_org_view(repo_perm): - return wrap_role_view_org(current_func(repo_perm), repo_perm.user, - org_members) + return wrap_role_view_org(current_func(repo_perm), repo_perm.user, org_members) role_view_func = wrapped_role_org_view - # Load and return the permissions. 
- repo_perms = model.user.get_all_repo_users(namespace, repository) return { - 'permissions': {perm.user.username: role_view_func(perm) - for perm in repo_perms} + 'permissions': {perm.user.username: role_view_func(perm) for perm in repo_perms} } @@ -156,8 +156,8 @@ class RepositoryUserPermission(RepositoryParamResource): perm_view = wrap_role_view_user(role_view(perm), perm.user) try: - model.organization.get_organization(namespace) - org_members = model.organization.get_organization_member_set(namespace) + org = model.organization.get_organization(namespace) + org_members = model.organization.get_organization_member_set(org, users_filter={perm.user}) perm_view = wrap_role_view_org(perm_view, perm.user, org_members) except model.InvalidOrganizationException: # This repository is not part of an organization @@ -183,8 +183,8 @@ class RepositoryUserPermission(RepositoryParamResource): perm_view = wrap_role_view_user(role_view(perm), perm.user) try: - model.organization.get_organization(namespace) - org_members = model.organization.get_organization_member_set(namespace) + org = model.organization.get_organization(namespace) + org_members = model.organization.get_organization_member_set(org, users_filter={perm.user}) perm_view = wrap_role_view_org(perm_view, perm.user, org_members) except model.InvalidOrganizationException: # This repository is not part of an organization diff --git a/endpoints/api/prototype.py b/endpoints/api/prototype.py index f14458594..2944aab60 100644 --- a/endpoints/api/prototype.py +++ b/endpoints/api/prototype.py @@ -133,7 +133,10 @@ class PermissionPrototypeList(ApiResource): raise NotFound() permissions = model.permission.get_prototype_permissions(org) - org_members = model.organization.get_organization_member_set(orgname) + + users_filter = ({p.activating_user for p in permissions} | + {p.delegate_user for p in permissions}) + org_members = model.organization.get_organization_member_set(org, users_filter=users_filter) return {'prototypes': [prototype_view(p, org_members) for p in permissions]} raise Unauthorized() @@ -180,7 +183,9 @@ class PermissionPrototypeList(ApiResource): prototype = model.permission.add_prototype_permission(org, role_name, activating_user, delegate_user, delegate_team) log_prototype_action('create_prototype_permission', orgname, prototype) - org_members = model.organization.get_organization_member_set(orgname) + + users_filter = {prototype.activating_user, prototype.delegate_user} + org_members = model.organization.get_organization_member_set(org, users_filter=users_filter) return prototype_view(prototype, org_members) raise Unauthorized() @@ -257,7 +262,9 @@ class PermissionPrototype(ApiResource): log_prototype_action('modify_prototype_permission', orgname, prototype, original_role=existing.role.name) - org_members = model.organization.get_organization_member_set(orgname) + + users_filter = {prototype.activating_user, prototype.delegate_user} + org_members = model.organization.get_organization_member_set(org, users_filter=users_filter) return prototype_view(prototype, org_members) raise Unauthorized() diff --git a/endpoints/api/repositorynotification.py b/endpoints/api/repositorynotification.py index ac14ec2e0..ab81d2c5b 100644 --- a/endpoints/api/repositorynotification.py +++ b/endpoints/api/repositorynotification.py @@ -2,6 +2,7 @@ import json +import logging from flask import request from app import notification_queue @@ -14,7 +15,7 @@ from endpoints.notificationmethod import (NotificationMethod, CannotValidateNotificationMethodException) 
from endpoints.notificationhelper import build_notification_data from data import model - +logger = logging.getLogger(__name__) def notification_view(note): config = {} @@ -36,6 +37,7 @@ def notification_view(note): 'config': config, 'title': note.title, 'event_config': event_config, + 'number_of_failures': note.number_of_failures, } @@ -154,6 +156,20 @@ class RepositoryNotification(RepositoryParamResource): return 'No Content', 204 + @require_repo_admin + @nickname('resetRepositoryNotificationFailures') + @disallow_for_app_repositories + def post(self, namespace, repository, uuid): + """ Resets repository notification to 0 failures. """ + reset = model.notification.reset_notification_number_of_failures(namespace, repository, uuid) + if reset is not None: + log_action('reset_repo_notification', namespace, + {'repo': repository, 'namespace': namespace, 'notification_id': uuid, + 'event': reset.event.name, 'method': reset.method.name}, + repo=model.repository.get_repository(namespace, repository)) + + return 'No Content', 204 + @resource('/v1/repository//notification//test') @path_param('repository', 'The full path of the repository. e.g. namespace/name') diff --git a/endpoints/api/robot.py b/endpoints/api/robot.py index 8f1cbde73..b2bf36563 100644 --- a/endpoints/api/robot.py +++ b/endpoints/api/robot.py @@ -78,7 +78,7 @@ class UserRobotList(ApiResource): @nickname('getUserRobots') @parse_args() @query_param('permissions', - 'Whether to include repostories and teams in which the robots have permission.', + 'Whether to include repositories and teams in which the robots have permission.', type=truthy_bool, default=False) def get(self, parsed_args): """ List the available robots for the user. """ diff --git a/endpoints/api/signing.py b/endpoints/api/signing.py index 2758a999d..aa426ff7c 100644 --- a/endpoints/api/signing.py +++ b/endpoints/api/signing.py @@ -27,9 +27,4 @@ class RepositorySignatures(RepositoryParamResource): if repo is None or not repo.trust_enabled: raise NotFound() - tag_data, expiration = tuf_metadata_api.get_default_tags_with_expiration(namespace, repository) - return { - 'tags': tag_data, - 'expiration': expiration - } - + return {'delegations': tuf_metadata_api.get_all_tags_with_expiration(namespace, repository)} diff --git a/endpoints/api/superuser.py b/endpoints/api/superuser.py index 356193308..fab711bd7 100644 --- a/endpoints/api/superuser.py +++ b/endpoints/api/superuser.py @@ -32,6 +32,7 @@ from util.useremails import send_confirmation_email, send_recovery_email from util.license import decode_license, LicenseDecodeError from util.security.ssl import load_certificate, CertInvalidException from util.config.validator import EXTRA_CA_DIRECTORY +from _init import ROOT_DIR logger = logging.getLogger(__name__) @@ -179,7 +180,7 @@ class ChangeLog(ApiResource): def get(self): """ Returns the change log for this installation. 
""" if SuperUserPermission().can(): - with open('CHANGELOG.md', 'r') as f: + with open(os.path.join(ROOT_DIR, 'CHANGELOG.md'), 'r') as f: return { 'log': f.read() } @@ -852,7 +853,7 @@ class SuperUserCustomCertificates(ApiResource): cert_views = [] for extra_cert_path in extra_certs_found: try: - cert_full_path = os.path.join(EXTRA_CA_DIRECTORY, extra_cert_path) + cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, extra_cert_path) with config_provider.get_volume_file(cert_full_path) as f: certificate = load_certificate(f.read()) cert_views.append({ @@ -900,7 +901,7 @@ class SuperUserCustomCertificate(ApiResource): abort(400) logger.debug('Saving custom certificate %s', certpath) - cert_full_path = os.path.join(EXTRA_CA_DIRECTORY, certpath) + cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, certpath) config_provider.save_volume_file(cert_full_path, uploaded_file) logger.debug('Saved custom certificate %s', certpath) @@ -934,7 +935,7 @@ class SuperUserCustomCertificate(ApiResource): @verify_not_prod def delete(self, certpath): if SuperUserPermission().can(): - cert_full_path = os.path.join(EXTRA_CA_DIRECTORY, certpath) + cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, certpath) config_provider.remove_volume_file(cert_full_path) return '', 204 diff --git a/endpoints/api/tag.py b/endpoints/api/tag.py index adc4a60d2..1422b2971 100644 --- a/endpoints/api/tag.py +++ b/endpoints/api/tag.py @@ -2,18 +2,37 @@ from flask import request, abort -from endpoints.api import ( - resource, nickname, require_repo_read, require_repo_write, RepositoryParamResource, log_action, - validate_json_request, path_param, parse_args, query_param, truthy_bool, - disallow_for_app_repositories, disallow_under_trust) -from endpoints.exception import NotFound -from endpoints.api.image import image_view -from endpoints.v2.manifest import _generate_and_store_manifest -from data import model from auth.auth_context import get_authenticated_user +from data import model +from endpoints.api import (resource, nickname, require_repo_read, require_repo_write, + RepositoryParamResource, log_action, validate_json_request, path_param, + parse_args, query_param, truthy_bool, disallow_for_app_repositories) +from endpoints.api.image import image_view +from endpoints.api.tag_models_pre_oci import pre_oci_model +from endpoints.exception import NotFound +from endpoints.v2.manifest import _generate_and_store_manifest from util.names import TAG_ERROR, TAG_REGEX +def tag_view(tag): + tag_info = { + 'name': tag.name, + 'docker_image_id': tag.docker_image_id, + 'reversion': tag.reversion, + } + + if tag.lifetime_start_ts > 0: + tag_info['start_ts'] = tag.lifetime_start_ts + + if tag.lifetime_end_ts > 0: + tag_info['end_ts'] = tag.lifetime_end_ts + + if tag.manifest_list: + tag_info['manifest_digest'] = tag.manifest_list + + return tag_info + + @resource('/v1/repository//tag/') @path_param('repository', 'The full path of the repository. e.g. namespace/name') class ListRepositoryTags(RepositoryParamResource): @@ -28,39 +47,21 @@ class ListRepositoryTags(RepositoryParamResource): @query_param('page', 'Page index for the results. 
Default 1.', type=int, default=1) @nickname('listRepoTags') def get(self, namespace, repository, parsed_args): - repo = model.repository.get_repository(namespace, repository) - if not repo: - raise NotFound() - - def tag_view(tag): - tag_info = { - 'name': tag.name, - 'docker_image_id': tag.image.docker_image_id, - 'reversion': tag.reversion, - } - - if tag.lifetime_start_ts > 0: - tag_info['start_ts'] = tag.lifetime_start_ts - - if tag.lifetime_end_ts > 0: - tag_info['end_ts'] = tag.lifetime_end_ts - - if tag.id in manifest_map: - tag_info['manifest_digest'] = manifest_map[tag.id] - - return tag_info - specific_tag = parsed_args.get('specificTag') or None - page = max(1, parsed_args.get('page', 1)) limit = min(100, max(1, parsed_args.get('limit', 50))) - tags, manifest_map, more = model.tag.list_repository_tag_history(repo, page=page, size=limit, - specific_tag=specific_tag) + + tag_history = pre_oci_model.list_repository_tag_history(namespace_name=namespace, + repository_name=repository, page=page, + size=limit, specific_tag=specific_tag) + + if not tag_history: + raise NotFound() return { - 'tags': [tag_view(tag) for tag in tags], + 'tags': [tag_view(tag) for tag in tag_history.tags], 'page': page, - 'has_additional': more, + 'has_additional': tag_history.more, } @@ -73,9 +74,7 @@ class RepositoryTag(RepositoryParamResource): 'MoveTag': { 'type': 'object', 'description': 'Description of to which image a new or existing tag should point', - 'required': [ - 'image', - ], + 'required': ['image',], 'properties': { 'image': { 'type': 'string', @@ -87,7 +86,6 @@ class RepositoryTag(RepositoryParamResource): @require_repo_write @disallow_for_app_repositories - @disallow_under_trust @nickname('changeTagImage') @validate_json_request('MoveTag') def put(self, namespace, repository, tag): @@ -128,7 +126,6 @@ class RepositoryTag(RepositoryParamResource): @require_repo_write @disallow_for_app_repositories - @disallow_under_trust @nickname('deleteFullTag') def delete(self, namespace, repository, tag): """ Delete the specified repository tag. """ @@ -207,9 +204,7 @@ class RestoreTag(RepositoryParamResource): 'RestoreTag': { 'type': 'object', 'description': 'Restores a tag to a specific image', - 'required': [ - 'image', - ], + 'required': ['image',], 'properties': { 'image': { 'type': 'string', @@ -225,7 +220,6 @@ class RestoreTag(RepositoryParamResource): @require_repo_write @disallow_for_app_repositories - @disallow_under_trust @nickname('restoreTag') @validate_json_request('RestoreTag') def post(self, namespace, repository, tag): @@ -254,8 +248,8 @@ class RestoreTag(RepositoryParamResource): if existing_image is not None: log_data['original_image'] = existing_image.docker_image_id - log_action('revert_tag', namespace, log_data, repo=model.repository.get_repository( - namespace, repository)) + log_action('revert_tag', namespace, log_data, repo=model.repository.get_repository(namespace, + repository)) return { 'image_id': image_id, diff --git a/endpoints/api/tag_models_interface.py b/endpoints/api/tag_models_interface.py new file mode 100644 index 000000000..c98737f9a --- /dev/null +++ b/endpoints/api/tag_models_interface.py @@ -0,0 +1,43 @@ +from abc import ABCMeta, abstractmethod +from collections import namedtuple + +from six import add_metaclass + + +class Tag( + namedtuple('Tag', [ + 'name', 'image', 'reversion', 'lifetime_start_ts', 'lifetime_end_ts', 'manifest_list', + 'docker_image_id' + ])): + """ + Tag represents a name to an image. 
+  :type name: string
+  :type image: Image
+  :type reversion: boolean
+  :type lifetime_start_ts: int
+  :type lifetime_end_ts: int
+  :type manifest_list: [manifest_digest]
+  :type docker_image_id: string
+  """
+
+
+class RepositoryTagHistory(namedtuple('RepositoryTagHistory', ['tags', 'more'])):
+  """
+  RepositoryTagHistory represents a page of a repository's tag history: the tags found and
+  whether more are available.
+  :type tags: [Tag]
+  :type more: boolean
+  """
+
+
+@add_metaclass(ABCMeta)
+class TagDataInterface(object):
+  """
+  Interface that represents all data store interactions required by a Tag.
+  """
+
+  @abstractmethod
+  def list_repository_tag_history(self, namespace_name, repository_name, page=1, size=100,
+                                  specific_tag=None):
+    """
+    Returns a RepositoryTagHistory with a list of historic tags and whether there are more
+    tags than were returned.
+    """
diff --git a/endpoints/api/tag_models_pre_oci.py b/endpoints/api/tag_models_pre_oci.py
new file mode 100644
index 000000000..a1842b036
--- /dev/null
+++ b/endpoints/api/tag_models_pre_oci.py
@@ -0,0 +1,30 @@
+from data import model
+from endpoints.api.tag_models_interface import TagDataInterface, Tag, RepositoryTagHistory
+
+
+class PreOCIModel(TagDataInterface):
+  """
+  PreOCIModel implements the data model for tags using a database schema
+  before it was changed to support the OCI specification.
+  """
+
+  def list_repository_tag_history(self, namespace_name, repository_name, page=1, size=100,
+                                  specific_tag=None):
+    repository = model.repository.get_repository(namespace_name, repository_name)
+    if repository is None:
+      return None
+    tags, manifest_map, more = model.tag.list_repository_tag_history(repository, page, size,
+                                                                     specific_tag)
+    repository_tag_history = []
+    for tag in tags:
+      manifest_list = None
+      if tag.id in manifest_map:
+        manifest_list = manifest_map[tag.id]
+      repository_tag_history.append(
+        Tag(name=tag.name, image=tag.image, reversion=tag.reversion,
+            lifetime_start_ts=tag.lifetime_start_ts, lifetime_end_ts=tag.lifetime_end_ts,
+            manifest_list=manifest_list, docker_image_id=tag.image.docker_image_id))
+    return RepositoryTagHistory(tags=repository_tag_history, more=more)
+
+
+pre_oci_model = PreOCIModel()
diff --git a/endpoints/api/test/shared.py b/endpoints/api/test/shared.py
index 3d1f0cffa..1d35cdbc5 100644
--- a/endpoints/api/test/shared.py
+++ b/endpoints/api/test/shared.py
@@ -1,58 +1,10 @@
-import datetime
-import json
-
-from contextlib import contextmanager
-from data import model
+from endpoints.test.shared import conduct_call
 from endpoints.api import api
 
-CSRF_TOKEN_KEY = '_csrf_token'
-CSRF_TOKEN = '123csrfforme'
-
-
-@contextmanager
-def client_with_identity(auth_username, client):
-  with client.session_transaction() as sess:
-    if auth_username and auth_username is not None:
-      loaded = model.user.get_user(auth_username)
-      sess['user_id'] = loaded.uuid
-      sess['login_time'] = datetime.datetime.now()
-      sess[CSRF_TOKEN_KEY] = CSRF_TOKEN
-    else:
-      sess['user_id'] = 'anonymous'
-
-  yield client
-
-  with client.session_transaction() as sess:
-    sess['user_id'] = None
-    sess['login_time'] = None
-    sess[CSRF_TOKEN_KEY] = None
-
-
-def add_csrf_param(params):
-  """ Returns a params dict with the CSRF parameter added. """
-  params = params or {}
-  params[CSRF_TOKEN_KEY] = CSRF_TOKEN
-  return params
-
-
 def conduct_api_call(client, resource, method, params, body=None, expected_code=200):
   """ Conducts an API call to the given resource via the given client, and ensures its returned
       status matches the code given.
 
      Returns the response.
""" - params = add_csrf_param(params) - - final_url = api.url_for(resource, **params) - - headers = {} - headers.update({"Content-Type": "application/json"}) - - if body is not None: - body = json.dumps(body) - - rv = client.open(final_url, method=method, data=body, headers=headers) - msg = '%s %s: got %s expected: %s | %s' % (method, final_url, rv.status_code, expected_code, - rv.data) - assert rv.status_code == expected_code, msg - return rv + return conduct_call(client, resource, api.url_for, method, params, body, expected_code) diff --git a/endpoints/api/test/test_disallow_for_apps.py b/endpoints/api/test/test_disallow_for_apps.py index 27d96c8c2..b9112c291 100644 --- a/endpoints/api/test/test_disallow_for_apps.py +++ b/endpoints/api/test/test_disallow_for_apps.py @@ -16,7 +16,8 @@ from endpoints.api.trigger import (BuildTriggerList, BuildTrigger, BuildTriggerS BuildTriggerActivate, BuildTriggerAnalyze, ActivateBuildTrigger, TriggerBuildList, BuildTriggerFieldValues, BuildTriggerSources, BuildTriggerSourceNamespaces) -from endpoints.api.test.shared import client_with_identity, conduct_api_call +from endpoints.api.test.shared import conduct_api_call +from endpoints.test.shared import client_with_identity from test.fixtures import * BUILD_ARGS = {'build_uuid': '1234'} @@ -45,6 +46,7 @@ FIELD_ARGS = {'trigger_uuid': '1234', 'field_name': 'foobar'} (RepositoryNotificationList, 'post', None), (RepositoryNotification, 'get', NOTIFICATION_ARGS), (RepositoryNotification, 'delete', NOTIFICATION_ARGS), + (RepositoryNotification, 'post', NOTIFICATION_ARGS), (TestRepositoryNotification, 'post', NOTIFICATION_ARGS), (RepositoryImageSecurity, 'get', IMAGE_ARGS), (RepositoryManifestSecurity, 'get', MANIFEST_ARGS), diff --git a/endpoints/api/test/test_disallow_under_trust.py b/endpoints/api/test/test_disallow_under_trust.py deleted file mode 100644 index 2f5f381de..000000000 --- a/endpoints/api/test/test_disallow_under_trust.py +++ /dev/null @@ -1,50 +0,0 @@ -import pytest - -from data import model -from endpoints.api.build import RepositoryBuildList, RepositoryBuildResource -from endpoints.api.tag import RepositoryTag, RestoreTag -from endpoints.api.trigger import (BuildTrigger, BuildTriggerSubdirs, - BuildTriggerActivate, BuildTriggerAnalyze, ActivateBuildTrigger, - BuildTriggerFieldValues, BuildTriggerSources, - BuildTriggerSourceNamespaces) -from endpoints.api.test.shared import client_with_identity, conduct_api_call -from test.fixtures import * - -BUILD_ARGS = {'build_uuid': '1234'} -IMAGE_ARGS = {'imageid': '1234', 'image_id': 1234} -MANIFEST_ARGS = {'manifestref': 'sha256:abcd1234'} -LABEL_ARGS = {'manifestref': 'sha256:abcd1234', 'labelid': '1234'} -NOTIFICATION_ARGS = {'uuid': '1234'} -TAG_ARGS = {'tag': 'foobar'} -TRIGGER_ARGS = {'trigger_uuid': '1234'} -FIELD_ARGS = {'trigger_uuid': '1234', 'field_name': 'foobar'} - -@pytest.mark.parametrize('resource, method, params', [ - (RepositoryBuildList, 'post', None), - (RepositoryBuildResource, 'delete', BUILD_ARGS), - (RepositoryTag, 'put', TAG_ARGS), - (RepositoryTag, 'delete', TAG_ARGS), - (RestoreTag, 'post', TAG_ARGS), - (BuildTrigger, 'delete', TRIGGER_ARGS), - (BuildTriggerSubdirs, 'post', TRIGGER_ARGS), - (BuildTriggerActivate, 'post', TRIGGER_ARGS), - (BuildTriggerAnalyze, 'post', TRIGGER_ARGS), - (ActivateBuildTrigger, 'post', TRIGGER_ARGS), - (BuildTriggerFieldValues, 'post', FIELD_ARGS), - (BuildTriggerSources, 'post', TRIGGER_ARGS), - (BuildTriggerSourceNamespaces, 'get', TRIGGER_ARGS), -]) -def test_disallowed_for_apps(resource, 
method, params, client): - namespace = 'devtable' - repository = 'somerepo' - - devtable = model.user.get_user('devtable') - repo = model.repository.create_repository(namespace, repository, devtable, repo_kind='image') - model.repository.set_trust(repo, True) - - params = params or {} - params['repository'] = '%s/%s' % (namespace, repository) - - with client_with_identity('devtable', client) as cl: - conduct_api_call(cl, resource, method, params, None, 400) - diff --git a/endpoints/api/test/test_models_pre_oci.py b/endpoints/api/test/test_models_pre_oci.py new file mode 100644 index 000000000..7481511a0 --- /dev/null +++ b/endpoints/api/test/test_models_pre_oci.py @@ -0,0 +1,103 @@ +import pytest +from endpoints.api.tag_models_interface import RepositoryTagHistory, Tag +from mock import Mock + +from data import model +from endpoints.api.tag_models_pre_oci import pre_oci_model + +EMPTY_REPOSITORY = 'empty_repository' +EMPTY_NAMESPACE = 'empty_namespace' +BAD_REPOSITORY_NAME = 'bad_repository_name' +BAD_NAMESPACE_NAME = 'bad_namespace_name' + + +@pytest.fixture +def get_monkeypatch(monkeypatch): + return monkeypatch + + +def mock_out_get_repository(monkeypatch, namespace_name, repository_name): + def return_none(namespace_name, repository_name): + return None + + def return_repository(namespace_name, repository_name): + return 'repository' + + if namespace_name == BAD_NAMESPACE_NAME or repository_name == BAD_REPOSITORY_NAME: + return_function = return_none + else: + return_function = return_repository + + monkeypatch.setattr(model.repository, 'get_repository', return_function) + + +def create_mock_tag(name, reversion, lifetime_start_ts, lifetime_end_ts, mock_id, docker_image_id, + manifest_list): + tag_mock = Mock() + tag_mock.name = name + image_mock = Mock() + image_mock.docker_image_id = docker_image_id + tag_mock.image = image_mock + tag_mock.reversion = reversion + tag_mock.lifetime_start_ts = lifetime_start_ts + tag_mock.lifetime_end_ts = lifetime_end_ts + tag_mock.id = mock_id + tag_mock.manifest_list = manifest_list + tag = Tag(name=name, reversion=reversion, image=image_mock, docker_image_id=docker_image_id, + lifetime_start_ts=lifetime_start_ts, lifetime_end_ts=lifetime_end_ts, + manifest_list=manifest_list) + return tag_mock, tag + + +first_mock, first_tag = create_mock_tag('tag1', 'rev1', 'start1', 'end1', 'id1', + 'docker_image_id1', []) +second_mock, second_tag = create_mock_tag('tag2', 'rev2', 'start2', 'end2', 'id2', + 'docker_image_id2', ['manifest']) + + +def mock_out_list_repository_tag_history(monkeypatch, namespace_name, repository_name, page, size, + specific_tag): + def list_empty_tag_history(repository, page, size, specific_tag): + return [], {}, False + + def list_filled_tag_history(repository, page, size, specific_tag): + tags = [first_mock, second_mock] + return tags, { + first_mock.id: first_mock.manifest_list, + second_mock.id: second_mock.manifest_list + }, len(tags) > size + + def list_only_second_tag(repository, page, size, specific_tag): + tags = [second_mock] + return tags, {second_mock.id: second_mock.manifest_list}, len(tags) > size + + if namespace_name == EMPTY_NAMESPACE or repository_name == EMPTY_REPOSITORY: + return_function = list_empty_tag_history + else: + if specific_tag == 'tag2': + return_function = list_only_second_tag + else: + return_function = list_filled_tag_history + + monkeypatch.setattr(model.tag, 'list_repository_tag_history', return_function) + + +@pytest.mark.parametrize( + 'expected, namespace_name, repository_name, page, size, 
specific_tag', [ + (None, BAD_NAMESPACE_NAME, 'repository_name', 1, 100, None), + (None, 'namespace_name', BAD_REPOSITORY_NAME, 1, 100, None), + (RepositoryTagHistory(tags=[], more=False), EMPTY_NAMESPACE, EMPTY_REPOSITORY, 1, 100, None), + (RepositoryTagHistory(tags=[first_tag, second_tag], more=False), 'namespace', 'repository', 1, + 100, None), + (RepositoryTagHistory(tags=[first_tag, second_tag], more=True), 'namespace', 'repository', 1, + 1, None), + (RepositoryTagHistory(tags=[second_tag], more=False), 'namespace', 'repository', 1, 100, + 'tag2'), + ]) +def test_list_repository_tag_history(expected, namespace_name, repository_name, page, size, + specific_tag, get_monkeypatch): + mock_out_get_repository(get_monkeypatch, namespace_name, repository_name) + mock_out_list_repository_tag_history(get_monkeypatch, namespace_name, repository_name, page, + size, specific_tag) + assert pre_oci_model.list_repository_tag_history(namespace_name, repository_name, page, size, + specific_tag) == expected diff --git a/endpoints/api/test/test_organization.py b/endpoints/api/test/test_organization.py index 65b9a85d4..9a6525113 100644 --- a/endpoints/api/test/test_organization.py +++ b/endpoints/api/test/test_organization.py @@ -2,8 +2,9 @@ import pytest from data import model from endpoints.api import api -from endpoints.api.test.shared import client_with_identity, conduct_api_call +from endpoints.api.test.shared import conduct_api_call from endpoints.api.organization import Organization +from endpoints.test.shared import client_with_identity from test.fixtures import * @pytest.mark.parametrize('expiration, expected_code', [ diff --git a/endpoints/api/test/test_repository.py b/endpoints/api/test/test_repository.py index d110f5760..999beb00d 100644 --- a/endpoints/api/test/test_repository.py +++ b/endpoints/api/test/test_repository.py @@ -2,8 +2,9 @@ import pytest from mock import patch, ANY, MagicMock -from endpoints.api.test.shared import client_with_identity, conduct_api_call +from endpoints.api.test.shared import conduct_api_call from endpoints.api.repository import RepositoryTrust, Repository +from endpoints.test.shared import client_with_identity from features import FeatureNameValue from test.fixtures import * @@ -52,8 +53,8 @@ def test_signing_disabled(client): params = {'repository': 'devtable/simple'} response = conduct_api_call(cl, Repository, 'GET', params).json assert not response['trust_enabled'] - - + + def test_sni_support(): import ssl assert ssl.HAS_SNI diff --git a/endpoints/api/test/test_search.py b/endpoints/api/test/test_search.py index 4efba0841..e67c3dbff 100644 --- a/endpoints/api/test/test_search.py +++ b/endpoints/api/test/test_search.py @@ -4,32 +4,33 @@ from playhouse.test_utils import assert_query_count from data.model import _basequery from endpoints.api.search import ConductRepositorySearch, ConductSearch -from endpoints.api.test.shared import client_with_identity, conduct_api_call +from endpoints.api.test.shared import conduct_api_call +from endpoints.test.shared import client_with_identity from test.fixtures import * -@pytest.mark.parametrize('query, expected_query_count', [ - ('simple', 7), - ('public', 6), - ('repository', 6), +@pytest.mark.parametrize('query', [ + ('simple'), + ('public'), + ('repository'), ]) -def test_repository_search(query, expected_query_count, client): +def test_repository_search(query, client): with client_with_identity('devtable', client) as cl: params = {'query': query} - with assert_query_count(expected_query_count): + with 
assert_query_count(6): result = conduct_api_call(cl, ConductRepositorySearch, 'GET', params, None, 200).json assert result['start_index'] == 0 assert result['page'] == 1 assert len(result['results']) -@pytest.mark.parametrize('query, expected_query_count', [ - ('simple', 8), - ('public', 8), - ('repository', 8), +@pytest.mark.parametrize('query', [ + ('simple'), + ('public'), + ('repository'), ]) -def test_search_query_count(query, expected_query_count, client): +def test_search_query_count(query, client): with client_with_identity('devtable', client) as cl: params = {'query': query} - with assert_query_count(expected_query_count): + with assert_query_count(8): result = conduct_api_call(cl, ConductSearch, 'GET', params, None, 200).json assert len(result['results']) diff --git a/endpoints/api/test/test_security.py b/endpoints/api/test/test_security.py index 9f1da90cc..68039aed7 100644 --- a/endpoints/api/test/test_security.py +++ b/endpoints/api/test/test_security.py @@ -2,13 +2,15 @@ import pytest from flask_principal import AnonymousIdentity from endpoints.api import api +from endpoints.api.repositorynotification import RepositoryNotification from endpoints.api.team import OrganizationTeamSyncing -from endpoints.api.test.shared import client_with_identity, conduct_api_call +from endpoints.api.test.shared import conduct_api_call from endpoints.api.repository import RepositoryTrust from endpoints.api.signing import RepositorySignatures from endpoints.api.search import ConductRepositorySearch from endpoints.api.superuser import SuperUserRepositoryBuildLogs, SuperUserRepositoryBuildResource from endpoints.api.superuser import SuperUserRepositoryBuildStatus +from endpoints.test.shared import client_with_identity from test.fixtures import * @@ -16,6 +18,8 @@ TEAM_PARAMS = {'orgname': 'buynlarge', 'teamname': 'owners'} BUILD_PARAMS = {'build_uuid': 'test-1234'} REPO_PARAMS = {'repository': 'devtable/someapp'} SEARCH_PARAMS = {'query': ''} +NOTIFICATION_PARAMS = {'namespace': 'devtable', 'repository': 'devtable/simple', 'uuid': 'some uuid'} + @pytest.mark.parametrize('resource,method,params,body,identity,expected', [ (OrganizationTeamSyncing, 'POST', TEAM_PARAMS, {}, None, 403), @@ -52,6 +56,11 @@ SEARCH_PARAMS = {'query': ''} (RepositorySignatures, 'GET', REPO_PARAMS, {}, 'reader', 403), (RepositorySignatures, 'GET', REPO_PARAMS, {}, 'devtable', 404), + (RepositoryNotification, 'POST', NOTIFICATION_PARAMS, {}, None, 403), + (RepositoryNotification, 'POST', NOTIFICATION_PARAMS, {}, 'freshuser', 403), + (RepositoryNotification, 'POST', NOTIFICATION_PARAMS, {}, 'reader', 403), + (RepositoryNotification, 'POST', NOTIFICATION_PARAMS, {}, 'devtable', 204), + (RepositoryTrust, 'POST', REPO_PARAMS, {'trust_enabled': True}, None, 403), (RepositoryTrust, 'POST', REPO_PARAMS, {'trust_enabled': True}, 'freshuser', 403), (RepositoryTrust, 'POST', REPO_PARAMS, {'trust_enabled': True}, 'reader', 403), diff --git a/endpoints/api/test/test_signing.py b/endpoints/api/test/test_signing.py index a0320d015..e941cee56 100644 --- a/endpoints/api/test/test_signing.py +++ b/endpoints/api/test/test_signing.py @@ -3,42 +3,53 @@ import pytest from collections import Counter from mock import patch -from endpoints.api.test.shared import client_with_identity, conduct_api_call +from endpoints.api.test.shared import conduct_api_call from endpoints.api.signing import RepositorySignatures +from endpoints.test.shared import client_with_identity from test.fixtures import * -VALID_TARGETS = { - 'latest': { - 'hashes': { - 'sha256': 
'mLmxwTyUrqIRDaz8uaBapfrp3GPERfsDg2kiMujlteo=' +VALID_TARGETS_MAP = { + "targets/ci": { + "targets": { + "latest": { + "hashes": { + "sha256": "2Q8GLEgX62VBWeL76axFuDj/Z1dd6Zhx0ZDM6kNwPkQ=" + }, + "length": 2111 + } + }, + "expiration": "2020-05-22T10:26:46.618176424-04:00" }, - 'length': 1500 - }, - 'test_tag': { - 'hashes': { - 'sha256': '1234123' - }, - 'length': 50 + "targets": { + "targets": { + "latest": { + "hashes": { + "sha256": "2Q8GLEgX62VBWeL76axFuDj/Z1dd6Zhx0ZDM6kNwPkQ=" + }, + "length": 2111 + } + }, + "expiration": "2020-05-22T10:26:01.953414888-04:00"} } -} + def tags_equal(expected, actual): - expected_tags = expected.get('tags') - actual_tags = actual.get('tags') + expected_tags = expected.get('delegations') + actual_tags = actual.get('delegations') if expected_tags and actual_tags: return Counter(expected_tags) == Counter(actual_tags) return expected == actual -@pytest.mark.parametrize('targets,expected', [ - (VALID_TARGETS, {'tags': VALID_TARGETS, 'expiration': 'expires'}), - ({'bad': 'tags'}, {'tags': {'bad': 'tags'}, 'expiration': 'expires'}), - ({}, {'tags': {}, 'expiration': 'expires'}), - (None, {'tags': None, 'expiration': 'expires'}), # API returns None on exceptions +@pytest.mark.parametrize('targets_map,expected', [ + (VALID_TARGETS_MAP, {'delegations': VALID_TARGETS_MAP}), + ({'bad': 'tags'}, {'delegations': {'bad': 'tags'}}), + ({}, {'delegations': {}}), + (None, {'delegations': None}), # API returns None on exceptions ]) -def test_get_signatures(targets, expected, client): +def test_get_signatures(targets_map, expected, client): with patch('endpoints.api.signing.tuf_metadata_api') as mock_tuf: - mock_tuf.get_default_tags_with_expiration.return_value = (targets, 'expires') + mock_tuf.get_all_tags_with_expiration.return_value = targets_map with client_with_identity('devtable', client) as cl: params = {'repository': 'devtable/trusted'} assert tags_equal(expected, conduct_api_call(cl, RepositorySignatures, 'GET', params, None, 200).json) diff --git a/endpoints/api/test/test_tag.py b/endpoints/api/test/test_tag.py index 0c80ef4ee..c8f578f53 100644 --- a/endpoints/api/test/test_tag.py +++ b/endpoints/api/test/test_tag.py @@ -1,9 +1,15 @@ +import json + import pytest -from mock import patch, Mock +from mock import patch, Mock, MagicMock, call + + +from endpoints.api.tag_models_interface import RepositoryTagHistory, Tag +from endpoints.api.test.shared import conduct_api_call +from endpoints.test.shared import client_with_identity +from endpoints.api.tag import RepositoryTag, RestoreTag, ListRepositoryTags -from endpoints.api.test.shared import client_with_identity, conduct_api_call -from endpoints.api.tag import RepositoryTag, RestoreTag from features import FeatureNameValue from test.fixtures import * @@ -80,6 +86,28 @@ def authd_client(client): yield cl +@pytest.fixture() +def list_repository_tag_history(): + def list_repository_tag_history(namespace_name, repository_name, page, size, specific_tag): + return RepositoryTagHistory(tags=[ + Tag(name='First Tag', image='image', reversion=False, lifetime_start_ts=0, lifetime_end_ts=0, manifest_list=[], + docker_image_id='first docker image id'), + Tag(name='Second Tag', image='second image', reversion=True, lifetime_start_ts=10, lifetime_end_ts=100, + manifest_list=[], docker_image_id='second docker image id')], more=False) + + with patch('endpoints.api.tag.pre_oci_model.list_repository_tag_history', side_effect=list_repository_tag_history): + yield + + +@pytest.fixture() +def find_no_repo_tag_history(): + def 
list_repository_tag_history(namespace_name, repository_name, page, size, specific_tag): + return None + + with patch('endpoints.api.tag.pre_oci_model.list_repository_tag_history', side_effect=list_repository_tag_history): + yield + + @pytest.mark.parametrize('test_image,test_tag,expected_status', [ ('image1', '-INVALID-TAG-NAME', 400), ('image1', '.INVALID-TAG-NAME', 400), @@ -93,7 +121,7 @@ def authd_client(client): ]) def test_move_tag(test_image, test_tag, expected_status, get_repo_image, get_repo_tag_image, create_or_update_tag, generate_manifest, authd_client): - params = {'repository': 'devtable/repo', 'tag': test_tag} + params = {'repository': 'devtable/simple', 'tag': test_tag} request_body = {'image': test_image} if expected_status is None: with pytest.raises(Exception): @@ -102,6 +130,62 @@ def test_move_tag(test_image, test_tag, expected_status, get_repo_image, get_rep conduct_api_call(authd_client, RepositoryTag, 'put', params, request_body, expected_status) +@pytest.mark.parametrize('namespace, repository, specific_tag, page, limit, expected_response_code, expected', [ + ('devtable', 'simple', None, 1, 10, 200, {'has_additional': False}), + ('devtable', 'simple', None, 1, 10, 200, {'page': 1}), + ('devtable', 'simple', None, 1, 10, 200, {'tags': [{'docker_image_id': 'first docker image id', + 'name': 'First Tag', + 'reversion': False}, + {'docker_image_id': 'second docker image id', + 'end_ts': 100, + 'name': 'Second Tag', + 'reversion': True, + 'start_ts': 10}]}), +]) +def test_list_repository_tags_view_is_correct(namespace, repository, specific_tag, page, limit, + list_repository_tag_history, expected_response_code, expected, + authd_client): + params = {'repository': namespace + '/' + repository, 'specificTag': specific_tag, 'page': page, 'limit': limit} + response = conduct_api_call(authd_client, ListRepositoryTags, 'get', params, expected_code=expected_response_code) + compare_list_history_tags_response(expected, response.json) + + +def compare_list_history_tags_response(expected, actual): + if 'has_additional' in expected: + assert expected['has_additional'] == actual['has_additional'] + + if 'page' in expected: + assert expected['page'] == actual['page'] + + if 'tags' in expected: + assert expected['tags'] == actual['tags'] + + +def test_no_repo_tag_history(find_no_repo_tag_history, authd_client): + params = {'repository': 'devtable/simple', 'specificTag': None, 'page': 1, 'limit': 10} + conduct_api_call(authd_client, ListRepositoryTags, 'get', params, expected_code=404) + + +@pytest.mark.parametrize( + 'specific_tag, page, limit, expected_specific_tag, expected_page, expected_limit', [ + (None, None, None, None, 1, 50), + ('specific_tag', 12, 13, 'specific_tag', 12, 13), + ('specific_tag', -1, 101, 'specific_tag', 1, 100), + ('specific_tag', 0, 0, 'specific_tag', 1, 1), + ]) +def test_repo_tag_history_param_parse(specific_tag, page, limit, expected_specific_tag, expected_page, expected_limit, + authd_client): + mock = MagicMock() + mock.return_value = RepositoryTagHistory(tags=[], more=False) + + with patch('endpoints.api.tag.pre_oci_model.list_repository_tag_history', side_effect=mock): + params = {'repository': 'devtable/simple', 'specificTag': specific_tag, 'page': page, 'limit': limit} + conduct_api_call(authd_client, ListRepositoryTags, 'get', params) + + assert mock.call_args == call(namespace_name='devtable', repository_name='simple', + page=expected_page, size=expected_limit, specific_tag=expected_specific_tag) + + 
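The expectations above mirror the clamping that ListRepositoryTags.get applies before it calls the model: page is forced to at least 1 and limit is clamped to the 1..100 range, with defaults of 1 and 50. A standalone restatement of that logic (the helper name is hypothetical), checked against the same cases:

  def clamp_history_params(page, limit):
    # Defaults mirror the query_param declarations (page=1, limit=50).
    page = max(1, page if page is not None else 1)
    limit = min(100, max(1, limit if limit is not None else 50))
    return page, limit

  assert clamp_history_params(None, None) == (1, 50)
  assert clamp_history_params(12, 13) == (12, 13)
  assert clamp_history_params(-1, 101) == (1, 100)
  assert clamp_history_params(0, 0) == (1, 1)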
 @pytest.mark.parametrize('test_manifest,test_tag,manifest_generated,expected_status', [
   (None, 'newtag', True, 200),
   (None, 'generatemanifestfail', True, None),
@@ -110,7 +194,7 @@ def test_restore_tag(test_manifest, test_tag, manifest_generated, expected_status, get_repository,
                      restore_tag_to_manifest, restore_tag_to_image, generate_manifest, authd_client):
-  params = {'repository': 'devtable/repo', 'tag': test_tag}
+  params = {'repository': 'devtable/simple', 'tag': test_tag}
   request_body = {'image': 'image1'}
   if test_manifest is not None:
     request_body['manifest_digest'] = test_manifest
@@ -121,4 +205,4 @@ def test_restore_tag(test_manifest, test_tag, manifest_generated, expected_statu
   conduct_api_call(authd_client, RestoreTag, 'post', params, request_body, expected_status)
 
   if manifest_generated:
-    generate_manifest.assert_called_with('devtable', 'repo', test_tag)
+    generate_manifest.assert_called_with('devtable', 'simple', test_tag)
diff --git a/endpoints/api/test/test_team.py b/endpoints/api/test/test_team.py
index c40f8f199..9a17a36e4 100644
--- a/endpoints/api/test/test_team.py
+++ b/endpoints/api/test/test_team.py
@@ -4,9 +4,11 @@ from mock import patch
 
 from data import model
 from endpoints.api import api
-from endpoints.api.test.shared import client_with_identity, conduct_api_call
+from endpoints.api.test.shared import conduct_api_call
 from endpoints.api.team import OrganizationTeamSyncing, TeamMemberList
 from endpoints.api.organization import Organization
+from endpoints.test.shared import client_with_identity
+
 from test.test_ldap import mock_ldap
 from test.fixtures import *
diff --git a/endpoints/api/test/test_trigger.py b/endpoints/api/test/test_trigger.py
index 48339c9d4..32086ffbf 100644
--- a/endpoints/api/test/test_trigger.py
+++ b/endpoints/api/test/test_trigger.py
@@ -1,6 +1,6 @@
 import pytest
 
-from endpoints.api.trigger import is_parent
+from endpoints.api.trigger_analyzer import is_parent
 
 
 @pytest.mark.parametrize('context,dockerfile_path,expected', [
diff --git a/endpoints/api/test/test_trigger_analyzer.py b/endpoints/api/test/test_trigger_analyzer.py
new file mode 100644
index 000000000..881bad8a3
--- /dev/null
+++ b/endpoints/api/test/test_trigger_analyzer.py
@@ -0,0 +1,152 @@
+import pytest
+from mock import Mock
+
+from auth import permissions
+from data import model
+from endpoints.api.trigger_analyzer import TriggerAnalyzer
+from util import dockerfileparse
+
+BAD_PATH = "\"server_hostname/\" is not a valid Quay repository path"
+
+EMPTY_CONF = {}
+
+GOOD_CONF = {'context': '/', 'dockerfile_path': '/file'}
+
+BAD_CONF = {'context': 'context', 'dockerfile_path': 'dockerfile_path'}
+
+ONE_ROBOT = {'can_read': False, 'is_robot': True, 'kind': 'user', 'name': 'name'}
+
+DOCKERFILE_NOT_CHILD = 'Dockerfile, dockerfile_path, is not a child of the context, context.'
+
+THE_DOCKERFILE_SPECIFIED = 'Could not parse the Dockerfile specified'
+
+DOCKERFILE_PATH_NOT_FOUND = 'Specified Dockerfile path for the trigger was not found on the main branch. This trigger may fail.'
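GOOD_CONF and BAD_CONF above are built around the is_parent check that analyze_trigger runs on the context/dockerfile_path pair; a quick illustration of how that check treats these exact values:

  from endpoints.api.trigger_analyzer import is_parent

  assert is_parent('/', '/file')                      # GOOD_CONF: '/' contains '/file'
  assert not is_parent('context', 'dockerfile_path')  # BAD_CONF: unrelated relative paths
  assert not is_parent('', '/file')                   # an empty context is never a parent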
+
+NO_FROM_LINE = 'No FROM line found in the Dockerfile'
+
+REPO_NOT_FOUND = 'Repository "server_hostname/path/file" referenced by the Dockerfile was not found'
+
+
+@pytest.fixture
+def get_monkeypatch(monkeypatch):
+  return monkeypatch
+
+
+def patch_permissions(monkeypatch, can_read=False):
+  def can_read_fn(base_namespace, base_repository):
+    return can_read
+
+  monkeypatch.setattr(permissions, 'ReadRepositoryPermission', can_read_fn)
+
+
+def patch_list_namespace_robots(monkeypatch):
+  my_mock = Mock()
+  my_mock.configure_mock(**{'username': 'name'})
+  return_value = [my_mock]
+
+  def return_list_mocks(namespace):
+    return return_value
+
+  monkeypatch.setattr(model.user, 'list_namespace_robots', return_list_mocks)
+  return return_value
+
+
+def patch_get_all_repo_users_transitive(monkeypatch):
+  my_mock = Mock()
+  my_mock.configure_mock(**{'username': 'name'})
+  return_value = [my_mock]
+
+  def return_get_mocks(namespace, image_repository):
+    return return_value
+
+  monkeypatch.setattr(model.user, 'get_all_repo_users_transitive', return_get_mocks)
+  return return_value
+
+
+def patch_parse_dockerfile(monkeypatch, get_base_image):
+  if get_base_image is not None:
+    def return_return_value(content):
+      parse_mock = Mock()
+      parse_mock.configure_mock(**{'get_base_image': get_base_image})
+      return parse_mock
+
+    monkeypatch.setattr(dockerfileparse, "parse_dockerfile", return_return_value)
+  else:
+    def return_return_value(content):
+      return get_base_image
+
+    monkeypatch.setattr(dockerfileparse, "parse_dockerfile", return_return_value)
+
+
+def patch_model_repository_get_repository(monkeypatch, get_repository):
+  if get_repository is not None:
+    def mock_get_repository(base_namespace, base_repository):
+      vis_mock = Mock()
+      vis_mock.name = get_repository
+      get_repo_mock = Mock(visibility=vis_mock)
+      return get_repo_mock
+  else:
+    def mock_get_repository(base_namespace, base_repository):
+      return None
+
+  monkeypatch.setattr(model.repository, "get_repository", mock_get_repository)
+
+
+def return_none():
+  return None
+
+
+def return_content():
+  return Mock()
+
+
+def return_server_hostname():
+  return "server_hostname/"
+
+
+def return_non_server_hostname():
+  return "slime"
+
+
+def return_path():
+  return "server_hostname/path/file"
+
+
+@pytest.mark.parametrize(
+  'handler_fn, config_dict, admin_org_permission, status, message, get_base_image, robots, server_hostname, get_repository, can_read, namespace, name', [
+    (return_none, EMPTY_CONF, False, "warning", DOCKERFILE_PATH_NOT_FOUND, None, [], None, None, False, "namespace", None),
+    (return_none, EMPTY_CONF, True, "warning", DOCKERFILE_PATH_NOT_FOUND, None, [ONE_ROBOT], None, None, False, "namespace", None),
+    (return_content, BAD_CONF, False, "error", THE_DOCKERFILE_SPECIFIED, None, [], None, None, False, "namespace", None),
+    (return_none, EMPTY_CONF, False, "warning", DOCKERFILE_PATH_NOT_FOUND, return_none, [], None, None, False, "namespace", None),
+    (return_none, EMPTY_CONF, True, "warning", DOCKERFILE_PATH_NOT_FOUND, return_none, [ONE_ROBOT], None, None, False, "namespace", None),
+    (return_content, BAD_CONF, False, "error", DOCKERFILE_NOT_CHILD, return_none, [], None, None, False, "namespace", None),
+    (return_content, GOOD_CONF, False, "warning", NO_FROM_LINE, return_none, [], None, None, False, "namespace", None),
+    (return_content, GOOD_CONF, False, "publicbase", None, return_non_server_hostname, [], "server_hostname", None, False, "namespace", None),
+    (return_content, GOOD_CONF, False, "warning", BAD_PATH,
return_server_hostname, [], "server_hostname", None, False, "namespace", None), + (return_content, GOOD_CONF, False, "error", REPO_NOT_FOUND, return_path, [], "server_hostname", None, False, "namespace", None), + (return_content, GOOD_CONF, False, "error", REPO_NOT_FOUND, return_path, [], "server_hostname", "nonpublic", False, "namespace", None), + (return_content, GOOD_CONF, False, "requiresrobot", None, return_path, [], "server_hostname", "nonpublic", True, "path", "file"), + (return_content, GOOD_CONF, False, "publicbase", None, return_path, [], "server_hostname", "public", True, "path", "file"), + + ]) +def test_trigger_analyzer(handler_fn, config_dict, admin_org_permission, status, message, get_base_image, robots, + server_hostname, get_repository, can_read, namespace, name, + get_monkeypatch): + patch_list_namespace_robots(get_monkeypatch) + patch_get_all_repo_users_transitive(get_monkeypatch) + patch_parse_dockerfile(get_monkeypatch, get_base_image) + patch_model_repository_get_repository(get_monkeypatch, get_repository) + patch_permissions(get_monkeypatch, can_read) + handler_mock = Mock() + handler_mock.configure_mock(**{'load_dockerfile_contents': handler_fn}) + trigger_analyzer = TriggerAnalyzer(handler_mock, 'namespace', server_hostname, config_dict, admin_org_permission) + assert trigger_analyzer.analyze_trigger() == {'namespace': namespace, + 'name': name, + 'robots': robots, + 'status': status, + 'message': message, + 'is_admin': admin_org_permission} diff --git a/endpoints/api/trigger.py b/endpoints/api/trigger.py index 1d2d3ac18..6cefadc13 100644 --- a/endpoints/api/trigger.py +++ b/endpoints/api/trigger.py @@ -1,6 +1,5 @@ """ Create, list and manage build triggers. """ -import json import logging from os import path from urllib import quote @@ -20,11 +19,11 @@ from data.model.build import update_build_trigger from endpoints.api import (RepositoryParamResource, nickname, resource, require_repo_admin, log_action, request_error, query_param, parse_args, internal_only, validate_json_request, api, path_param, abort, - disallow_for_app_repositories, disallow_under_trust) + disallow_for_app_repositories) from endpoints.api.build import build_status_view, trigger_view, RepositoryBuildStatus +from endpoints.api.trigger_analyzer import TriggerAnalyzer from endpoints.building import start_build, MaximumBuildsQueuedException from endpoints.exception import NotFound, Unauthorized, InvalidRequest -from util.dockerfileparse import parse_dockerfile from util.names import parse_robot_username logger = logging.getLogger(__name__) @@ -35,6 +34,13 @@ def _prepare_webhook_url(scheme, username, password, hostname, path): return urlunparse((scheme, auth_hostname, path, '', '', '')) +def get_trigger(trigger_uuid): + try: + trigger = model.build.get_build_trigger(trigger_uuid) + except model.InvalidBuildTriggerException: + raise NotFound() + return trigger + @resource('/v1/repository//trigger/') @path_param('repository', 'The full path of the repository. e.g. namespace/name') class BuildTriggerList(RepositoryParamResource): @@ -62,23 +68,14 @@ class BuildTrigger(RepositoryParamResource): @nickname('getBuildTrigger') def get(self, namespace_name, repo_name, trigger_uuid): """ Get information for the specified build trigger. 
""" - try: - trigger = model.build.get_build_trigger(trigger_uuid) - except model.InvalidBuildTriggerException: - raise NotFound() - - return trigger_view(trigger, can_admin=True) + return trigger_view(get_trigger(trigger_uuid), can_admin=True) @require_repo_admin @disallow_for_app_repositories - @disallow_under_trust @nickname('deleteBuildTrigger') def delete(self, namespace_name, repo_name, trigger_uuid): """ Delete the specified build trigger. """ - try: - trigger = model.build.get_build_trigger(trigger_uuid) - except model.InvalidBuildTriggerException: - raise NotFound() + trigger = get_trigger(trigger_uuid) handler = BuildTriggerHandler.get_handler(trigger) if handler.is_active(): @@ -116,15 +113,11 @@ class BuildTriggerSubdirs(RepositoryParamResource): @require_repo_admin @disallow_for_app_repositories - @disallow_under_trust @nickname('listBuildTriggerSubdirs') @validate_json_request('BuildTriggerSubdirRequest') def post(self, namespace_name, repo_name, trigger_uuid): """ List the subdirectories available for the specified build trigger and source. """ - try: - trigger = model.build.get_build_trigger(trigger_uuid) - except model.InvalidBuildTriggerException: - raise NotFound() + trigger = get_trigger(trigger_uuid) user_permission = UserAdminPermission(trigger.connected_user.username) if user_permission.can(): @@ -184,16 +177,11 @@ class BuildTriggerActivate(RepositoryParamResource): @require_repo_admin @disallow_for_app_repositories - @disallow_under_trust @nickname('activateBuildTrigger') @validate_json_request('BuildTriggerActivateRequest') def post(self, namespace_name, repo_name, trigger_uuid): """ Activate the specified build trigger. """ - try: - trigger = model.build.get_build_trigger(trigger_uuid) - except model.InvalidBuildTriggerException: - raise NotFound() - + trigger = get_trigger(trigger_uuid) handler = BuildTriggerHandler.get_handler(trigger) if handler.is_active(): raise InvalidRequest('Trigger config is not sufficient for activation.') @@ -285,15 +273,11 @@ class BuildTriggerAnalyze(RepositoryParamResource): @require_repo_admin @disallow_for_app_repositories - @disallow_under_trust @nickname('analyzeBuildTrigger') @validate_json_request('BuildTriggerAnalyzeRequest') def post(self, namespace_name, repo_name, trigger_uuid): """ Analyze the specified build trigger configuration. """ - try: - trigger = model.build.get_build_trigger(trigger_uuid) - except model.InvalidBuildTriggerException: - raise NotFound() + trigger = get_trigger(trigger_uuid) if trigger.repository.namespace_user.username != namespace_name: raise NotFound() @@ -303,106 +287,14 @@ class BuildTriggerAnalyze(RepositoryParamResource): new_config_dict = request.get_json()['config'] handler = BuildTriggerHandler.get_handler(trigger, new_config_dict) - - def analyze_view(image_namespace, image_repository, status, message=None): - # Retrieve the list of robots and mark whether they have read access already. 
- robots = [] - if AdministerOrganizationPermission(image_namespace).can(): - if image_repository is not None: - perm_query = model.user.get_all_repo_users_transitive(image_namespace, image_repository) - user_ids_with_permission = set([user.id for user in perm_query]) - else: - user_ids_with_permission = set() - - def robot_view(robot): - return { - 'name': robot.username, - 'kind': 'user', - 'is_robot': True, - 'can_read': robot.id in user_ids_with_permission, - } - - robots = [robot_view(robot) for robot in model.user.list_namespace_robots(image_namespace)] - - return { - 'namespace': image_namespace, - 'name': image_repository, - 'robots': robots, - 'status': status, - 'message': message, - 'is_admin': AdministerOrganizationPermission(image_namespace).can(), - } - + server_hostname = app.config['SERVER_HOSTNAME'] try: - # Load the contents of the Dockerfile. - contents = handler.load_dockerfile_contents() - if not contents: - return { - 'status': 'warning', - 'message': 'Specified Dockerfile path for the trigger was not found on the main ' + - 'branch. This trigger may fail.', - } - - # Parse the contents of the Dockerfile. - parsed = parse_dockerfile(contents) - if not parsed: - return { - 'status': 'error', - 'message': 'Could not parse the Dockerfile specified' - } - - # Check whether the dockerfile_path is correct - if new_config_dict.get('context'): - if not is_parent(new_config_dict.get('context'), new_config_dict.get('dockerfile_path')): - return { - 'status': 'error', - 'message': 'Dockerfile, %s, is not child of the context, %s.' % - (new_config_dict.get('context'), new_config_dict.get('dockerfile_path')) - } - - # Default to the current namespace. - base_namespace = namespace_name - base_repository = None - - # Determine the base image (i.e. the FROM) for the Dockerfile. - base_image = parsed.get_base_image() - if not base_image: - return analyze_view(base_namespace, base_repository, 'warning', - message='No FROM line found in the Dockerfile') - - # Check to see if the base image lives in Quay. - quay_registry_prefix = '%s/' % (app.config['SERVER_HOSTNAME']) - if not base_image.startswith(quay_registry_prefix): - return analyze_view(base_namespace, base_repository, 'publicbase') - - # Lookup the repository in Quay. - result = str(base_image)[len(quay_registry_prefix):].split('/', 2) - if len(result) != 2: - msg = '"%s" is not a valid Quay repository path' % (base_image) - return analyze_view(base_namespace, base_repository, 'warning', message=msg) - - (base_namespace, base_repository) = result - found_repository = model.repository.get_repository(base_namespace, base_repository) - if not found_repository: - return { - 'status': 'error', - 'message': 'Repository "%s" referenced by the Dockerfile was not found' % (base_image) - } - - # If the repository is private and the user cannot see that repo, then - # mark it as not found. 
- can_read = ReadRepositoryPermission(base_namespace, base_repository) - if found_repository.visibility.name != 'public' and not can_read: - return { - 'status': 'error', - 'message': 'Repository "%s" referenced by the Dockerfile was not found' % (base_image) - } - - if found_repository.visibility.name == 'public': - return analyze_view(base_namespace, base_repository, 'publicbase') - else: - return analyze_view(base_namespace, base_repository, 'requiresrobot') - + trigger_analyzer = TriggerAnalyzer(handler, + namespace_name, + server_hostname, + new_config_dict, + AdministerOrganizationPermission(namespace_name).can()) + return trigger_analyzer.analyze_trigger() except RepositoryReadException as rre: return { 'status': 'error', @@ -413,30 +305,6 @@ class BuildTriggerAnalyze(RepositoryParamResource): 'status': 'notimplemented', } - raise NotFound() - - -def is_parent(context, dockerfile_path): - """ This checks whether the context is a parent of the dockerfile_path""" - if context == "" or dockerfile_path == "": - return False - - normalized_context = path.normpath(context) - if normalized_context[len(normalized_context) - 1] != path.sep: - normalized_context += path.sep - - if normalized_context[0] != path.sep: - normalized_context = path.sep + normalized_context - - normalized_subdir = path.normpath(path.dirname(dockerfile_path)) - if normalized_subdir[0] != path.sep: - normalized_subdir = path.sep + normalized_subdir - - if normalized_subdir[len(normalized_subdir) - 1] != path.sep: - normalized_subdir += path.sep - - return normalized_subdir.startswith(normalized_context) - @resource('/v1/repository//trigger//start') @path_param('repository', 'The full path of the repository. e.g. namespace/name') @@ -467,15 +335,11 @@ class ActivateBuildTrigger(RepositoryParamResource): @require_repo_admin @disallow_for_app_repositories - @disallow_under_trust @nickname('manuallyStartBuildTrigger') @validate_json_request('RunParameters') def post(self, namespace_name, repo_name, trigger_uuid): """ Manually start a build from the specified trigger. """ - try: - trigger = model.build.get_build_trigger(trigger_uuid) - except model.InvalidBuildTriggerException: - raise NotFound() + trigger = get_trigger(trigger_uuid) handler = BuildTriggerHandler.get_handler(trigger) if not handler.is_active(): @@ -532,14 +396,10 @@ class BuildTriggerFieldValues(RepositoryParamResource): @require_repo_admin @disallow_for_app_repositories - @disallow_under_trust @nickname('listTriggerFieldValues') def post(self, namespace_name, repo_name, trigger_uuid, field_name): """ List the field values for a custom run field. """ - try: - trigger = model.build.get_build_trigger(trigger_uuid) - except model.InvalidBuildTriggerException: - raise NotFound() + trigger = get_trigger(trigger_uuid) config = request.get_json() or None if AdministerRepositoryPermission(namespace_name, repo_name).can(): @@ -577,17 +437,13 @@ class BuildTriggerSources(RepositoryParamResource): @require_repo_admin @disallow_for_app_repositories - @disallow_under_trust @nickname('listTriggerBuildSources') @validate_json_request('BuildTriggerSourcesRequest') def post(self, namespace_name, repo_name, trigger_uuid): """ List the build sources for the trigger configuration thus far. 
""" namespace = request.get_json()['namespace'] - try: - trigger = model.build.get_build_trigger(trigger_uuid) - except model.InvalidBuildTriggerException: - raise NotFound() + trigger = get_trigger(trigger_uuid) user_permission = UserAdminPermission(trigger.connected_user.username) if user_permission.can(): @@ -612,14 +468,10 @@ class BuildTriggerSourceNamespaces(RepositoryParamResource): @require_repo_admin @disallow_for_app_repositories - @disallow_under_trust @nickname('listTriggerBuildSourceNamespaces') def get(self, namespace_name, repo_name, trigger_uuid): """ List the build sources for the trigger configuration thus far. """ - try: - trigger = model.build.get_build_trigger(trigger_uuid) - except model.InvalidBuildTriggerException: - raise NotFound() + trigger = get_trigger(trigger_uuid) user_permission = UserAdminPermission(trigger.connected_user.username) if user_permission.can(): diff --git a/endpoints/api/trigger_analyzer.py b/endpoints/api/trigger_analyzer.py new file mode 100644 index 000000000..2a29e502e --- /dev/null +++ b/endpoints/api/trigger_analyzer.py @@ -0,0 +1,122 @@ +from os import path + +from auth import permissions +from data import model +from util import dockerfileparse + + +def is_parent(context, dockerfile_path): + """ This checks whether the context is a parent of the dockerfile_path""" + if context == "" or dockerfile_path == "": + return False + + normalized_context = path.normpath(context) + if normalized_context[len(normalized_context) - 1] != path.sep: + normalized_context += path.sep + + if normalized_context[0] != path.sep: + normalized_context = path.sep + normalized_context + + normalized_subdir = path.normpath(path.dirname(dockerfile_path)) + if normalized_subdir[0] != path.sep: + normalized_subdir = path.sep + normalized_subdir + + if normalized_subdir[len(normalized_subdir) - 1] != path.sep: + normalized_subdir += path.sep + + return normalized_subdir.startswith(normalized_context) + + +class TriggerAnalyzer: + """ This analyzes triggers and returns the appropriate trigger and robot view to the frontend. """ + + def __init__(self, handler, namespace_name, server_hostname, new_config_dict, admin_org_permission): + self.handler = handler + self.namespace_name = namespace_name + self.server_hostname = server_hostname + self.new_config_dict = new_config_dict + self.admin_org_permission = admin_org_permission + + def analyze_trigger(self): + # Load the contents of the Dockerfile. + contents = self.handler.load_dockerfile_contents() + if not contents: + return self.analyze_view(self.namespace_name, None, 'warning', + message='Specified Dockerfile path for the trigger was not found on the main ' + + 'branch. This trigger may fail.') + + # Parse the contents of the Dockerfile. + parsed = dockerfileparse.parse_dockerfile(contents) + if not parsed: + return self.analyze_view(self.namespace_name, None, 'error', message='Could not parse the Dockerfile specified') + + # Check whether the dockerfile_path is correct + if self.new_config_dict.get('context') and not is_parent(self.new_config_dict.get('context'), + self.new_config_dict.get('dockerfile_path')): + return self.analyze_view(self.namespace_name, None, 'error', + message='Dockerfile, %s, is not a child of the context, %s.' % + (self.new_config_dict.get('context'), + self.new_config_dict.get('dockerfile_path'))) + + # Determine the base image (i.e. the FROM) for the Dockerfile. 
+ base_image = parsed.get_base_image() + if not base_image: + return self.analyze_view(self.namespace_name, None, 'warning', message='No FROM line found in the Dockerfile') + + # Check to see if the base image lives in Quay. + quay_registry_prefix = '%s/' % self.server_hostname + if not base_image.startswith(quay_registry_prefix): + return self.analyze_view(self.namespace_name, None, 'publicbase') + + # Lookup the repository in Quay. + result = str(base_image)[len(quay_registry_prefix):].split('/', 2) + if len(result) != 2: + msg = '"%s" is not a valid Quay repository path' % base_image + return self.analyze_view(self.namespace_name, None, 'warning', message=msg) + + (base_namespace, base_repository) = result + found_repository = model.repository.get_repository(base_namespace, base_repository) + if not found_repository: + return self.analyze_view(self.namespace_name, None, 'error', + message='Repository "%s" referenced by the Dockerfile was not found' % base_image) + + # If the repository is private and the user cannot see that repo, then + # mark it as not found. + can_read = permissions.ReadRepositoryPermission(base_namespace, base_repository) + if found_repository.visibility.name != 'public' and not can_read: + return self.analyze_view(self.namespace_name, None, 'error', + message='Repository "%s" referenced by the Dockerfile was not found' % base_image) + + if found_repository.visibility.name == 'public': + return self.analyze_view(base_namespace, base_repository, 'publicbase') + + return self.analyze_view(base_namespace, base_repository, 'requiresrobot') + + def analyze_view(self, image_namespace, image_repository, status, message=None): + # Retrieve the list of robots and mark whether they have read access already. + robots = [] + if self.admin_org_permission: + if image_repository is not None: + perm_query = model.user.get_all_repo_users_transitive(image_namespace, image_repository) + user_ids_with_permission = set([user.id for user in perm_query]) + else: + user_ids_with_permission = set() + + def robot_view(robot): + return { + 'name': robot.username, + 'kind': 'user', + 'is_robot': True, + 'can_read': robot.id in user_ids_with_permission, + } + + robots = [robot_view(robot) for robot in model.user.list_namespace_robots(image_namespace)] + + return { + 'namespace': image_namespace, + 'name': image_repository, + 'robots': robots, + 'status': status, + 'message': message, + 'is_admin': self.admin_org_permission, + } diff --git a/endpoints/appr/registry.py b/endpoints/appr/registry.py index a77b72104..9f2a5d9fb 100644 --- a/endpoints/appr/registry.py +++ b/endpoints/appr/registry.py @@ -13,6 +13,7 @@ from flask import jsonify, request from auth.auth_context import get_authenticated_user from auth.decorators import process_auth from auth.permissions import (CreateRepositoryPermission, ModifyRepositoryPermission) +from data.interfaces.appr import oci_app_model as model from endpoints.appr import (appr_bp, require_app_repo_read, require_app_repo_write) from endpoints.appr.cnr_backend import Blob, Channel, Package, User from endpoints.appr.decorators import disallow_for_image_repository @@ -102,6 +103,8 @@ def list_packages(): def delete_package(namespace, package_name, release, media_type): reponame = repo_name(namespace, package_name) result = cnr_registry.delete_package(reponame, release, media_type, package_class=Package) + model.log_action('delete_tag', namespace, repo_name=package_name, + metadata={'release': release, 'mediatype': media_type}) return jsonify(result) @@ -136,7 
+139,7 @@ def show_package_releases(namespace, package_name): @process_auth @require_app_repo_read @anon_protect -def show_package_releasse_manifests(namespace, package_name, release): +def show_package_release_manifests(namespace, package_name, release): reponame = repo_name(namespace, package_name) result = cnr_registry.show_package_manifests(reponame, release, package_class=Package) return jsonify(result) @@ -153,7 +156,10 @@ def pull(namespace, package_name, release, media_type): reponame = repo_name(namespace, package_name) logger.info("pull %s", reponame) data = cnr_registry.pull(reponame, release, media_type, Package, blob_class=Blob) - return _pull(data) + model.log_action('pull_repo', namespace, repo_name=package_name, + metadata={'release': release, 'mediatype': media_type}) + json_format = request.args.get('format', None) == 'json' + return _pull(data, json_format) @appr_bp.route("/api/v1/packages//", methods=['POST'], @@ -178,6 +184,7 @@ def push(namespace, package_name): {"package": reponame, "scopes": ['create']}) Package.create_repository(reponame, private, owner) + model.log_action('create_repo', namespace, repo_name=package_name) if not ModifyRepositoryPermission(namespace, package_name).can(): raise Forbidden("Unauthorized access for: %s" % reponame, @@ -194,6 +201,8 @@ def push(namespace, package_name): blob = Blob(reponame, values['blob']) app_release = cnr_registry.push(reponame, release_version, media_type, blob, force, package_class=Package, user=owner, visibility=private) + model.log_action('push_repo', namespace, repo_name=package_name, + metadata={'release': release_version}) return jsonify(app_release) @@ -246,6 +255,8 @@ def add_channel_release(namespace, package_name, channel_name, release): reponame = repo_name(namespace, package_name) result = cnr_registry.add_channel_release(reponame, channel_name, release, channel_class=Channel, package_class=Package) + model.log_action('create_tag', namespace, repo_name=package_name, + metadata={'channel': channel_name, 'release': release}) return jsonify(result) @@ -254,13 +265,13 @@ def _check_channel_name(channel_name, release=None): logger.debug('Found invalid channel name CNR add channel release: %s', channel_name) raise InvalidUsage("Found invalid channelname %s" % release, {'name': channel_name, - "release": release}) + 'release': release}) if release is not None and not TAG_REGEX.match(release): logger.debug('Found invalid release name CNR add channel release: %s', release) - raise InvalidUsage("Found invalid channel release name %s" % release, + raise InvalidUsage('Found invalid channel release name %s' % release, {'name': channel_name, - "release": release}) + 'release': release}) @appr_bp.route( @@ -275,6 +286,8 @@ def delete_channel_release(namespace, package_name, channel_name, release): reponame = repo_name(namespace, package_name) result = cnr_registry.delete_channel_release(reponame, channel_name, release, channel_class=Channel, package_class=Package) + model.log_action('delete_tag', namespace, repo_name=package_name, + metadata={'channel': channel_name, 'release': release}) return jsonify(result) @@ -289,4 +302,6 @@ def delete_channel(namespace, package_name, channel_name): _check_channel_name(channel_name) reponame = repo_name(namespace, package_name) result = cnr_registry.delete_channel(reponame, channel_name, channel_class=Channel) + model.log_action('delete_tag', namespace, repo_name=package_name, + metadata={'channel': channel_name}) return jsonify(result) diff --git a/endpoints/appr/test/test_api.py 
b/endpoints/appr/test/test_api.py index 853d7f77f..722587b87 100644 --- a/endpoints/appr/test/test_api.py +++ b/endpoints/appr/test/test_api.py @@ -91,6 +91,18 @@ class TestServerQuayDB(BaseTestServer): """ TODO: search cross namespace and package name """ BaseTestServer.test_search_package_match(self, db_with_data1, client) + def test_list_search_package_match(self, db_with_data1, client): + url = self._url_for("api/v1/packages") + res = self.Client(client, self.headers()).get(url, params={'query': 'rocketchat'}) + assert res.status_code == 200 + assert len(self.json(res)) == 1 + + def test_list_search_package_no_match(self, db_with_data1, client): + url = self._url_for("api/v1/packages") + res = self.Client(client, self.headers()).get(url, params={'query': 'toto'}) + assert res.status_code == 200 + assert len(self.json(res)) == 0 + @pytest.mark.xfail def test_push_package_already_exists_force(self, db_with_data1, package_b64blob, client): """ No force push implemented """ diff --git a/endpoints/appr/test/test_api_security.py b/endpoints/appr/test/test_api_security.py index 6c9c3384b..c3e52b30c 100644 --- a/endpoints/appr/test/test_api_security.py +++ b/endpoints/appr/test/test_api_security.py @@ -5,7 +5,7 @@ from flask import url_for from data import model from endpoints.appr.registry import appr_bp, blobs -from endpoints.api.test.shared import client_with_identity +from endpoints.test.shared import client_with_identity from test.fixtures import * BLOB_ARGS = {'digest': 'abcd1235'} @@ -35,10 +35,10 @@ CHANNEL_RELEASE_ARGS = {'channel_name': 'c', 'release': 'r'} ('appr.show_package_releases', 'GET', {}, 'devtable', True, 'public', 200), ('appr.show_package_releases', 'GET', {}, 'devtable', True, 'devtable', 200), - ('appr.show_package_releasse_manifests', 'GET', RELEASE_ARGS, 'devtable', False, 'public', 403), - ('appr.show_package_releasse_manifests', 'GET', RELEASE_ARGS, 'devtable', False, 'devtable', 200), - ('appr.show_package_releasse_manifests', 'GET', RELEASE_ARGS, 'devtable', True, 'public', 200), - ('appr.show_package_releasse_manifests', 'GET', RELEASE_ARGS, 'devtable', True, 'devtable', 200), + ('appr.show_package_release_manifests', 'GET', RELEASE_ARGS, 'devtable', False, 'public', 403), + ('appr.show_package_release_manifests', 'GET', RELEASE_ARGS, 'devtable', False, 'devtable', 200), + ('appr.show_package_release_manifests', 'GET', RELEASE_ARGS, 'devtable', True, 'public', 200), + ('appr.show_package_release_manifests', 'GET', RELEASE_ARGS, 'devtable', True, 'devtable', 200), ('appr.pull', 'GET', PACKAGE_ARGS, 'devtable', False, 'public', 403), ('appr.pull', 'GET', PACKAGE_ARGS, 'devtable', False, 'devtable', 404), diff --git a/endpoints/common.py b/endpoints/common.py index d6c895c37..a43ecbd74 100644 --- a/endpoints/common.py +++ b/endpoints/common.py @@ -13,7 +13,7 @@ from flask import make_response, render_template, request, abort, session from flask_login import login_user from flask_principal import identity_changed -import endpoints.decorated # Register the various exceptions via decorators. +import endpoints.decorated # Register the various exceptions via decorators. 
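+# A minimal sketch of the side-effect-import pattern relied on above; the
+# module layout and exception name here are hypothetical, not Quay's own:
+#
+#   # decorated.py -- merely importing this module registers the handler.
+#   @app.errorhandler(SomeError)
+#   def _handle_some_error(ex):
+#       return make_response(str(ex), 400)
+#
+#   import decorated  # noqa: F401  (import kept purely for its side effects)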
import features from app import app, oauth_apps, oauth_login, LoginWrappedDBUser, user_analytics, license_validator @@ -25,29 +25,13 @@ from util.names import parse_namespace_repository from util.secscan import PRIORITY_LEVELS from util.saas.useranalytics import build_error_callback from util.timedeltastring import convert_to_timedelta +from _init import STATIC_DIR, __version__ logger = logging.getLogger(__name__) route_data = None -CACHE_BUSTERS_JSON = 'static/dist/cachebusters.json' -CACHE_BUSTERS = None - - -def get_cache_busters(): - """ Retrieves the cache busters hashes. """ - global CACHE_BUSTERS - if CACHE_BUSTERS is not None: - return CACHE_BUSTERS - - if not os.path.exists(CACHE_BUSTERS_JSON): - return {} - - with open(CACHE_BUSTERS_JSON, 'r') as f: - CACHE_BUSTERS = json.loads(f.read()) - return CACHE_BUSTERS - def parse_repository_name(include_tag=False, ns_kwarg_name='namespace_name', @@ -150,36 +134,17 @@ def list_files(path, extension): # Remove the static/ prefix. It is added in the template. return os.path.join(dp, f)[len('static/'):] - filepath = 'static/' + path + filepath = os.path.join('static/', path) return [join_path(dp, f) for dp, dn, files in os.walk(filepath) for f in files if matches(f)] -@lru_cache(maxsize=1) -def _get_version_number(): - try: - with open('CHANGELOG.md') as f: - return re.search('(v[0-9]+\.[0-9]+\.[0-9]+)', f.readline()).group(0) - except IOError: - return '' def render_page_template(name, route_data=None, **kwargs): debugging = app.config.get('DEBUGGING', False) - if debugging: - # If DEBUGGING is enabled, then we load the full set of individual JS and CSS files - # from the file system. - library_styles = list_files('lib', 'css') - main_styles = list_files('css', 'css') - library_scripts = list_files('lib', 'js') - # Ensure Webpack bundle is first script on page - main_scripts = ['build/bundle.js'] + list_files('js', 'js') - file_lists = [library_styles, main_styles, library_scripts, main_scripts] - for file_list in file_lists: - file_list.sort() - else: - library_styles = [] - main_styles = ['dist/quay-frontend.css'] - library_scripts = [] - main_scripts = ['dist/quay-frontend.min.js'] + library_styles = [] + main_styles = [] + library_scripts = [] + main_scripts = list_files('build', 'js') use_cdn = app.config.get('USE_CDN', True) if request.args.get('use_cdn') is not None: @@ -192,12 +157,6 @@ def render_page_template(name, route_data=None, **kwargs): if features.BILLING: external_scripts.append('//checkout.stripe.com/checkout.js') - def add_cachebusters(filenames): - cachebusters = get_cache_busters() - for filename in filenames: - cache_buster = cachebusters.get(filename, random_string()) if not debugging else 'debugging' - yield (filename, cache_buster) - def get_external_login_config(): login_config = [] for login_service in oauth_login.services: @@ -223,16 +182,16 @@ def render_page_template(name, route_data=None, **kwargs): version_number = '' if not features.BILLING: - version_number = 'Quay %s' % _get_version_number() + version_number = 'Quay %s' % __version__ resp = make_response(render_template(name, route_data=route_data, external_styles=external_styles, external_scripts=external_scripts, - main_styles=add_cachebusters(main_styles), - library_styles=add_cachebusters(library_styles), - main_scripts=add_cachebusters(main_scripts), - library_scripts=add_cachebusters(library_scripts), + main_styles=main_styles, + library_styles=library_styles, + main_scripts=main_scripts, + library_scripts=library_scripts, 
feature_set=features.get_features(), config_set=frontend_visible_config(app.config), oauth_set=get_oauth_config(), @@ -261,4 +220,3 @@ def render_page_template(name, route_data=None, **kwargs): resp.headers['X-FRAME-OPTIONS'] = 'DENY' return resp - diff --git a/endpoints/notificationmethod.py b/endpoints/notificationmethod.py index b315be9b6..06efa2109 100644 --- a/endpoints/notificationmethod.py +++ b/endpoints/notificationmethod.py @@ -1,9 +1,8 @@ -import logging import json +import logging import re import requests - from flask_mail import Message from app import mail, app, OVERRIDE_CONFIG_DIRECTORY @@ -11,10 +10,9 @@ from data import model from util.config.validator import SSL_FILENAMES from workers.queueworker import JobException - logger = logging.getLogger(__name__) -METHOD_TIMEOUT = app.config.get('NOTIFICATION_SEND_TIMEOUT', 10) # Seconds +METHOD_TIMEOUT = app.config.get('NOTIFICATION_SEND_TIMEOUT', 10) # Seconds class InvalidNotificationMethodException(Exception): @@ -53,6 +51,7 @@ class NotificationMethod(object): """ raise NotImplementedError + def perform(self, notification_obj, event_handler, notification_data): """ Performs the notification method. @@ -80,7 +79,7 @@ class QuayNotificationMethod(NotificationMethod): def validate(self, repository, config_data): status, err_message, target_users = self.find_targets(repository, config_data) if err_message: - raise CannotValidateNotificationMethodException(err_message) + raise CannotValidateNotificationMethodException(err_message) def find_targets(self, repository, config_data): target_info = config_data['target'] @@ -116,7 +115,6 @@ class QuayNotificationMethod(NotificationMethod): # Lookup the team's members return (True, None, model.organization.get_organization_team_members(org_team.id)) - def perform(self, notification_obj, event_handler, notification_data): repository = notification_obj.repository if not repository: @@ -152,24 +150,22 @@ class EmailMethod(NotificationMethod): 'is not authorized to receive ' 'notifications for this repository') - def perform(self, notification_obj, event_handler, notification_data): config_data = json.loads(notification_obj.config_json) email = config_data.get('email', '') if not email: return - msg = Message(event_handler.get_summary(notification_data['event_data'], notification_data), - sender='support@quay.io', - recipients=[email]) - msg.html = event_handler.get_message(notification_data['event_data'], notification_data) + with app.app_context(): + msg = Message(event_handler.get_summary(notification_data['event_data'], notification_data), + recipients=[email]) + msg.html = event_handler.get_message(notification_data['event_data'], notification_data) - try: - with app.app_context(): + try: mail.send(msg) - except Exception as ex: - logger.exception('Email was unable to be sent: %s' % ex.message) - raise NotificationMethodPerformException(ex.message) + except Exception as ex: + logger.exception('Email was unable to be sent: %s' % ex.message) + raise NotificationMethodPerformException(ex.message) class WebhookMethod(NotificationMethod): @@ -194,7 +190,7 @@ class WebhookMethod(NotificationMethod): try: resp = requests.post(url, data=json.dumps(payload), headers=headers, cert=SSLClientCert, timeout=METHOD_TIMEOUT) - if resp.status_code/100 != 2: + if resp.status_code / 100 != 2: error_message = '%s response for webhook to url: %s' % (resp.status_code, url) logger.error(error_message) logger.error(resp.content) @@ -209,6 +205,7 @@ class FlowdockMethod(NotificationMethod): """ Method for 
sending notifications to Flowdock via the Team Inbox API: https://www.flowdock.com/api/team-inbox """ + @classmethod def method_name(cls): return 'flowdock' @@ -233,7 +230,7 @@ class FlowdockMethod(NotificationMethod): headers = {'Content-type': 'application/json'} payload = { 'source': 'Quay', - 'from_address': 'support@quay.io', + 'from_address': 'support@quay.io', 'subject': event_handler.get_summary(notification_data['event_data'], notification_data), 'content': event_handler.get_message(notification_data['event_data'], notification_data), 'from_name': owner.username, @@ -245,7 +242,7 @@ class FlowdockMethod(NotificationMethod): try: resp = requests.post(url, data=json.dumps(payload), headers=headers, timeout=METHOD_TIMEOUT) - if resp.status_code/100 != 2: + if resp.status_code / 100 != 2: error_message = '%s response for flowdock to url: %s' % (resp.status_code, url) logger.error(error_message) logger.error(resp.content) @@ -260,6 +257,7 @@ class HipchatMethod(NotificationMethod): """ Method for sending notifications to Hipchat via the API: https://www.hipchat.com/docs/apiv2/method/send_room_notification """ + @classmethod def method_name(cls): return 'hipchat' @@ -306,7 +304,7 @@ class HipchatMethod(NotificationMethod): try: resp = requests.post(url, data=json.dumps(payload), headers=headers, timeout=METHOD_TIMEOUT) - if resp.status_code/100 != 2: + if resp.status_code / 100 != 2: error_message = '%s response for hipchat to url: %s' % (resp.status_code, url) logger.error(error_message) logger.error(resp.content) @@ -319,6 +317,7 @@ class HipchatMethod(NotificationMethod): from HTMLParser import HTMLParser + class SlackAdjuster(HTMLParser): def __init__(self): self.reset() @@ -336,7 +335,7 @@ class SlackAdjuster(HTMLParser): def handle_starttag(self, tag, attrs): if tag == 'a': - self.result.append('<%s|' % (self.get_attr(attrs, 'href'), )) + self.result.append('<%s|' % (self.get_attr(attrs, 'href'),)) if tag == 'i': self.result.append('_') @@ -360,6 +359,7 @@ class SlackAdjuster(HTMLParser): def get_data(self): return ''.join(self.result) + def adjust_tags(html): s = SlackAdjuster() s.feed(html) @@ -424,7 +424,7 @@ class SlackMethod(NotificationMethod): try: resp = requests.post(url, data=json.dumps(payload), headers=headers, timeout=METHOD_TIMEOUT) - if resp.status_code/100 != 2: + if resp.status_code / 100 != 2: error_message = '%s response for Slack to url: %s' % (resp.status_code, url) logger.error(error_message) logger.error(resp.content) diff --git a/endpoints/test/__init__.py b/endpoints/test/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/endpoints/test/shared.py b/endpoints/test/shared.py new file mode 100644 index 000000000..abb22ded9 --- /dev/null +++ b/endpoints/test/shared.py @@ -0,0 +1,68 @@ +import datetime +import json +import base64 + +from contextlib import contextmanager +from data import model + +from flask import g +from flask_principal import Identity + +CSRF_TOKEN_KEY = '_csrf_token' +CSRF_TOKEN = '123csrfforme' + +@contextmanager +def client_with_identity(auth_username, client): + with client.session_transaction() as sess: + if auth_username and auth_username is not None: + loaded = model.user.get_user(auth_username) + sess['user_id'] = loaded.uuid + sess['login_time'] = datetime.datetime.now() + sess[CSRF_TOKEN_KEY] = CSRF_TOKEN + else: + sess['user_id'] = 'anonymous' + + yield client + + with client.session_transaction() as sess: + sess['user_id'] = None + sess['login_time'] = None + sess[CSRF_TOKEN_KEY] = None + + +def 
add_csrf_param(params): + """ Returns a params dict with the CSRF parameter added. """ + params = params or {} + + if not CSRF_TOKEN_KEY in params: + params[CSRF_TOKEN_KEY] = CSRF_TOKEN + + return params + + +def gen_basic_auth(username, password): + """ Generates a basic auth header. """ + return 'Basic ' + base64.b64encode("%s:%s" % (username, password)) + + +def conduct_call(client, resource, url_for, method, params, body=None, expected_code=200, + headers=None): + """ Conducts a call to a Flask endpoint. """ + params = add_csrf_param(params) + + final_url = url_for(resource, **params) + + headers = headers or {} + headers.update({"Content-Type": "application/json"}) + + if body is not None: + body = json.dumps(body) + + # Required for anonymous calls to not exception. + g.identity = Identity(None, 'none') + + rv = client.open(final_url, method=method, data=body, headers=headers) + msg = '%s %s: got %s expected: %s | %s' % (method, final_url, rv.status_code, expected_code, + rv.data) + assert rv.status_code == expected_code, msg + return rv diff --git a/endpoints/v1/__init__.py b/endpoints/v1/__init__.py index 18ef430c4..472f0686b 100644 --- a/endpoints/v1/__init__.py +++ b/endpoints/v1/__init__.py @@ -4,7 +4,6 @@ from app import metric_queue, license_validator from endpoints.decorators import anon_protect, anon_allowed from util.metrics.metricqueue import time_blueprint - v1_bp = Blueprint('v1', __name__) license_validator.enforce_license_before_request(v1_bp) time_blueprint(v1_bp, metric_queue) @@ -28,6 +27,7 @@ def ping(): return response -from endpoints.v1 import index -from endpoints.v1 import registry -from endpoints.v1 import tag +from endpoints.v1 import ( + index, + registry, + tag,) diff --git a/endpoints/v1/index.py b/endpoints/v1/index.py index f579e5f15..b12c23dd5 100644 --- a/endpoints/v1/index.py +++ b/endpoints/v1/index.py @@ -6,22 +6,21 @@ from functools import wraps from flask import request, make_response, jsonify, session -from data.interfaces.v1 import pre_oci_model as model from app import authentication, userevents, metric_queue from auth.auth_context import get_authenticated_user, get_validated_token, get_validated_oauth_token from auth.decorators import process_auth -from auth.permissions import (ModifyRepositoryPermission, UserAdminPermission, - ReadRepositoryPermission, CreateRepositoryPermission, - repository_read_grant, repository_write_grant) +from auth.permissions import ( + ModifyRepositoryPermission, UserAdminPermission, ReadRepositoryPermission, + CreateRepositoryPermission, repository_read_grant, repository_write_grant) from auth.signedgrant import generate_signed_token +from endpoints.common import parse_repository_name +from endpoints.decorators import anon_protect, anon_allowed +from endpoints.notificationhelper import spawn_notification +from endpoints.v1 import v1_bp +from endpoints.v1.models_pre_oci import pre_oci_model as model +from util.audit import track_and_log from util.http import abort from util.names import REPOSITORY_NAME_REGEX -from endpoints.common import parse_repository_name -from endpoints.v1 import v1_bp -from endpoints.trackhelper import track_and_log -from endpoints.notificationhelper import spawn_notification -from endpoints.decorators import anon_protect, anon_allowed - logger = logging.getLogger(__name__) @@ -66,7 +65,9 @@ def generate_headers(scope=GrantType.READ_REPOSITORY, add_grant_for_status=None) response.headers['X-Docker-Token'] = signature return response + return wrapper + return decorator_method @@ -122,18 +123,15 
@@ def get_user(): if get_validated_oauth_token(): return jsonify({ 'username': '$oauthtoken', - 'email': None, - }) + 'email': None,}) elif get_authenticated_user(): return jsonify({ 'username': get_authenticated_user().username, - 'email': get_authenticated_user().email, - }) + 'email': get_authenticated_user().email,}) elif get_validated_token(): return jsonify({ 'username': '$token', - 'email': None, - }) + 'email': None,}) abort(404) @@ -151,8 +149,7 @@ def update_user(username): return jsonify({ 'username': get_authenticated_user().username, - 'email': get_authenticated_user().email - }) + 'email': get_authenticated_user().email}) abort(403) @@ -181,8 +178,7 @@ def create_repository(namespace_name, repo_name): if not modify_perm.can(): abort(403, message='You do not have permission to modify repository %(namespace)s/%(repository)s', - issue='no-repo-write-permission', - namespace=namespace_name, repository=repo_name) + issue='no-repo-write-permission', namespace=namespace_name, repository=repo_name) elif repo.kind != 'image': msg = 'This repository is for managing %s resources and not container images.' % repo.kind abort(405, message=msg, namespace=namespace_name) @@ -205,8 +201,7 @@ def create_repository(namespace_name, repo_name): user_event_data = { 'action': 'push_start', 'repository': repo_name, - 'namespace': namespace_name, - } + 'namespace': namespace_name,} event = userevents.get_event(get_authenticated_user().username) event.publish_event_data('docker-cli', user_event_data) @@ -237,8 +232,7 @@ def update_images(namespace_name, repo_name): updated_tags = session.get('pushed_tags', {}) event_data = { - 'updated_tags': updated_tags, - } + 'updated_tags': updated_tags,} track_and_log('push_repo', repo) spawn_notification(repo, 'repo_push', event_data) @@ -329,7 +323,7 @@ def _conduct_repo_search(username, query, limit=25, page=1): offset = (page - 1) * limit if query: - matching_repos = model.get_sorted_matching_repositories(query, username, limit=limit+1, + matching_repos = model.get_sorted_matching_repositories(query, username, limit=limit + 1, offset=offset) else: matching_repos = [] @@ -340,8 +334,7 @@ def _conduct_repo_search(username, query, limit=25, page=1): 'name': repo.namespace_name + '/' + repo.name, 'description': repo.description, 'is_public': repo.is_public, - 'href': '/repository/' + repo.namespace_name + '/' + repo.name - }) + 'href': '/repository/' + repo.namespace_name + '/' + repo.name}) # Defined: https://docs.docker.com/v1.6/reference/api/registry_api/ return { @@ -350,5 +343,4 @@ def _conduct_repo_search(username, query, limit=25, page=1): 'num_pages': page + 1 if len(matching_repos) > limit else page, 'page': page, 'page_size': limit, - 'results': results, - } + 'results': results,} diff --git a/endpoints/v1/models_interface.py b/endpoints/v1/models_interface.py new file mode 100644 index 000000000..93aed61a6 --- /dev/null +++ b/endpoints/v1/models_interface.py @@ -0,0 +1,221 @@ +from abc import ABCMeta, abstractmethod +from collections import namedtuple + +from six import add_metaclass + + +class Repository( + namedtuple('Repository', ['id', 'name', 'namespace_name', 'description', 'is_public', + 'kind'])): + """ + Repository represents a namespaced collection of tags. 
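+    Example with hypothetical values (illustrative only):
+      Repository(id=1, name='web', namespace_name='devtable',
+                 description='', is_public=True, kind='image')
+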
+ :type id: int + :type name: string + :type namespace_name: string + :type description: string + :type is_public: bool + :type kind: string + """ + + +@add_metaclass(ABCMeta) +class DockerRegistryV1DataInterface(object): + """ + Interface that represents all data store interactions required by a Docker Registry v1. + """ + + @abstractmethod + def placement_locations_and_path_docker_v1(self, namespace_name, repo_name, image_id): + """ + Returns all the placements for the image with the given V1 Docker ID, found under the given + repository or None if no image was found. + """ + pass + + @abstractmethod + def docker_v1_metadata(self, namespace_name, repo_name, image_id): + """ + Returns various pieces of metadata associated with an image with the given V1 Docker ID, + including the checksum and its V1 JSON metadata. + """ + pass + + @abstractmethod + def update_docker_v1_metadata(self, namespace_name, repo_name, image_id, created_date_str, + comment, command, compat_json, parent_image_id=None): + """ + Updates various pieces of V1 metadata associated with a particular image. + """ + pass + + @abstractmethod + def storage_exists(self, namespace_name, repo_name, image_id): + """ + Returns whether storage already exists for the image with the V1 Docker ID under the given + repository. + """ + pass + + @abstractmethod + def store_docker_v1_checksums(self, namespace_name, repo_name, image_id, checksum, + content_checksum): + """ + Stores the various V1 checksums for the image with the V1 Docker ID. + """ + pass + + @abstractmethod + def is_image_uploading(self, namespace_name, repo_name, image_id): + """ + Returns whether the image with the V1 Docker ID is currently marked as uploading. + """ + pass + + @abstractmethod + def update_image_uploading(self, namespace_name, repo_name, image_id, is_uploading): + """ + Marks the image with the V1 Docker ID with the given uploading status. + """ + pass + + @abstractmethod + def update_image_sizes(self, namespace_name, repo_name, image_id, size, uncompressed_size): + """ + Updates the sizing information for the image with the given V1 Docker ID. + """ + pass + + @abstractmethod + def get_image_size(self, namespace_name, repo_name, image_id): + """ + Returns the wire size of the image with the given Docker V1 ID. + """ + pass + + @abstractmethod + def create_bittorrent_pieces(self, namespace_name, repo_name, image_id, pieces_bytes): + """ + Saves the BitTorrent piece hashes for the image with the given Docker V1 ID. + """ + pass + + @abstractmethod + def image_ancestry(self, namespace_name, repo_name, image_id): + """ + Returns a list containing the full ancestry of Docker V1 IDs, in order, for the image with the + given Docker V1 ID. + """ + pass + + @abstractmethod + def repository_exists(self, namespace_name, repo_name): + """ + Returns whether the repository with the given name and namespace exists. + """ + pass + + @abstractmethod + def create_or_link_image(self, username, namespace_name, repo_name, image_id, storage_location): + """ + Adds the given image to the given repository, by either linking to an existing image visible to + the user with the given username, or creating a new one if no existing image matches. + """ + pass + + @abstractmethod + def create_temp_hidden_tag(self, namespace_name, repo_name, image_id, expiration): + """ + Creates a hidden tag under the matching namespace pointing to the image with the given V1 Docker + ID. 
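+    The expiration is expressed in seconds (the caller further below passes
+    app.config['PUSH_TEMP_TAG_EXPIRATION_SEC']); an illustrative call with
+    hypothetical values:
+      create_temp_hidden_tag('devtable', 'web', 'abc123', expiration=300)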
+ """ + pass + + @abstractmethod + def list_tags(self, namespace_name, repo_name): + """ + Returns all the tags defined in the repository with the given namespace and name. + """ + pass + + @abstractmethod + def create_or_update_tag(self, namespace_name, repo_name, image_id, tag_name): + """ + Creates or updates a tag under the matching repository to point to the image with the given + Docker V1 ID. + """ + pass + + @abstractmethod + def find_image_id_by_tag(self, namespace_name, repo_name, tag_name): + """ + Returns the Docker V1 image ID for the HEAD image for the tag with the given name under the + matching repository, or None if none. + """ + pass + + @abstractmethod + def delete_tag(self, namespace_name, repo_name, tag_name): + """ + Deletes the given tag from the given repository. + """ + pass + + @abstractmethod + def load_token(self, token): + """ + Loads the data associated with the given (deprecated) access token, and, if + found returns True. + """ + pass + + @abstractmethod + def verify_robot(self, username, token): + """ + Returns True if the given robot username and token match an existing robot + account. + """ + pass + + @abstractmethod + def change_user_password(self, user, new_password): + """ + Changes the password associated with the given user. + """ + pass + + @abstractmethod + def get_repository(self, namespace_name, repo_name): + """ + Returns the repository with the given name under the given namespace or None + if none. + """ + pass + + @abstractmethod + def create_repository(self, namespace_name, repo_name, user=None): + """ + Creates a new repository under the given namespace with the given name, for + the given user. + """ + pass + + @abstractmethod + def repository_is_public(self, namespace_name, repo_name): + """ + Returns whether the repository with the given name under the given namespace + is public. If no matching repository was found, returns False. + """ + pass + + @abstractmethod + def validate_oauth_token(self, token): + """ Returns whether the given OAuth token validates. """ + pass + + @abstractmethod + def get_sorted_matching_repositories(self, search_term, filter_username=None, offset=0, + limit=25): + """ + Returns a sorted list of repositories matching the given search term. + """ + pass diff --git a/data/interfaces/v1.py b/endpoints/v1/models_pre_oci.py similarity index 52% rename from data/interfaces/v1.py rename to endpoints/v1/models_pre_oci.py index abc3fb858..2de743e70 100644 --- a/data/interfaces/v1.py +++ b/endpoints/v1/models_pre_oci.py @@ -1,234 +1,15 @@ -from abc import ABCMeta, abstractmethod -from collections import namedtuple - -from six import add_metaclass - from app import app, storage as store from data import model -from data.model import db_transaction +from endpoints.v1.models_interface import DockerRegistryV1DataInterface, Repository from util.morecollections import AttrDict -class Repository(namedtuple('Repository', ['id', 'name', 'namespace_name', 'description', - 'is_public', 'kind'])): - """ - Repository represents a namespaced collection of tags. - :type id: int - :type name: string - :type namespace_name: string - :type description: string - :type is_public: bool - :type kind: string - """ - - -@add_metaclass(ABCMeta) -class DockerRegistryV1DataInterface(object): - """ - Interface that represents all data store interactions required by a Docker Registry v1. 
- """ - - @abstractmethod - def placement_locations_and_path_docker_v1(self, namespace_name, repo_name, image_id): - """ - Returns all the placements for the image with the given V1 Docker ID, found under the given - repository or None if no image was found. - """ - pass - - @abstractmethod - def docker_v1_metadata(self, namespace_name, repo_name, image_id): - """ - Returns various pieces of metadata associated with an image with the given V1 Docker ID, - including the checksum and its V1 JSON metadata. - """ - pass - - @abstractmethod - def update_docker_v1_metadata(self, namespace_name, repo_name, image_id, created_date_str, - comment, command, compat_json, parent_image_id=None): - """ - Updates various pieces of V1 metadata associated with a particular image. - """ - pass - - @abstractmethod - def storage_exists(self, namespace_name, repo_name, image_id): - """ - Returns whether storage already exists for the image with the V1 Docker ID under the given - repository. - """ - pass - - @abstractmethod - def store_docker_v1_checksums(self, namespace_name, repo_name, image_id, checksum, - content_checksum): - """ - Stores the various V1 checksums for the image with the V1 Docker ID. - """ - pass - - @abstractmethod - def is_image_uploading(self, namespace_name, repo_name, image_id): - """ - Returns whether the image with the V1 Docker ID is currently marked as uploading. - """ - pass - - @abstractmethod - def update_image_uploading(self, namespace_name, repo_name, image_id, is_uploading): - """ - Marks the image with the V1 Docker ID with the given uploading status. - """ - pass - - @abstractmethod - def update_image_sizes(self, namespace_name, repo_name, image_id, size, uncompressed_size): - """ - Updates the sizing information for the image with the given V1 Docker ID. - """ - pass - - @abstractmethod - def get_image_size(self, namespace_name, repo_name, image_id): - """ - Returns the wire size of the image with the given Docker V1 ID. - """ - pass - - @abstractmethod - def create_bittorrent_pieces(self, namespace_name, repo_name, image_id, pieces_bytes): - """ - Saves the BitTorrent piece hashes for the image with the given Docker V1 ID. - """ - pass - - @abstractmethod - def image_ancestry(self, namespace_name, repo_name, image_id): - """ - Returns a list containing the full ancestry of Docker V1 IDs, in order, for the image with the - given Docker V1 ID. - """ - pass - - @abstractmethod - def repository_exists(self, namespace_name, repo_name): - """ - Returns whether the repository with the given name and namespace exists. - """ - pass - - @abstractmethod - def create_or_link_image(self, username, namespace_name, repo_name, image_id, storage_location): - """ - Adds the given image to the given repository, by either linking to an existing image visible to - the user with the given username, or creating a new one if no existing image matches. - """ - pass - - @abstractmethod - def create_temp_hidden_tag(self, namespace_name, repo_name, image_id, expiration): - """ - Creates a hidden tag under the matching namespace pointing to the image with the given V1 Docker - ID. - """ - pass - - @abstractmethod - def list_tags(self, namespace_name, repo_name): - """ - Returns all the tags defined in the repository with the given namespace and name. - """ - pass - - @abstractmethod - def create_or_update_tag(self, namespace_name, repo_name, image_id, tag_name): - """ - Creates or updates a tag under the matching repository to point to the image with the given - Docker V1 ID. 
- """ - pass - - @abstractmethod - def find_image_id_by_tag(self, namespace_name, repo_name, tag_name): - """ - Returns the Docker V1 image ID for the HEAD image for the tag with the given name under the - matching repository, or None if none. - """ - pass - - @abstractmethod - def delete_tag(self, namespace_name, repo_name, tag_name): - """ - Deletes the given tag from the given repository. - """ - pass - - @abstractmethod - def load_token(self, token): - """ - Loads the data associated with the given (deprecated) access token, and, if - found returns True. - """ - pass - - @abstractmethod - def verify_robot(self, username, token): - """ - Returns True if the given robot username and token match an existing robot - account. - """ - pass - - @abstractmethod - def change_user_password(self, user, new_password): - """ - Changes the password associated with the given user. - """ - pass - - @abstractmethod - def get_repository(self, namespace_name, repo_name): - """ - Returns the repository with the given name under the given namespace or None - if none. - """ - pass - - @abstractmethod - def create_repository(self, namespace_name, repo_name, user=None): - """ - Creates a new repository under the given namespace with the given name, for - the given user. - """ - pass - - @abstractmethod - def repository_is_public(self, namespace_name, repo_name): - """ - Returns whether the repository with the given name under the given namespace - is public. If no matching repository was found, returns False. - """ - pass - - @abstractmethod - def validate_oauth_token(self, token): - """ Returns whether the given OAuth token validates. """ - pass - - @abstractmethod - def get_sorted_matching_repositories(self, search_term, filter_username=None, offset=0, limit=25): - """ - Returns a sorted list of repositories matching the given search term. - """ - pass - - class PreOCIModel(DockerRegistryV1DataInterface): """ PreOCIModel implements the data model for the v1 Docker Registry protocol using a database schema before it was changed to support the OCI specification. 
""" + def placement_locations_and_path_docker_v1(self, namespace_name, repo_name, image_id): repo_image = model.image.get_repo_image_extended(namespace_name, repo_name, image_id) if not repo_image or repo_image.storage is None: @@ -245,8 +26,7 @@ class PreOCIModel(DockerRegistryV1DataInterface): 'repo_name': repo_name, 'image_id': image_id, 'checksum': repo_image.v1_checksum, - 'compat_json': repo_image.v1_json_metadata, - }) + 'compat_json': repo_image.v1_json_metadata,}) def update_docker_v1_metadata(self, namespace_name, repo_name, image_id, created_date_str, comment, command, compat_json, parent_image_id=None): @@ -274,7 +54,7 @@ class PreOCIModel(DockerRegistryV1DataInterface): if repo_image is None or repo_image.storage is None: return - with db_transaction(): + with model.db_transaction(): repo_image.storage.content_checksum = content_checksum repo_image.v1_checksum = checksum repo_image.storage.save() @@ -388,10 +168,10 @@ class PreOCIModel(DockerRegistryV1DataInterface): def validate_oauth_token(self, token): return bool(model.oauth.validate_access_token(token)) - def get_sorted_matching_repositories(self, search_term, filter_username=None, offset=0, limit=25): - repos = model.repository.get_filtered_matching_repositories(search_term, - filter_username=filter_username, - offset=offset, limit=limit) + def get_sorted_matching_repositories(self, search_term, filter_username=None, offset=0, + limit=25): + repos = model.repository.get_filtered_matching_repositories( + search_term, filter_username=filter_username, offset=offset, limit=limit) return [_repository_for_repo(repo) for repo in repos] @@ -403,8 +183,7 @@ def _repository_for_repo(repo): namespace_name=repo.namespace_user.username, description=repo.description, is_public=model.repository.is_repository_public(repo), - kind=model.repository.get_repo_kind_name(repo), - ) + kind=model.repository.get_repo_kind_name(repo),) pre_oci_model = PreOCIModel() diff --git a/endpoints/v1/registry.py b/endpoints/v1/registry.py index 845fd464a..bbe28fe77 100644 --- a/endpoints/v1/registry.py +++ b/endpoints/v1/registry.py @@ -10,13 +10,12 @@ from flask import make_response, request, session, Response, redirect, abort as from app import storage as store, app, metric_queue from auth.auth_context import get_authenticated_user from auth.decorators import extract_namespace_repo_from_session, process_auth -from auth.permissions import (ReadRepositoryPermission, - ModifyRepositoryPermission) +from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermission) from auth.registry_jwt_auth import get_granted_username from data import model, database -from data.interfaces.v1 import pre_oci_model as model from digest import checksums from endpoints.v1 import v1_bp +from endpoints.v1.models_pre_oci import pre_oci_model as model from endpoints.decorators import anon_protect from util.http import abort, exact_abort from util.registry.filelike import SocketReader @@ -24,7 +23,6 @@ from util.registry import gzipstream from util.registry.replication import queue_storage_replication from util.registry.torrent import PieceHasher - logger = logging.getLogger(__name__) @@ -38,18 +36,21 @@ def _finish_image(namespace, repository, image_id): def require_completion(f): """This make sure that the image push correctly finished.""" + @wraps(f) def wrapper(namespace, repository, *args, **kwargs): image_id = kwargs['image_id'] if model.is_image_uploading(namespace, repository, image_id): - abort(400, 'Image %(image_id)s is being uploaded, retry later', - 
issue='upload-in-progress', image_id=image_id) + abort(400, 'Image %(image_id)s is being uploaded, retry later', issue='upload-in-progress', + image_id=image_id) return f(namespace, repository, *args, **kwargs) + return wrapper def set_cache_headers(f): """Returns HTTP headers suitable for caching.""" + @wraps(f) def wrapper(*args, **kwargs): # Set TTL to 1 year by default @@ -59,8 +60,7 @@ def set_cache_headers(f): headers = { 'Cache-Control': 'public, max-age={0}'.format(ttl), 'Expires': expires, - 'Last-Modified': 'Thu, 01 Jan 1970 00:00:00 GMT', - } + 'Last-Modified': 'Thu, 01 Jan 1970 00:00:00 GMT',} if 'If-Modified-Since' in request.headers: response = make_response('Not modified', 304) response.headers.extend(headers) @@ -69,6 +69,7 @@ def set_cache_headers(f): # Prevent the Cookie to be sent when the object is cacheable session.modified = False return f(*args, **kwargs) + return wrapper @@ -92,8 +93,7 @@ def head_image_layer(namespace, repository, image_id, headers): locations, _ = model.placement_locations_and_path_docker_v1(namespace, repository, image_id) if locations is None: logger.debug('Could not find any blob placement locations') - abort(404, 'Image %(image_id)s not found', issue='unknown-image', - image_id=image_id) + abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id) # Add the Accept-Ranges header if the storage engine supports resumable # downloads. @@ -129,8 +129,7 @@ def get_image_layer(namespace, repository, image_id, headers): logger.debug('Looking up placement locations and path') locations, path = model.placement_locations_and_path_docker_v1(namespace, repository, image_id) if not locations or not path: - abort(404, 'Image %(image_id)s not found', issue='unknown-image', - image_id=image_id) + abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id) try: logger.debug('Looking up the direct download URL for path: %s', path) direct_download_url = store.get_direct_download_url(locations, path) @@ -145,8 +144,7 @@ def get_image_layer(namespace, repository, image_id, headers): return Response(store.stream_read(locations, path), headers=headers) except (IOError, AttributeError): logger.exception('Image layer data not found') - abort(404, 'Image %(image_id)s not found', issue='unknown-image', - image_id=image_id) + abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id) abort(403) @@ -214,10 +212,8 @@ def put_image_layer(namespace, repository, image_id): try: start_time = time() store.stream_write(locations, path, sr) - metric_queue.chunk_size.Observe(size_info.compressed_size, - labelvalues=[list(locations)[0]]) - metric_queue.chunk_upload_time.Observe(time() - start_time, - labelvalues=[list(locations)[0]]) + metric_queue.chunk_size.Observe(size_info.compressed_size, labelvalues=[list(locations)[0]]) + metric_queue.chunk_upload_time.Observe(time() - start_time, labelvalues=[list(locations)[0]]) except IOError: logger.exception('Exception when writing image data') abort(520, 'Image %(image_id)s could not be written. Please try again.', image_id=image_id) @@ -227,7 +223,8 @@ def put_image_layer(namespace, repository, image_id): size_info.uncompressed_size) # Save the BitTorrent pieces. - model.create_bittorrent_pieces(namespace, repository, image_id, piece_hasher.final_piece_hashes()) + model.create_bittorrent_pieces(namespace, repository, image_id, + piece_hasher.final_piece_hashes()) # Append the computed checksum. 
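# A simplified sketch of what the v1 'simple' checksum covers; the real code
# defers to digest.checksums, and the json + '\n' + layer-bytes framing below
# is an assumption, not lifted from that module:
#
#   import hashlib
#   h = hashlib.sha256()
#   h.update(uploaded_json + '\n')   # image JSON first (uploaded_json: hypothetical name)
#   for chunk in layer_chunks:       # then the raw layer stream (hypothetical iterable)
#       h.update(chunk)
#   csums.append('sha256:%s' % h.hexdigest())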
csums = [] @@ -307,8 +304,8 @@ def put_image_checksum(namespace, repository, image_id): logger.debug('Marking image path') if not model.is_image_uploading(namespace, repository, image_id): - abort(409, 'Cannot set checksum for image %(image_id)s', - issue='image-write-error', image_id=image_id) + abort(409, 'Cannot set checksum for image %(image_id)s', issue='image-write-error', + image_id=image_id) logger.debug('Storing image and content checksums') @@ -323,8 +320,8 @@ def put_image_checksum(namespace, repository, image_id): logger.debug('session checksums: %s', session.get('checksum', [])) logger.debug('client supplied checksum: %s', checksum) logger.debug('put_image_checksum: Wrong checksum') - abort(400, 'Checksum mismatch for image: %(image_id)s', - issue='checksum-mismatch', image_id=image_id) + abort(400, 'Checksum mismatch for image: %(image_id)s', issue='checksum-mismatch', + image_id=image_id) # Mark the image as uploaded. _finish_image(namespace, repository, image_id) @@ -416,16 +413,16 @@ def put_image_json(namespace, repository, image_id): pass if not data or not isinstance(data, dict): - abort(400, 'Invalid JSON for image: %(image_id)s\nJSON: %(json)s', - issue='invalid-request', image_id=image_id, json=request.data) + abort(400, 'Invalid JSON for image: %(image_id)s\nJSON: %(json)s', issue='invalid-request', + image_id=image_id, json=request.data) if 'id' not in data: - abort(400, 'Missing key `id` in JSON for image: %(image_id)s', - issue='invalid-request', image_id=image_id) + abort(400, 'Missing key `id` in JSON for image: %(image_id)s', issue='invalid-request', + image_id=image_id) if image_id != data['id']: - abort(400, 'JSON data contains invalid id for image: %(image_id)s', - issue='invalid-request', image_id=image_id) + abort(400, 'JSON data contains invalid id for image: %(image_id)s', issue='invalid-request', + image_id=image_id) logger.debug('Looking up repo image') @@ -469,7 +466,8 @@ def put_image_json(namespace, repository, image_id): command = json.dumps(command_list) if command_list else None logger.debug('Setting image metadata') - model.update_docker_v1_metadata(namespace, repository, image_id, data.get('created'), + model.update_docker_v1_metadata(namespace, repository, image_id, + data.get('created'), data.get('comment'), command, uploaded_metadata, parent_id) return make_response('true', 200) diff --git a/endpoints/v1/tag.py b/endpoints/v1/tag.py index ce3726374..4a741ff6b 100644 --- a/endpoints/v1/tag.py +++ b/endpoints/v1/tag.py @@ -3,18 +3,15 @@ import json from flask import abort, request, jsonify, make_response, session - -from util.names import TAG_ERROR, TAG_REGEX from auth.decorators import process_auth -from auth.permissions import (ReadRepositoryPermission, - ModifyRepositoryPermission) +from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermission) from data import model -from data.interfaces.v1 import pre_oci_model as model from endpoints.common import parse_repository_name from endpoints.decorators import anon_protect from endpoints.v1 import v1_bp -from endpoints.trackhelper import track_and_log - +from endpoints.v1.models_pre_oci import pre_oci_model as model +from util.audit import track_and_log +from util.names import TAG_ERROR, TAG_REGEX logger = logging.getLogger(__name__) diff --git a/endpoints/v2/__init__.py b/endpoints/v2/__init__.py index 483265818..db490de02 100644 --- a/endpoints/v2/__init__.py +++ b/endpoints/v2/__init__.py @@ -12,21 +12,19 @@ import features from app import app, metric_queue, get_app_url, 
license_validator from auth.auth_context import get_grant_context -from auth.permissions import (ReadRepositoryPermission, ModifyRepositoryPermission, - AdministerRepositoryPermission) +from auth.permissions import ( + ReadRepositoryPermission, ModifyRepositoryPermission, AdministerRepositoryPermission) from auth.registry_jwt_auth import process_registry_jwt_auth, get_auth_headers -from data.interfaces.v2 import pre_oci_model as model from endpoints.decorators import anon_protect, anon_allowed from endpoints.v2.errors import V2RegistryException, Unauthorized, Unsupported, NameUnknown +from endpoints.v2.models_pre_oci import data_model as model from util.http import abort from util.metrics.metricqueue import time_blueprint from util.registry.dockerver import docker_version from util.pagination import encrypt_page_token, decrypt_page_token - logger = logging.getLogger(__name__) - v2_bp = Blueprint('v2', __name__) license_validator.enforce_license_before_request(v2_bp) time_blueprint(v2_bp, metric_queue) @@ -34,9 +32,7 @@ time_blueprint(v2_bp, metric_queue) @v2_bp.app_errorhandler(V2RegistryException) def handle_registry_v2_exception(error): - response = jsonify({ - 'errors': [error.as_dict()] - }) + response = jsonify({'errors': [error.as_dict()]}) response.status_code = error.http_status_code if response.status_code == 401: @@ -53,6 +49,7 @@ def paginate(limit_kwarg_name='limit', offset_kwarg_name='offset', """ Decorates a handler adding a parsed pagination token and a callback to encode a response token. """ + def wrapper(func): @wraps(func) def wrapped(*args, **kwargs): @@ -62,7 +59,7 @@ def paginate(limit_kwarg_name='limit', offset_kwarg_name='offset', requested_limit = 0 limit = max(min(requested_limit, _MAX_RESULTS_PER_PAGE), 1) - next_page_token = request.args.get('next_page', None) + next_page_token = request.args.get('next_page', request.args.get('last', None)) # Decrypt the next page token, if any. offset = 0 @@ -86,7 +83,9 @@ def paginate(limit_kwarg_name='limit', offset_kwarg_name='offset', kwargs[offset_kwarg_name] = offset kwargs[callback_kwarg_name] = callback return func(*args, **kwargs) + return wrapped + return wrapper @@ -94,17 +93,15 @@ def _require_repo_permission(permission_class, scopes=None, allow_public=False): def wrapper(func): @wraps(func) def wrapped(namespace_name, repo_name, *args, **kwargs): - logger.debug('Checking permission %s for repo: %s/%s', permission_class, - namespace_name, repo_name) + logger.debug('Checking permission %s for repo: %s/%s', permission_class, namespace_name, + repo_name) repository = namespace_name + '/' + repo_name repo = model.get_repository(namespace_name, repo_name) if repo is None: raise Unauthorized(repository=repository, scopes=scopes) permission = permission_class(namespace_name, repo_name) - if (permission.can() or - (allow_public and - repo.is_public)): + if (permission.can() or (allow_public and repo.is_public)): if repo.kind != 'image': msg = 'This repository is for managing %s resources and not container images.' 
% repo.kind raise Unsupported(detail=msg) @@ -112,16 +109,15 @@ def _require_repo_permission(permission_class, scopes=None, allow_public=False): raise Unauthorized(repository=repository, scopes=scopes) return wrapped + return wrapper -require_repo_read = _require_repo_permission(ReadRepositoryPermission, - scopes=['pull'], +require_repo_read = _require_repo_permission(ReadRepositoryPermission, scopes=['pull'], allow_public=True) -require_repo_write = _require_repo_permission(ModifyRepositoryPermission, - scopes=['pull', 'push']) -require_repo_admin = _require_repo_permission(AdministerRepositoryPermission, - scopes=['pull', 'push']) +require_repo_write = _require_repo_permission(ModifyRepositoryPermission, scopes=['pull', 'push']) +require_repo_admin = _require_repo_permission(AdministerRepositoryPermission, scopes=[ + 'pull', 'push']) def get_input_stream(flask_request): @@ -138,7 +134,9 @@ def route_show_if(value): abort(404) return f(*args, **kwargs) + return decorated_function + return decorator @@ -169,5 +167,4 @@ from endpoints.v2 import ( catalog, manifest, tag, - v2auth, -) + v2auth,) diff --git a/endpoints/v2/blob.py b/endpoints/v2/blob.py index cd77ee2ee..f736eaccc 100644 --- a/endpoints/v2/blob.py +++ b/endpoints/v2/blob.py @@ -10,22 +10,20 @@ import resumablehashlib from app import storage, app, get_app_url, metric_queue from auth.registry_jwt_auth import process_registry_jwt_auth from data import database -from data.interfaces.v2 import pre_oci_model as model from digest import digest_tools from endpoints.common import parse_repository_name -from endpoints.v2 import v2_bp, require_repo_read, require_repo_write, get_input_stream -from endpoints.v2.errors import (BlobUnknown, BlobUploadInvalid, BlobUploadUnknown, Unsupported, - NameUnknown, LayerTooLarge) from endpoints.decorators import anon_protect +from endpoints.v2 import v2_bp, require_repo_read, require_repo_write, get_input_stream +from endpoints.v2.errors import ( + BlobUnknown, BlobUploadInvalid, BlobUploadUnknown, Unsupported, NameUnknown, LayerTooLarge) +from endpoints.v2.models_pre_oci import data_model as model from util.cache import cache_control from util.registry.filelike import wrap_with_handler, StreamSlice from util.registry.gzipstream import calculate_size_handler from util.registry.torrent import PieceHasher - logger = logging.getLogger(__name__) - BASE_BLOB_ROUTE = '//blobs/' BLOB_DIGEST_ROUTE = BASE_BLOB_ROUTE.format(digest_tools.DIGEST_PATTERN) RANGE_HEADER_REGEX = re.compile(r'^bytes=([0-9]+)-([0-9]+)$') @@ -52,8 +50,7 @@ def check_blob_exists(namespace_name, repo_name, digest): headers = { 'Docker-Content-Digest': digest, 'Content-Length': blob.size, - 'Content-Type': BLOB_CONTENT_TYPE, - } + 'Content-Type': BLOB_CONTENT_TYPE,} # If our storage supports range requests, let the client know. 
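# The advertisement itself is assumed to be the standard HTTP header set in
# the elided body below:
#
#   headers['Accept-Ranges'] = 'bytes'
#
# after which a client may resume a blob download with a ranged GET, e.g.
# by sending 'Range: bytes=1024-'.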
if storage.get_supports_resumable_downloads(blob.locations): @@ -102,10 +99,7 @@ def download_blob(namespace_name, repo_name, digest): storage.stream_read(blob.locations, path), headers=headers.update({ 'Content-Length': blob.size, - 'Content-Type': BLOB_CONTENT_TYPE, - }), - ) - + 'Content-Type': BLOB_CONTENT_TYPE,}),) @v2_bp.route('//blobs/uploads/', methods=['POST']) @@ -128,13 +122,13 @@ def start_blob_upload(namespace_name, repo_name): return Response( status=202, headers={ - 'Docker-Upload-UUID': new_upload_uuid, - 'Range': _render_range(0), - 'Location': get_app_url() + url_for('v2.upload_chunk', - repository='%s/%s' % (namespace_name, repo_name), - upload_uuid=new_upload_uuid) - }, - ) + 'Docker-Upload-UUID': + new_upload_uuid, + 'Range': + _render_range(0), + 'Location': + get_app_url() + url_for('v2.upload_chunk', repository='%s/%s' % + (namespace_name, repo_name), upload_uuid=new_upload_uuid)},) # The user plans to send us the entire body right now. # Find the upload. @@ -158,12 +152,11 @@ def start_blob_upload(namespace_name, repo_name): return Response( status=201, headers={ - 'Docker-Content-Digest': digest, - 'Location': get_app_url() + url_for('v2.download_blob', - repository='%s/%s' % (namespace_name, repo_name), - digest=digest), - }, - ) + 'Docker-Content-Digest': + digest, + 'Location': + get_app_url() + url_for('v2.download_blob', repository='%s/%s' % + (namespace_name, repo_name), digest=digest),},) @v2_bp.route('//blobs/uploads/', methods=['GET']) @@ -180,9 +173,8 @@ def fetch_existing_upload(namespace_name, repo_name, upload_uuid): status=204, headers={ 'Docker-Upload-UUID': upload_uuid, - 'Range': _render_range(blob_upload.byte_count+1), # byte ranges are exclusive - }, - ) + 'Range': _render_range(blob_upload.byte_count + 1), # byte ranges are exclusive + },) @v2_bp.route('//blobs/uploads/', methods=['PATCH']) @@ -211,9 +203,7 @@ def upload_chunk(namespace_name, repo_name, upload_uuid): headers={ 'Location': _current_request_url(), 'Range': _render_range(updated_blob_upload.byte_count, with_bytes_prefix=False), - 'Docker-Upload-UUID': upload_uuid, - }, - ) + 'Docker-Upload-UUID': upload_uuid,},) @v2_bp.route('//blobs/uploads/', methods=['PUT']) @@ -242,15 +232,12 @@ def monolithic_upload_or_last_chunk(namespace_name, repo_name, upload_uuid): _finish_upload(namespace_name, repo_name, updated_blob_upload, digest) # Write the response to the client. - return Response( - status=201, - headers={ - 'Docker-Content-Digest': digest, - 'Location': get_app_url() + url_for('v2.download_blob', - repository='%s/%s' % (namespace_name, repo_name), - digest=digest), - } - ) + return Response(status=201, headers={ + 'Docker-Content-Digest': + digest, + 'Location': + get_app_url() + url_for('v2.download_blob', repository='%s/%s' % + (namespace_name, repo_name), digest=digest),}) @v2_bp.route('//blobs/uploads/', methods=['DELETE']) @@ -300,9 +287,11 @@ def _abort_range_not_satisfiable(valid_end, upload_uuid): TODO(jzelinskie): Unify this with the V2RegistryException class. 
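  Illustrative response shape (values hypothetical): HTTP 416 with headers
  'Location: <current request URL>', 'Range: 0-2048' and
  'Docker-Upload-UUID: <upload uuid>'.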
""" - flask_abort(Response(status=416, headers={'Location': _current_request_url(), - 'Range': '0-{0}'.format(valid_end), - 'Docker-Upload-UUID': upload_uuid})) + flask_abort( + Response(status=416, headers={ + 'Location': _current_request_url(), + 'Range': '0-{0}'.format(valid_end), + 'Docker-Upload-UUID': upload_uuid})) def _parse_range_header(range_header_text): @@ -415,16 +404,15 @@ def _upload_chunk(blob_upload, range_header): length, input_fp, blob_upload.storage_metadata, - content_type=BLOB_CONTENT_TYPE, - ) + content_type=BLOB_CONTENT_TYPE,) if upload_error is not None: logger.error('storage.stream_upload_chunk returned error %s', upload_error) return None # Update the chunk upload time metric. - metric_queue.chunk_upload_time.Observe(time.time() - start_time, - labelvalues=[length_written, list(location_set)[0]]) + metric_queue.chunk_upload_time.Observe(time.time() - start_time, labelvalues=[ + length_written, list(location_set)[0]]) # If we determined an uncompressed size and this is the first chunk, add it to the blob. # Otherwise, we clear the size from the blob as it was uploaded in multiple chunks. @@ -499,8 +487,7 @@ def _finalize_blob_database(namespace_name, repo_name, blob_upload, digest, alre repo_name, digest, blob_upload, - app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'], - ) + app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'],) # If it doesn't already exist, create the BitTorrent pieces for the blob. if blob_upload.piece_sha_state is not None and not already_existed: @@ -521,5 +508,4 @@ def _finish_upload(namespace_name, repo_name, blob_upload, digest): repo_name, blob_upload, digest, - _finalize_blob_storage(blob_upload, digest), - ) + _finalize_blob_storage(blob_upload, digest),) diff --git a/endpoints/v2/catalog.py b/endpoints/v2/catalog.py index 8ae243460..096dd1d15 100644 --- a/endpoints/v2/catalog.py +++ b/endpoints/v2/catalog.py @@ -1,9 +1,12 @@ +import features + from flask import jsonify from auth.registry_jwt_auth import process_registry_jwt_auth, get_granted_entity from endpoints.decorators import anon_protect from endpoints.v2 import v2_bp, paginate -from data.interfaces.v2 import pre_oci_model as model +from endpoints.v2.models_pre_oci import data_model as model + @v2_bp.route('/_catalog', methods=['GET']) @process_registry_jwt_auth() @@ -15,11 +18,12 @@ def catalog_search(limit, offset, pagination_callback): if entity: username = entity.user.username - visible_repositories = model.get_visible_repositories(username, limit+1, offset) + include_public = bool(features.PUBLIC_CATALOG) + visible_repositories = model.get_visible_repositories(username, limit + 1, offset, + include_public=include_public) response = jsonify({ 'repositories': ['%s/%s' % (repo.namespace_name, repo.name) - for repo in visible_repositories][0:limit], - }) + for repo in visible_repositories][0:limit],}) pagination_callback(len(visible_repositories), response) return response diff --git a/endpoints/v2/errors.py b/endpoints/v2/errors.py index 127424ca1..0ae998106 100644 --- a/endpoints/v2/errors.py +++ b/endpoints/v2/errors.py @@ -1,8 +1,9 @@ import bitmath + class V2RegistryException(Exception): - def __init__(self, error_code_str, message, detail, http_status_code=400, - repository=None, scopes=None): + def __init__(self, error_code_str, message, detail, http_status_code=400, repository=None, + scopes=None): super(V2RegistryException, self).__init__(message) self.http_status_code = http_status_code self.repository = repository @@ -15,104 +16,81 @@ class V2RegistryException(Exception): return { 
'code': self._error_code_str, 'message': self.message, - 'detail': self._detail if self._detail is not None else {}, - } + 'detail': self._detail if self._detail is not None else {},} class BlobUnknown(V2RegistryException): def __init__(self, detail=None): - super(BlobUnknown, self).__init__('BLOB_UNKNOWN', - 'blob unknown to registry', - detail, - 404) + super(BlobUnknown, self).__init__('BLOB_UNKNOWN', 'blob unknown to registry', detail, 404) class BlobUploadInvalid(V2RegistryException): def __init__(self, detail=None): - super(BlobUploadInvalid, self).__init__('BLOB_UPLOAD_INVALID', - 'blob upload invalid', - detail) + super(BlobUploadInvalid, self).__init__('BLOB_UPLOAD_INVALID', 'blob upload invalid', detail) class BlobUploadUnknown(V2RegistryException): def __init__(self, detail=None): super(BlobUploadUnknown, self).__init__('BLOB_UPLOAD_UNKNOWN', - 'blob upload unknown to registry', - detail, - 404) + 'blob upload unknown to registry', detail, 404) class DigestInvalid(V2RegistryException): def __init__(self, detail=None): super(DigestInvalid, self).__init__('DIGEST_INVALID', - 'provided digest did not match uploaded content', - detail) + 'provided digest did not match uploaded content', detail) class ManifestBlobUnknown(V2RegistryException): def __init__(self, detail=None): super(ManifestBlobUnknown, self).__init__('MANIFEST_BLOB_UNKNOWN', - 'manifest blob unknown to registry', - detail) + 'manifest blob unknown to registry', detail) class ManifestInvalid(V2RegistryException): def __init__(self, detail=None, http_status_code=400): - super(ManifestInvalid, self).__init__('MANIFEST_INVALID', - 'manifest invalid', - detail, + super(ManifestInvalid, self).__init__('MANIFEST_INVALID', 'manifest invalid', detail, http_status_code) class ManifestUnknown(V2RegistryException): def __init__(self, detail=None): - super(ManifestUnknown, self).__init__('MANIFEST_UNKNOWN', - 'manifest unknown', - detail, - 404) + super(ManifestUnknown, self).__init__('MANIFEST_UNKNOWN', 'manifest unknown', detail, 404) class ManifestUnverified(V2RegistryException): def __init__(self, detail=None): super(ManifestUnverified, self).__init__('MANIFEST_UNVERIFIED', - 'manifest failed signature verification', - detail) + 'manifest failed signature verification', detail) class NameInvalid(V2RegistryException): def __init__(self, detail=None, message=None): - super(NameInvalid, self).__init__('NAME_INVALID', - message or 'invalid repository name', - detail) + super(NameInvalid, self).__init__('NAME_INVALID', message or 'invalid repository name', detail) class NameUnknown(V2RegistryException): def __init__(self, detail=None): - super(NameUnknown, self).__init__('NAME_UNKNOWN', - 'repository name not known to registry', - detail, - 404) + super(NameUnknown, self).__init__('NAME_UNKNOWN', 'repository name not known to registry', + detail, 404) class SizeInvalid(V2RegistryException): def __init__(self, detail=None): super(SizeInvalid, self).__init__('SIZE_INVALID', - 'provided length did not match content length', - detail) + 'provided length did not match content length', detail) class TagAlreadyExists(V2RegistryException): def __init__(self, detail=None): - super(TagAlreadyExists, self).__init__('TAG_ALREADY_EXISTS', - 'tag was already pushed', - detail, + super(TagAlreadyExists, self).__init__('TAG_ALREADY_EXISTS', 'tag was already pushed', detail, 409) + class TagInvalid(V2RegistryException): def __init__(self, detail=None): - super(TagInvalid, self).__init__('TAG_INVALID', - 'manifest tag did not match URI', - detail) + 
super(TagInvalid, self).__init__('TAG_INVALID', 'manifest tag did not match URI', detail) + class LayerTooLarge(V2RegistryException): def __init__(self, uploaded=None, max_allowed=None): @@ -123,43 +101,33 @@ class LayerTooLarge(V2RegistryException): detail = { 'reason': '%s is greater than maximum allowed size %s' % (uploaded, max_allowed), 'max_allowed': max_allowed, - 'uploaded': uploaded, - } + 'uploaded': uploaded,} up_str = bitmath.Byte(uploaded).best_prefix().format("{value:.2f} {unit}") max_str = bitmath.Byte(max_allowed).best_prefix().format("{value:.2f} {unit}") - message = 'Uploaded blob of %s is larger than %s allowed by this registry' % (up_str, max_str) + message = 'Uploaded blob of %s is larger than %s allowed by this registry' % (up_str, + max_str) + class Unauthorized(V2RegistryException): def __init__(self, detail=None, repository=None, scopes=None): - super(Unauthorized, self).__init__('UNAUTHORIZED', - 'access to the requested resource is not authorized', - detail, - 401, - repository=repository, - scopes=scopes) + super(Unauthorized, + self).__init__('UNAUTHORIZED', 'access to the requested resource is not authorized', + detail, 401, repository=repository, scopes=scopes) class Unsupported(V2RegistryException): def __init__(self, detail=None, message=None): - super(Unsupported, self).__init__('UNSUPPORTED', - message or 'The operation is unsupported.', - detail, - 405) + super(Unsupported, self).__init__('UNSUPPORTED', message or 'The operation is unsupported.', + detail, 405) class InvalidLogin(V2RegistryException): def __init__(self, message=None): - super(InvalidLogin, self).__init__('UNAUTHORIZED', - message or 'Specified credentials are invalid', - {}, - 401) - + super(InvalidLogin, self).__init__('UNAUTHORIZED', message or + 'Specified credentials are invalid', {}, 401) class InvalidRequest(V2RegistryException): def __init__(self, message=None): - super(InvalidRequest, self).__init__('INVALID_REQUEST', - message or 'Invalid request', - {}, - 400) \ No newline at end of file + super(InvalidRequest, self).__init__('INVALID_REQUEST', message or 'Invalid request', {}, 400) diff --git a/endpoints/v2/manifest.py b/endpoints/v2/manifest.py index a4155add2..5d480472b 100644 --- a/endpoints/v2/manifest.py +++ b/endpoints/v2/manifest.py @@ -8,30 +8,30 @@ import features from app import docker_v2_signing_key, app, metric_queue from auth.registry_jwt_auth import process_registry_jwt_auth -from data.interfaces.v2 import pre_oci_model as model, Label from digest import digest_tools from endpoints.common import parse_repository_name from endpoints.decorators import anon_protect -from endpoints.v2 import v2_bp, require_repo_read, require_repo_write -from endpoints.v2.errors import (BlobUnknown, ManifestInvalid, ManifestUnknown, TagInvalid, - NameInvalid) -from endpoints.trackhelper import track_and_log from endpoints.notificationhelper import spawn_notification +from endpoints.v2 import v2_bp, require_repo_read, require_repo_write +from endpoints.v2.errors import ( + BlobUnknown, ManifestInvalid, ManifestUnknown, TagInvalid, NameInvalid) +from endpoints.v2.models_interface import Label +from endpoints.v2.models_pre_oci import data_model as model from image.docker import ManifestException from image.docker.schema1 import DockerSchema1Manifest, DockerSchema1ManifestBuilder from image.docker.schema2 import DOCKER_SCHEMA2_CONTENT_TYPES +from util.audit import track_and_log from util.names import VALID_TAG_PATTERN from util.registry.replication import queue_replication_batch from 
util.validation import is_json - logger = logging.getLogger(__name__) - BASE_MANIFEST_ROUTE = '//manifests/' MANIFEST_DIGEST_ROUTE = BASE_MANIFEST_ROUTE.format(digest_tools.DIGEST_PATTERN) MANIFEST_TAGNAME_ROUTE = BASE_MANIFEST_ROUTE.format(VALID_TAG_PATTERN) + @v2_bp.route(MANIFEST_TAGNAME_ROUTE, methods=['GET']) @parse_repository_name() @process_registry_jwt_auth(scopes=['pull']) @@ -51,14 +51,14 @@ def fetch_manifest_by_tagname(namespace_name, repo_name, manifest_ref): repo = model.get_repository(namespace_name, repo_name) if repo is not None: track_and_log('pull_repo', repo, analytics_name='pull_repo_100x', analytics_sample=0.01, - tag=manifest_ref) + tag=manifest_ref) metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2', True]) return Response( manifest.json, status=200, - headers={'Content-Type': manifest.media_type, 'Docker-Content-Digest': manifest.digest}, - ) + headers={'Content-Type': manifest.media_type, + 'Docker-Content-Digest': manifest.digest},) @v2_bp.route(MANIFEST_DIGEST_ROUTE, methods=['GET']) @@ -77,8 +77,9 @@ def fetch_manifest_by_digest(namespace_name, repo_name, manifest_ref): track_and_log('pull_repo', repo, manifest_digest=manifest_ref) metric_queue.repository_pull.Inc(labelvalues=[namespace_name, repo_name, 'v2', True]) - return Response(manifest.json, status=200, headers={'Content-Type': manifest.media_type, - 'Docker-Content-Digest': manifest.digest}) + return Response(manifest.json, status=200, headers={ + 'Content-Type': manifest.media_type, + 'Docker-Content-Digest': manifest.digest}) def _reject_manifest2_schema2(func): @@ -88,6 +89,7 @@ def _reject_manifest2_schema2(func): raise ManifestInvalid(detail={'message': 'manifest schema version not supported'}, http_status_code=415) return func(*args, **kwargs) + return wrapped @@ -130,8 +132,7 @@ def write_manifest_by_digest(namespace_name, repo_name, manifest_ref): def _write_manifest(namespace_name, repo_name, manifest): - if (manifest.namespace == '' and - features.LIBRARY_SUPPORT and + if (manifest.namespace == '' and features.LIBRARY_SUPPORT and namespace_name == app.config['LIBRARY_NAMESPACE']): pass elif manifest.namespace != namespace_name: @@ -173,8 +174,7 @@ def _write_manifest(namespace_name, repo_name, manifest): rewritten_image.comment, rewritten_image.command, rewritten_image.compat_json, - rewritten_image.parent_image_id, - ) + rewritten_image.parent_image_id,) except ManifestException as me: logger.exception("exception when rewriting v1 metadata") raise ManifestInvalid(detail={'message': 'failed synthesizing v1 metadata: %s' % me.message}) @@ -211,12 +211,11 @@ def _write_manifest_and_log(namespace_name, repo_name, manifest): 'OK', status=202, headers={ - 'Docker-Content-Digest': manifest.digest, - 'Location': url_for('v2.fetch_manifest_by_digest', - repository='%s/%s' % (namespace_name, repo_name), - manifest_ref=manifest.digest), - }, - ) + 'Docker-Content-Digest': + manifest.digest, + 'Location': + url_for('v2.fetch_manifest_by_digest', repository='%s/%s' % (namespace_name, repo_name), + manifest_ref=manifest.digest),},) @v2_bp.route(MANIFEST_DIGEST_ROUTE, methods=['DELETE']) @@ -270,5 +269,6 @@ def _generate_and_store_manifest(namespace_name, repo_name, tag_name): manifest.bytes) return manifest + def _determine_media_type(value): media_type_name = 'application/json' if is_json(value) else 'text/plain' diff --git a/endpoints/v2/models_interface.py b/endpoints/v2/models_interface.py new file mode 100644 index 000000000..bbfd51b2c --- /dev/null +++ 
b/endpoints/v2/models_interface.py
@@ -0,0 +1,258 @@
+from abc import ABCMeta, abstractmethod
+from collections import namedtuple
+
+from namedlist import namedlist
+from six import add_metaclass
+
+
+class Repository(
+    namedtuple('Repository', [
+      'id', 'name', 'namespace_name', 'description', 'is_public', 'kind', 'trust_enabled'])):
+  """
+  Repository represents a namespaced collection of tags.
+  :type id: int
+  :type name: string
+  :type namespace_name: string
+  :type description: string
+  :type is_public: bool
+  :type kind: string
+  :type trust_enabled: bool
+  """
+
+
+class ManifestJSON(namedtuple('ManifestJSON', ['digest', 'json', 'media_type'])):
+  """
+  ManifestJSON represents a Manifest of any format.
+  """
+
+
+class Tag(namedtuple('Tag', ['name', 'repository'])):
+  """
+  Tag represents a user-facing alias for referencing a set of Manifests.
+  """
+
+
+class BlobUpload(
+    namedlist('BlobUpload', [
+      'uuid', 'byte_count', 'uncompressed_byte_count', 'chunk_count', 'sha_state', 'location_name',
+      'storage_metadata', 'piece_sha_state', 'piece_hashes', 'repo_namespace_name', 'repo_name'])):
+  """
+  BlobUpload represents the current state of a Blob being uploaded.
+  """
+
+
+class Blob(namedtuple('Blob', ['uuid', 'digest', 'size', 'locations'])):
+  """
+  Blob represents an opaque binary blob saved to the storage system.
+  """
+
+
+class RepositoryReference(namedtuple('RepositoryReference', ['id', 'name', 'namespace_name'])):
+  """
+  RepositoryReference represents a reference to a Repository, without its full metadata.
+  """
+
+
+class Label(namedtuple('Label', ['key', 'value', 'source_type', 'media_type'])):
+  """
+  Label represents a key-value pair that describes a particular Manifest.
+  """
+
+
+@add_metaclass(ABCMeta)
+class DockerRegistryV2DataInterface(object):
+  """
+  Interface that represents all data store interactions required by a Docker Registry v2.
+  """
+
+  @abstractmethod
+  def create_repository(self, namespace_name, repo_name, creating_user=None):
+    """
+    Creates a new repository under the specified namespace with the given name. The user supplied is
+    the user creating the repository, if any.
+    """
+    pass
+
+  @abstractmethod
+  def get_repository(self, namespace_name, repo_name):
+    """
+    Returns a repository tuple for the repository with the given name under the given namespace.
+    Returns None if no such repository was found.
+    """
+    pass
+
+  @abstractmethod
+  def has_active_tag(self, namespace_name, repo_name, tag_name):
+    """
+    Returns whether there is an active tag with the given name under the matching repository.
+    """
+    pass
+
+  @abstractmethod
+  def get_manifest_by_tag(self, namespace_name, repo_name, tag_name):
+    """
+    Returns the current manifest for the tag with the given name under the matching repository, if
+    any, or None if none.
+    """
+    pass
+
+  @abstractmethod
+  def get_manifest_by_digest(self, namespace_name, repo_name, digest):
+    """
+    Returns the manifest matching the given digest under the matching repository, if any, or None if
+    none.
+    """
+    pass
+
+  @abstractmethod
+  def delete_manifest_by_digest(self, namespace_name, repo_name, digest):
+    """
+    Deletes the manifest with the associated digest (if any) and returns all removed tags that
+    pointed to that manifest. If the manifest was not found, returns an empty list.
+    """
+    pass
+
+  @abstractmethod
+  def get_docker_v1_metadata_by_tag(self, namespace_name, repo_name, tag_name):
+    """
+    Returns the Docker V1 metadata associated with the tag with the given name under the matching
+    repository, if any. If none, returns None.
+    """
+    pass
+
+  @abstractmethod
+  def get_docker_v1_metadata_by_image_id(self, namespace_name, repo_name, docker_image_ids):
+    """
+    Returns a map of Docker V1 metadata for each given image ID, matched under the repository with
+    the given namespace and name. Returns an empty map if the matching repository was not found.
+    """
+    pass
+
+  @abstractmethod
+  def get_parents_docker_v1_metadata(self, namespace_name, repo_name, docker_image_id):
+    """
+    Returns an ordered list containing the Docker V1 metadata for each parent of the image with the
+    given docker ID under the matching repository. Returns an empty list if the image was not found.
+    """
+    pass
+
+  @abstractmethod
+  def create_manifest_and_update_tag(self, namespace_name, repo_name, tag_name, manifest_digest,
+                                     manifest_bytes):
+    """
+    Creates a new manifest with the given digest and byte data, and assigns the tag with the given
+    name under the matching repository to it.
+    """
+    pass
+
+  @abstractmethod
+  def synthesize_v1_image(self, repository, storage, image_id, created, comment, command,
+                          compat_json, parent_image_id):
+    """
+    Synthesizes a V1 image under the specified repository, pointing to the given storage, and
+    returns the V1 metadata for the synthesized image.
+    """
+    pass
+
+  @abstractmethod
+  def save_manifest(self, namespace_name, repo_name, tag_name, leaf_layer_docker_id,
+                    manifest_digest, manifest_bytes):
+    """
+    Saves a manifest pointing to the given leaf image, with the given manifest, under the matching
+    repository as a tag with the given name.
+
+    Returns a boolean indicating whether the tag was newly created.
+    """
+    pass
+
+  @abstractmethod
+  def repository_tags(self, namespace_name, repo_name, limit, offset):
+    """
+    Returns the active tags under the repository with the given name and namespace.
+    """
+    pass
+
+  @abstractmethod
+  def get_visible_repositories(self, username, limit, offset):
+    """
+    Returns the repositories visible to the user with the given username, if any.
+    """
+    pass
+
+  @abstractmethod
+  def create_blob_upload(self, namespace_name, repo_name, upload_uuid, location_name,
+                         storage_metadata):
+    """
+    Creates a blob upload under the matching repository with the given UUID and metadata.
+    Returns whether the matching repository exists.
+    """
+    pass
+
+  @abstractmethod
+  def blob_upload_by_uuid(self, namespace_name, repo_name, upload_uuid):
+    """
+    Searches for a blob upload with the given UUID under the given repository and returns it or None
+    if none.
+    """
+    pass
+
+  @abstractmethod
+  def update_blob_upload(self, blob_upload):
+    """
+    Saves any changes to the blob upload object given to the backing data store.
+    Fields that can change:
+    - uncompressed_byte_count
+    - piece_hashes
+    - piece_sha_state
+    - storage_metadata
+    - byte_count
+    - chunk_count
+    - sha_state
+    """
+    pass
+
+  @abstractmethod
+  def delete_blob_upload(self, namespace_name, repo_name, uuid):
+    """
+    Deletes the blob upload with the given uuid under the matching repository. If none, does
+    nothing.
+    """
+    pass
+
+  @abstractmethod
+  def create_blob_and_temp_tag(self, namespace_name, repo_name, blob_digest, blob_upload,
+                               expiration_sec):
+    """
+    Creates a blob and links a temporary tag with the specified expiration to it under the matching
+    repository.
+    """
+    pass
+
+  @abstractmethod
+  def get_blob_by_digest(self, namespace_name, repo_name, digest):
+    """
+    Returns the blob with the given digest under the matching repository or None if none.
+    """
+    pass
+
+  @abstractmethod
+  def save_bittorrent_pieces(self, blob, piece_size, piece_bytes):
+    """
+    Saves the BitTorrent piece hashes for the given blob.
+    """
+    pass
+
+  @abstractmethod
+  def create_manifest_labels(self, namespace_name, repo_name, manifest_digest, labels):
+    """
+    Creates new labels for the provided manifest.
+    """
+    pass
+
+  @abstractmethod
+  def get_blob_path(self, blob):
+    """
+    Once everything is moved over, this could be in util.registry and not even touch the database.
+    """
+    pass
diff --git a/data/interfaces/v2.py b/endpoints/v2/models_pre_oci.py
similarity index 51%
rename from data/interfaces/v2.py
rename to endpoints/v2/models_pre_oci.py
index f949ddc4e..a241c7259 100644
--- a/data/interfaces/v2.py
+++ b/endpoints/v2/models_pre_oci.py
@@ -1,272 +1,26 @@
-from abc import ABCMeta, abstractmethod
-from collections import namedtuple
-
-from namedlist import namedlist
 from peewee import IntegrityError
-from six import add_metaclass
 
 from data import model, database
 from data.model import DataModelException
+from endpoints.v2.models_interface import (
+  Blob,
+  BlobUpload,
+  DockerRegistryV2DataInterface,
+  ManifestJSON,
+  Repository,
+  RepositoryReference,
+  Tag,)
 from image.docker.v1 import DockerV1Metadata
 
 _MEDIA_TYPE = "application/vnd.docker.distribution.manifest.v1+prettyjws"
 
 
-class Repository(namedtuple('Repository', ['id', 'name', 'namespace_name', 'description',
-                                           'is_public', 'kind', 'trust_enabled'])):
-  """
-  Repository represents a namespaced collection of tags.
-  :type id: int
-  :type name: string
-  :type namespace_name: string
-  :type description: string
-  :type is_public: bool
-  :type kind: string
-  :type trust_enabled: bool
-  """
-
-class ManifestJSON(namedtuple('ManifestJSON', ['digest', 'json', 'media_type'])):
-  """
-  ManifestJSON represents a Manifest of any format.
-  """
-
-
-class Tag(namedtuple('Tag', ['name', 'repository'])):
-  """
-  Tag represents a user-facing alias for referencing a set of Manifests.
-  """
-
-
-class BlobUpload(namedlist('BlobUpload', ['uuid', 'byte_count', 'uncompressed_byte_count',
-                                          'chunk_count', 'sha_state', 'location_name',
-                                          'storage_metadata', 'piece_sha_state', 'piece_hashes',
-                                          'repo_namespace_name', 'repo_name'])):
-  """
-  BlobUpload represents the current state of an Blob being uploaded.
-  """
-
-
-class Blob(namedtuple('Blob', ['uuid', 'digest', 'size', 'locations'])):
-  """
-  Blob represents an opaque binary blob saved to the storage system.
-  """
-
-
-class RepositoryReference(namedtuple('RepositoryReference', ['id', 'name', 'namespace_name'])):
-  """
-  RepositoryReference represents a reference to a Repository, without its full metadata.
-  """
-
-class Label(namedtuple('Label', ['key', 'value', 'source_type', 'media_type'])):
-  """
-  Label represents a key-value pair that describes a particular Manifest.
-  """
-
-
-@add_metaclass(ABCMeta)
-class DockerRegistryV2DataInterface(object):
-  """
-  Interface that represents all data store interactions required by a Docker Registry v1.
-  """
-
-  @abstractmethod
-  def create_repository(self, namespace_name, repo_name, creating_user=None):
-    """
-    Creates a new repository under the specified namespace with the given name. The user supplied is
-    the user creating the repository, if any.
- """ - pass - - @abstractmethod - def get_repository(self, namespace_name, repo_name): - """ - Returns a repository tuple for the repository with the given name under the given namespace. - Returns None if no such repository was found. - """ - pass - - @abstractmethod - def has_active_tag(self, namespace_name, repo_name, tag_name): - """ - Returns whether there is an active tag for the tag with the given name under the matching - repository, if any, or none if none. - """ - pass - - @abstractmethod - def get_manifest_by_tag(self, namespace_name, repo_name, tag_name): - """ - Returns the current manifest for the tag with the given name under the matching repository, if - any, or None if none. - """ - pass - - @abstractmethod - def get_manifest_by_digest(self, namespace_name, repo_name, digest): - """ - Returns the manifest matching the given digest under the matching repository, if any, or None if - none. - """ - pass - - @abstractmethod - def delete_manifest_by_digest(self, namespace_name, repo_name, digest): - """ - Deletes the manifest with the associated digest (if any) and returns all removed tags that - pointed to that manifest. If the manifest was not found, returns an empty list. - """ - pass - - @abstractmethod - def get_docker_v1_metadata_by_tag(self, namespace_name, repo_name, tag_name): - """ - Returns the Docker V1 metadata associated with the tag with the given name under the matching - repository, if any. If none, returns None. - """ - pass - - @abstractmethod - def get_docker_v1_metadata_by_image_id(self, namespace_name, repo_name, docker_image_ids): - """ - Returns a map of Docker V1 metadata for each given image ID, matched under the repository with - the given namespace and name. Returns an empty map if the matching repository was not found. - """ - pass - - @abstractmethod - def get_parents_docker_v1_metadata(self, namespace_name, repo_name, docker_image_id): - """ - Returns an ordered list containing the Docker V1 metadata for each parent of the image with the - given docker ID under the matching repository. Returns an empty list if the image was not found. - """ - pass - - @abstractmethod - def create_manifest_and_update_tag(self, namespace_name, repo_name, tag_name, manifest_digest, - manifest_bytes): - """ - Creates a new manifest with the given digest and byte data, and assigns the tag with the given - name under the matching repository to it. - """ - pass - - @abstractmethod - def synthesize_v1_image(self, repository, storage, image_id, created, comment, command, - compat_json, parent_image_id): - """ - Synthesizes a V1 image under the specified repository, pointing to the given storage and returns - the V1 metadata for the synthesized image. - """ - pass - - @abstractmethod - def save_manifest(self, namespace_name, repo_name, tag_name, leaf_layer_docker_id, - manifest_digest, manifest_bytes): - """ - Saves a manifest pointing to the given leaf image, with the given manifest, under the matching - repository as a tag with the given name. - - Returns a boolean whether or not the tag was newly created or not. - """ - pass - - @abstractmethod - def repository_tags(self, namespace_name, repo_name, limit, offset): - """ - Returns the active tags under the repository with the given name and namespace. - """ - pass - - @abstractmethod - def get_visible_repositories(self, username, limit, offset): - """ - Returns the repositories visible to the user with the given username, if any. 
- """ - pass - - @abstractmethod - def create_blob_upload(self, namespace_name, repo_name, upload_uuid, location_name, storage_metadata): - """ - Creates a blob upload under the matching repository with the given UUID and metadata. - Returns whether the matching repository exists. - """ - pass - - @abstractmethod - def blob_upload_by_uuid(self, namespace_name, repo_name, upload_uuid): - """ - Searches for a blob upload with the given UUID under the given repository and returns it or None - if none. - """ - pass - - @abstractmethod - def update_blob_upload(self, blob_upload): - """ - Saves any changes to the blob upload object given to the backing data store. - Fields that can change: - - uncompressed_byte_count - - piece_hashes - - piece_sha_state - - storage_metadata - - byte_count - - chunk_count - - sha_state - """ - pass - - @abstractmethod - def delete_blob_upload(self, namespace_name, repo_name, uuid): - """ - Deletes the blob upload with the given uuid under the matching repository. If none, does - nothing. - """ - pass - - @abstractmethod - def create_blob_and_temp_tag(self, namespace_name, repo_name, blob_digest, blob_upload, - expiration_sec): - """ - Creates a blob and links a temporary tag with the specified expiration to it under the matching - repository. - """ - pass - - @abstractmethod - def get_blob_by_digest(self, namespace_name, repo_name, digest): - """ - Returns the blob with the given digest under the matching repository or None if none. - """ - pass - - @abstractmethod - def save_bittorrent_pieces(self, blob, piece_size, piece_bytes): - """ - Saves the BitTorrent piece hashes for the given blob. - """ - pass - - @abstractmethod - def create_manifest_labels(self, namespace_name, repo_name, manifest_digest, labels): - """ - Creates a new labels for the provided manifest. - """ - pass - - - @abstractmethod - def get_blob_path(self, blob): - """ - Once everything is moved over, this could be in util.registry and not even touch the database. - """ - pass - - class PreOCIModel(DockerRegistryV2DataInterface): """ PreOCIModel implements the data model for the v2 Docker Registry protocol using a database schema before it was changed to support the OCI specification. 
""" + def create_repository(self, namespace_name, repo_name, creating_user=None): return model.repository.create_repository(namespace_name, repo_name, creating_user) @@ -299,14 +53,10 @@ class PreOCIModel(DockerRegistryV2DataInterface): def delete_manifest_by_digest(self, namespace_name, repo_name, digest): def _tag_view(tag): - return Tag( - name=tag.name, - repository=RepositoryReference( - id=tag.repository_id, - name=repo_name, - namespace_name=namespace_name, - ) - ) + return Tag(name=tag.name, repository=RepositoryReference( + id=tag.repository_id, + name=repo_name, + namespace_name=namespace_name,)) tags = model.tag.delete_manifest_by_digest(namespace_name, repo_name, digest) return [_tag_view(tag) for tag in tags] @@ -324,8 +74,9 @@ class PreOCIModel(DockerRegistryV2DataInterface): return {} images_query = model.image.lookup_repository_images(repo, docker_image_ids) - return {image.docker_image_id: _docker_v1_metadata(namespace_name, repo_name, image) - for image in images_query} + return { + image.docker_image_id: _docker_v1_metadata(namespace_name, repo_name, image) + for image in images_query} def get_parents_docker_v1_metadata(self, namespace_name, repo_name, docker_image_id): repo_image = model.image.get_repo_image(namespace_name, repo_name, docker_image_id) @@ -367,33 +118,32 @@ class PreOCIModel(DockerRegistryV2DataInterface): def save_manifest(self, namespace_name, repo_name, tag_name, leaf_layer_docker_id, manifest_digest, manifest_bytes): - (_, newly_created) = model.tag.store_tag_manifest(namespace_name, repo_name, tag_name, - leaf_layer_docker_id, manifest_digest, - manifest_bytes) + (_, newly_created) = model.tag.store_tag_manifest( + namespace_name, repo_name, tag_name, leaf_layer_docker_id, manifest_digest, manifest_bytes) return newly_created def repository_tags(self, namespace_name, repo_name, limit, offset): def _tag_view(tag): - return Tag( - name=tag.name, - repository=RepositoryReference( - id=tag.repository_id, - name=repo_name, - namespace_name=namespace_name, - ) - ) + return Tag(name=tag.name, repository=RepositoryReference( + id=tag.repository_id, + name=repo_name, + namespace_name=namespace_name,)) tags_query = model.tag.list_repository_tags(namespace_name, repo_name) tags_query = tags_query.limit(limit).offset(offset) return [_tag_view(tag) for tag in tags_query] - def get_visible_repositories(self, username, limit, offset): + def get_visible_repositories(self, username, limit, offset, include_public=None): + if include_public is None: + include_public = (username is None) + query = model.repository.get_visible_repositories(username, kind_filter='image', - include_public=(username is None)) + include_public=include_public) query = query.limit(limit).offset(offset) return [_repository_for_repo(repo) for repo in query] - def create_blob_upload(self, namespace_name, repo_name, upload_uuid, location_name, storage_metadata): + def create_blob_upload(self, namespace_name, repo_name, upload_uuid, location_name, + storage_metadata): try: model.blob.initiate_upload(namespace_name, repo_name, upload_uuid, location_name, storage_metadata) @@ -418,8 +168,7 @@ class PreOCIModel(DockerRegistryV2DataInterface): piece_sha_state=found.piece_sha_state, piece_hashes=found.piece_hashes, location_name=found.location.name, - storage_metadata=found.storage_metadata, - ) + storage_metadata=found.storage_metadata,) def update_blob_upload(self, blob_upload): # Lookup the blob upload object. 
@@ -448,17 +197,14 @@ class PreOCIModel(DockerRegistryV2DataInterface): def create_blob_and_temp_tag(self, namespace_name, repo_name, blob_digest, blob_upload, expiration_sec): location_obj = model.storage.get_image_location_for_name(blob_upload.location_name) - blob_record = model.blob.store_blob_record_and_temp_link(namespace_name, repo_name, - blob_digest, location_obj.id, - blob_upload.byte_count, - expiration_sec, - blob_upload.uncompressed_byte_count) + blob_record = model.blob.store_blob_record_and_temp_link( + namespace_name, repo_name, blob_digest, location_obj.id, blob_upload.byte_count, + expiration_sec, blob_upload.uncompressed_byte_count) return Blob( uuid=blob_record.uuid, digest=blob_digest, size=blob_upload.byte_count, - locations=[blob_upload.location_name], - ) + locations=[blob_upload.location_name],) def lookup_blobs_by_digest(self, namespace_name, repo_name, digests): def _blob_view(blob_record): @@ -466,7 +212,7 @@ class PreOCIModel(DockerRegistryV2DataInterface): uuid=blob_record.uuid, digest=blob_record.content_checksum, size=blob_record.image_size, - locations=None, # Note: Locations is None in this case. + locations=None, # Note: Locations is None in this case. ) repo = model.repository.get_repository(namespace_name, repo_name) @@ -482,8 +228,7 @@ class PreOCIModel(DockerRegistryV2DataInterface): uuid=blob_record.uuid, digest=digest, size=blob_record.image_size, - locations=blob_record.locations, - ) + locations=blob_record.locations,) except model.BlobDoesNotExist: return None @@ -524,8 +269,7 @@ def _docker_v1_metadata(namespace_name, repo_name, repo_image): comment=repo_image.comment, command=repo_image.command, # TODO: make sure this isn't needed anywhere, as it is expensive to lookup - parent_image_id=None, - ) + parent_image_id=None,) def _repository_for_repo(repo): @@ -537,8 +281,7 @@ def _repository_for_repo(repo): description=repo.description, is_public=model.repository.is_repository_public(repo), kind=model.repository.get_repo_kind_name(repo), - trust_enabled=repo.trust_enabled, - ) + trust_enabled=repo.trust_enabled,) -pre_oci_model = PreOCIModel() +data_model = PreOCIModel() diff --git a/endpoints/v2/tag.py b/endpoints/v2/tag.py index 683480ac2..776663520 100644 --- a/endpoints/v2/tag.py +++ b/endpoints/v2/tag.py @@ -2,9 +2,10 @@ from flask import jsonify from auth.registry_jwt_auth import process_registry_jwt_auth from endpoints.common import parse_repository_name -from endpoints.v2 import v2_bp, require_repo_read, paginate from endpoints.decorators import anon_protect -from data.interfaces.v2 import pre_oci_model as model +from endpoints.v2 import v2_bp, require_repo_read, paginate +from endpoints.v2.models_pre_oci import data_model as model + @v2_bp.route('//tags/list', methods=['GET']) @parse_repository_name() @@ -16,8 +17,7 @@ def list_all_tags(namespace_name, repo_name, limit, offset, pagination_callback) tags = model.repository_tags(namespace_name, repo_name, limit, offset) response = jsonify({ 'name': '{0}/{1}'.format(namespace_name, repo_name), - 'tags': [tag.name for tag in tags], - }) + 'tags': [tag.name for tag in tags],}) pagination_callback(len(tags), response) return response diff --git a/endpoints/v2/v2auth.py b/endpoints/v2/v2auth.py index 0d9e8ffb0..7eb08e1bb 100644 --- a/endpoints/v2/v2auth.py +++ b/endpoints/v2/v2auth.py @@ -13,18 +13,18 @@ from auth.permissions import (ModifyRepositoryPermission, ReadRepositoryPermissi from endpoints.decorators import anon_protect from endpoints.v2 import v2_bp from endpoints.v2.errors import 
InvalidLogin, NameInvalid, InvalidRequest, Unsupported, Unauthorized -from data.interfaces.v2 import pre_oci_model as model +from endpoints.v2.models_pre_oci import data_model as model from util.cache import no_cache from util.names import parse_namespace_repository, REPOSITORY_NAME_REGEX -from util.security.registry_jwt import (generate_bearer_token, build_context_and_subject, QUAY_TUF_ROOT, - SIGNER_TUF_ROOT, DISABLED_TUF_ROOT) +from util.security.registry_jwt import (generate_bearer_token, build_context_and_subject, + QUAY_TUF_ROOT, SIGNER_TUF_ROOT, DISABLED_TUF_ROOT) logger = logging.getLogger(__name__) - TOKEN_VALIDITY_LIFETIME_S = 60 * 60 # 1 hour SCOPE_REGEX_TEMPLATE = r'^repository:((?:{}\/)?((?:[\.a-zA-Z0-9_\-]+\/)*[\.a-zA-Z0-9_\-]+)):((?:push|pull|\*)(?:,(?:push|pull|\*))*)$' + @lru_cache(maxsize=1) def get_scope_regex(): hostname = re.escape(app.config['SERVER_HOSTNAME']) @@ -64,8 +64,7 @@ def generate_registry_jwt(auth_result): access = [] user_event_data = { - 'action': 'login', - } + 'action': 'login',} tuf_root = DISABLED_TUF_ROOT if len(scope_param) > 0: @@ -101,8 +100,8 @@ def generate_registry_jwt(auth_result): repo_is_public = repo is not None and repo.is_public invalid_repo_message = '' if repo is not None and repo.kind != 'image': - invalid_repo_message = (('This repository is for managing %s resources ' + - 'and not container images.') % repo.kind) + invalid_repo_message = (( + 'This repository is for managing %s resources ' + 'and not container images.') % repo.kind) if 'push' in actions: # If there is no valid user or token, then the repository cannot be @@ -150,8 +149,7 @@ def generate_registry_jwt(auth_result): access.append({ 'type': 'repository', 'name': registry_and_repo, - 'actions': final_actions, - }) + 'actions': final_actions,}) # Set the user event data for the auth. if 'push' in final_actions: @@ -164,8 +162,7 @@ def generate_registry_jwt(auth_result): user_event_data = { 'action': user_action, 'repository': reponame, - 'namespace': namespace, - } + 'namespace': namespace,} tuf_root = get_tuf_root(repo, namespace, reponame) elif user is None and token is None: @@ -179,7 +176,8 @@ def generate_registry_jwt(auth_result): event.publish_event_data('docker-cli', user_event_data) # Build the signed JWT. 
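# The scope strings handled earlier in this function follow the Docker Registry v2
# token grammar captured in SCOPE_REGEX_TEMPLATE, e.g. 'repository:devtable/simple:push,pull'.
# A simplified sketch of that parse (assumes no hostname-prefixed repository names;
# the full regex above also accepts a leading SERVER_HOSTNAME/ and a stricter
# character set for the repository name):

import re

SIMPLE_SCOPE_REGEX = re.compile(
  r'^repository:(?P<name>[^:]+):(?P<actions>(?:push|pull|\*)(?:,(?:push|pull|\*))*)$')

def parse_scope(scope_param):
  # Returns (repository name, list of requested actions) or None if malformed.
  match = SIMPLE_SCOPE_REGEX.match(scope_param)
  if match is None:
    return None
  return match.group('name'), match.group('actions').split(',')

# parse_scope('repository:devtable/simple:push,pull')
# -> ('devtable/simple', ['push', 'pull'])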
- context, subject = build_context_and_subject(user=user, token=token, oauthtoken=oauthtoken, tuf_root=tuf_root) + context, subject = build_context_and_subject(user=user, token=token, oauthtoken=oauthtoken, + tuf_root=tuf_root) token = generate_bearer_token(audience_param, subject, context, access, TOKEN_VALIDITY_LIFETIME_S, instance_keys) return jsonify({'token': token}) @@ -188,7 +186,7 @@ def generate_registry_jwt(auth_result): def get_tuf_root(repo, namespace, reponame): if not features.SIGNING or repo is None or not repo.trust_enabled: return DISABLED_TUF_ROOT - + # Users with write access to a repo will see signer-rooted TUF metadata if ModifyRepositoryPermission(namespace, reponame).can(): return SIGNER_TUF_ROOT diff --git a/endpoints/verbs/__init__.py b/endpoints/verbs/__init__.py index 35a919cbb..93f863989 100644 --- a/endpoints/verbs/__init__.py +++ b/endpoints/verbs/__init__.py @@ -10,28 +10,26 @@ from auth.auth_context import get_authenticated_user from auth.decorators import process_auth from auth.permissions import ReadRepositoryPermission from data import database -from data.interfaces.verbs import pre_oci_model as model from endpoints.common import route_show_if, parse_repository_name from endpoints.decorators import anon_protect -from endpoints.trackhelper import track_and_log +from endpoints.verbs.models_pre_oci import pre_oci_model as model from endpoints.v2.blob import BLOB_DIGEST_ROUTE from image.appc import AppCImageFormatter from image.docker.squashed import SquashedDockerImageFormatter from storage import Storage +from util.audit import track_and_log from util.http import exact_abort from util.registry.filelike import wrap_with_handler from util.registry.queuefile import QueueFile from util.registry.queueprocess import QueueProcess -from util.registry.torrent import (make_torrent, per_user_torrent_filename, public_torrent_filename, - PieceHasher) - +from util.registry.torrent import ( + make_torrent, per_user_torrent_filename, public_torrent_filename, PieceHasher) logger = logging.getLogger(__name__) verbs = Blueprint('verbs', __name__) license_validator.enforce_license_before_request(verbs) - LAYER_MIMETYPE = 'binary/octet-stream' @@ -60,7 +58,8 @@ def _open_stream(formatter, repo_image, tag, derived_image_id, handlers): logger.debug('Returning image layer %s: %s', current_image.image_id, current_image_path) yield current_image_stream - stream = formatter.build_stream(repo_image, tag, derived_image_id, get_next_image, get_next_layer) + stream = formatter.build_stream(repo_image, tag, derived_image_id, get_next_image, + get_next_layer) for handler_fn in handlers: stream = wrap_with_handler(stream, handler_fn) @@ -89,6 +88,7 @@ def _write_derived_image_to_storage(verb, derived_image, queue_file): """ Read from the generated stream and write it back to the storage engine. This method runs in a separate process. 
""" + def handle_exception(ex): logger.debug('Exception when building %s derived image %s: %s', verb, derived_image.ref, ex) @@ -139,8 +139,9 @@ def _torrent_for_blob(blob, is_public): torrent_file = make_torrent(name, webseed, blob.size, torrent_info.piece_length, torrent_info.pieces) - headers = {'Content-Type': 'application/x-bittorrent', - 'Content-Disposition': 'attachment; filename={0}.torrent'.format(name)} + headers = { + 'Content-Type': 'application/x-bittorrent', + 'Content-Disposition': 'attachment; filename={0}.torrent'.format(name)} return make_response(torrent_file, 200, headers) @@ -158,8 +159,7 @@ def _torrent_repo_verb(repo_image, tag, verb, **kwargs): abort(406) # Return the torrent. - repo = model.get_repository(repo_image.repository.namespace_name, - repo_image.repository.name) + repo = model.get_repository(repo_image.repository.namespace_name, repo_image.repository.name) repo_is_public = repo is not None and repo.is_public torrent = _torrent_for_blob(derived_image.blob, repo_is_public) @@ -229,15 +229,14 @@ def _repo_verb(namespace, repository, tag, verb, formatter, sign=False, checker= metric_queue.repository_pull.Inc(labelvalues=[namespace, repository, verb, True]) # Lookup/create the derived image for the verb and repo image. - derived_image = model.lookup_or_create_derived_image(repo_image, verb, - storage.preferred_locations[0], - varying_metadata={'tag': tag}) + derived_image = model.lookup_or_create_derived_image( + repo_image, verb, storage.preferred_locations[0], varying_metadata={'tag': tag}) if not derived_image.blob.uploading: logger.debug('Derived %s image %s exists in storage', verb, derived_image.ref) derived_layer_path = model.get_blob_path(derived_image.blob) is_head_request = request.method == 'HEAD' - download_url = storage.get_direct_download_url(derived_image.blob.locations, derived_layer_path, - head=is_head_request) + download_url = storage.get_direct_download_url(derived_image.blob.locations, + derived_layer_path, head=is_head_request) if download_url: logger.debug('Redirecting to download URL for derived %s image %s', verb, derived_image.ref) return redirect(download_url) @@ -246,8 +245,9 @@ def _repo_verb(namespace, repository, tag, verb, formatter, sign=False, checker= database.close_db_filter(None) logger.debug('Sending cached derived %s image %s', verb, derived_image.ref) - return send_file(storage.stream_read_file(derived_image.blob.locations, derived_layer_path), - mimetype=LAYER_MIMETYPE) + return send_file( + storage.stream_read_file(derived_image.blob.locations, derived_layer_path), + mimetype=LAYER_MIMETYPE) logger.debug('Building and returning derived %s image %s', verb, derived_image.ref) @@ -270,9 +270,12 @@ def _repo_verb(namespace, repository, tag, verb, formatter, sign=False, checker= # and send the results to the client and storage. 
handlers = [hasher.update] args = (formatter, repo_image, tag, derived_image_id, handlers) - queue_process = QueueProcess(_open_stream, - 8 * 1024, 10 * 1024 * 1024, # 8K/10M chunk/max - args, finished=_store_metadata_and_cleanup) + queue_process = QueueProcess( + _open_stream, + 8 * 1024, + 10 * 1024 * 1024, # 8K/10M chunk/max + args, + finished=_store_metadata_and_cleanup) client_queue_file = QueueFile(queue_process.create_queue(), 'client') storage_queue_file = QueueFile(queue_process.create_queue(), 'storage') @@ -336,11 +339,13 @@ def get_aci_signature(server, namespace, repository, tag, os, arch): @route_show_if(features.ACI_CONVERSION) @anon_protect -@verbs.route('/aci/////aci///', methods=['GET', 'HEAD']) +@verbs.route('/aci/////aci///', methods=[ + 'GET', 'HEAD']) @process_auth def get_aci_image(server, namespace, repository, tag, os, arch): - return _repo_verb(namespace, repository, tag, 'aci', AppCImageFormatter(), - sign=True, checker=os_arch_checker(os, arch), os=os, arch=arch) + return _repo_verb(namespace, repository, tag, 'aci', + AppCImageFormatter(), sign=True, checker=os_arch_checker(os, arch), os=os, + arch=arch) @anon_protect diff --git a/endpoints/verbs/models_interface.py b/endpoints/verbs/models_interface.py new file mode 100644 index 000000000..0bb8fccac --- /dev/null +++ b/endpoints/verbs/models_interface.py @@ -0,0 +1,154 @@ +from abc import ABCMeta, abstractmethod +from collections import namedtuple + +from six import add_metaclass + + +class Repository( + namedtuple('Repository', ['id', 'name', 'namespace_name', 'description', 'is_public', + 'kind'])): + """ + Repository represents a namespaced collection of tags. + :type id: int + :type name: string + :type namespace_name: string + :type description: string + :type is_public: bool + :type kind: string + """ + + +class DerivedImage(namedtuple('DerivedImage', ['ref', 'blob', 'internal_source_image_db_id'])): + """ + DerivedImage represents a user-facing alias for an image which was derived from another image. + """ + + +class RepositoryReference(namedtuple('RepositoryReference', ['id', 'name', 'namespace_name'])): + """ + RepositoryReference represents a reference to a Repository, without its full metadata. + """ + + +class ImageWithBlob( + namedtuple('Image', [ + 'image_id', 'blob', 'compat_metadata', 'repository', 'internal_db_id', 'v1_metadata'])): + """ + ImageWithBlob represents a user-facing alias for referencing an image, along with its blob. + """ + + +class Blob(namedtuple('Blob', ['uuid', 'size', 'uncompressed_size', 'uploading', 'locations'])): + """ + Blob represents an opaque binary blob saved to the storage system. + """ + + +class TorrentInfo(namedtuple('TorrentInfo', ['piece_length', 'pieces'])): + """ + TorrentInfo represents the torrent piece information associated with a blob. + """ + + +@add_metaclass(ABCMeta) +class VerbsDataInterface(object): + """ + Interface that represents all data store interactions required by the registry's custom HTTP + verbs. + """ + + @abstractmethod + def get_repository(self, namespace_name, repo_name): + """ + Returns a repository tuple for the repository with the given name under the given namespace. + Returns None if no such repository was found. + """ + pass + + @abstractmethod + def get_manifest_layers_with_blobs(self, repo_image): + """ + Returns the full set of manifest layers and their associated blobs starting at the given + repository image and working upwards to the root image. 
+    """
+    pass
+
+  @abstractmethod
+  def get_blob_path(self, blob):
+    """
+    Returns the storage path for the given blob.
+    """
+    pass
+
+  @abstractmethod
+  def get_derived_image_signature(self, derived_image, signer_name):
+    """
+    Returns the signature associated with the derived image and a specific signer or None if none.
+    """
+    pass
+
+  @abstractmethod
+  def set_derived_image_signature(self, derived_image, signer_name, signature):
+    """
+    Sets the calculated signature for the given derived image and signer to that specified.
+    """
+    pass
+
+  @abstractmethod
+  def delete_derived_image(self, derived_image):
+    """
+    Deletes a derived image and all of its storage.
+    """
+    pass
+
+  @abstractmethod
+  def set_blob_size(self, blob, size):
+    """
+    Sets the size field on a blob to the value specified.
+    """
+    pass
+
+  @abstractmethod
+  def get_repo_blob_by_digest(self, namespace_name, repo_name, digest):
+    """
+    Returns the blob with the given digest under the matching repository or None if none.
+    """
+    pass
+
+  @abstractmethod
+  def get_torrent_info(self, blob):
+    """
+    Returns the torrent information associated with the given blob or None if none.
+    """
+    pass
+
+  @abstractmethod
+  def set_torrent_info(self, blob, piece_length, pieces):
+    """
+    Sets the torrent information associated with the given blob to that specified.
+    """
+    pass
+
+  @abstractmethod
+  def lookup_derived_image(self, repo_image, verb, varying_metadata=None):
+    """
+    Looks up the derived image for the given repository image, verb and optional varying metadata
+    and returns it or None if none.
+    """
+    pass
+
+  @abstractmethod
+  def lookup_or_create_derived_image(self, repo_image, verb, location, varying_metadata=None):
+    """
+    Looks up the derived image for the given repository image, verb and optional varying metadata
+    and returns it. If none exists, a new derived image is created.
+    """
+    pass
+
+  @abstractmethod
+  def get_tag_image(self, namespace_name, repo_name, tag_name):
+    """
+    Returns the image associated with the live tag with the given name under the matching repository
+    or None if none.
+    """
+    pass
diff --git a/data/interfaces/verbs.py b/endpoints/verbs/models_pre_oci.py
similarity index 53%
rename from data/interfaces/verbs.py
rename to endpoints/verbs/models_pre_oci.py
index 6222f46b7..26a955603 100644
--- a/data/interfaces/verbs.py
+++ b/endpoints/verbs/models_pre_oci.py
@@ -1,155 +1,16 @@
 import json
-from abc import ABCMeta, abstractmethod
-from collections import namedtuple
-
-from six import add_metaclass
-
 from data import model
 from image.docker.v1 import DockerV1Metadata
-
-class Repository(namedtuple('Repository', ['id', 'name', 'namespace_name', 'description',
-                                           'is_public', 'kind'])):
-  """
-  Repository represents a namespaced collection of tags.
-  :type id: int
-  :type name: string
-  :type namespace_name: string
-  :type description: string
-  :type is_public: bool
-  :type kind: string
-  """
-
-
-class DerivedImage(namedtuple('DerivedImage', ['ref', 'blob', 'internal_source_image_db_id'])):
-  """
-  DerivedImage represents a user-facing alias for an image which was derived from another image.
-  """
-
-class RepositoryReference(namedtuple('RepositoryReference', ['id', 'name', 'namespace_name'])):
-  """
-  RepositoryReference represents a reference to a Repository, without its full metadata.
- """ - -class ImageWithBlob(namedtuple('Image', ['image_id', 'blob', 'compat_metadata', 'repository', - 'internal_db_id', 'v1_metadata'])): - """ - ImageWithBlob represents a user-facing alias for referencing an image, along with its blob. - """ - -class Blob(namedtuple('Blob', ['uuid', 'size', 'uncompressed_size', 'uploading', 'locations'])): - """ - Blob represents an opaque binary blob saved to the storage system. - """ - -class TorrentInfo(namedtuple('TorrentInfo', ['piece_length', 'pieces'])): - """ - TorrentInfo represents the torrent piece information associated with a blob. - """ - - -@add_metaclass(ABCMeta) -class VerbsDataInterface(object): - """ - Interface that represents all data store interactions required by the registry's custom HTTP - verbs. - """ - @abstractmethod - def get_repository(self, namespace_name, repo_name): - """ - Returns a repository tuple for the repository with the given name under the given namespace. - Returns None if no such repository was found. - """ - pass - - @abstractmethod - def get_manifest_layers_with_blobs(self, repo_image): - """ - Returns the full set of manifest layers and their associated blobs starting at the given - repository image and working upwards to the root image. - """ - pass - - @abstractmethod - def get_blob_path(self, blob): - """ - Returns the storage path for the given blob. - """ - pass - - @abstractmethod - def get_derived_image_signature(self, derived_image, signer_name): - """ - Returns the signature associated with the derived image and a specific signer or None if none. - """ - pass - - @abstractmethod - def set_derived_image_signature(self, derived_image, signer_name, signature): - """ - Sets the calculated signature for the given derived image and signer to that specified. - """ - pass - - @abstractmethod - def delete_derived_image(self, derived_image): - """ - Deletes a derived image and all of its storage. - """ - pass - - @abstractmethod - def set_blob_size(self, blob, size): - """ - Sets the size field on a blob to the value specified. - """ - pass - - @abstractmethod - def get_repo_blob_by_digest(self, namespace_name, repo_name, digest): - """ - Returns the blob with the given digest under the matching repository or None if none. - """ - pass - - @abstractmethod - def get_torrent_info(self, blob): - """ - Returns the torrent information associated with the given blob or None if none. - """ - pass - - @abstractmethod - def set_torrent_info(self, blob, piece_length, pieces): - """ - Sets the torrent infomation associated with the given blob to that specified. - """ - pass - - @abstractmethod - def lookup_derived_image(self, repo_image, verb, varying_metadata=None): - """ - Looks up the derived image for the given repository image, verb and optional varying metadata - and returns it or None if none. - """ - pass - - @abstractmethod - def lookup_or_create_derived_image(self, repo_image, verb, location, varying_metadata=None): - """ - Looks up the derived image for the given repository image, verb and optional varying metadata - and returns it. If none exists, a new derived image is created. - """ - pass - - @abstractmethod - def get_tag_image(self, namespace_name, repo_name, tag_name): - """ - Returns the image associated with the live tag with the given name under the matching repository - or None if none. 
- """ - pass +from endpoints.verbs.models_interface import ( + Blob, + DerivedImage, + ImageWithBlob, + Repository, + RepositoryReference, + TorrentInfo, + VerbsDataInterface,) class PreOCIModel(VerbsDataInterface): @@ -166,13 +27,11 @@ class PreOCIModel(VerbsDataInterface): return _repository_for_repo(repo) def get_manifest_layers_with_blobs(self, repo_image): - repo_image_record = model.image.get_image_by_id(repo_image.repository.namespace_name, - repo_image.repository.name, - repo_image.image_id) + repo_image_record = model.image.get_image_by_id( + repo_image.repository.namespace_name, repo_image.repository.name, repo_image.image_id) - parents = model.image.get_parent_images_with_placements(repo_image.repository.namespace_name, - repo_image.repository.name, - repo_image_record) + parents = model.image.get_parent_images_with_placements( + repo_image.repository.namespace_name, repo_image.repository.name, repo_image_record) yield repo_image @@ -190,8 +49,7 @@ class PreOCIModel(VerbsDataInterface): compat_metadata=metadata, v1_metadata=_docker_v1_metadata(repo_image.repository.namespace_name, repo_image.repository.name, parent), - internal_db_id=parent.id, - ) + internal_db_id=parent.id,) def get_derived_image_signature(self, derived_image, signer_name): storage = model.storage.get_storage_by_uuid(derived_image.blob.uuid) @@ -239,8 +97,7 @@ class PreOCIModel(VerbsDataInterface): return TorrentInfo( pieces=torrent_info.pieces, - piece_length=torrent_info.piece_length, - ) + piece_length=torrent_info.piece_length,) def set_torrent_info(self, blob, piece_length, pieces): blob_record = model.storage.get_storage_by_uuid(blob.uuid) @@ -277,12 +134,10 @@ class PreOCIModel(VerbsDataInterface): repository=RepositoryReference( namespace_name=namespace_name, name=repo_name, - id=found.repository_id, - ), + id=found.repository_id,), compat_metadata=metadata, v1_metadata=_docker_v1_metadata(namespace_name, repo_name, found), - internal_db_id=found.id, - ) + internal_db_id=found.id,) pre_oci_model = PreOCIModel() @@ -307,8 +162,7 @@ def _docker_v1_metadata(namespace_name, repo_name, repo_image): # Note: These are not needed in verbs and are expensive to load, so we just skip them. content_checksum=None, - parent_image_id=None, - ) + parent_image_id=None,) def _derived_image(blob_record, repo_image): @@ -318,8 +172,7 @@ def _derived_image(blob_record, repo_image): return DerivedImage( ref=repo_image.internal_db_id, blob=_blob(blob_record), - internal_source_image_db_id=repo_image.internal_db_id, - ) + internal_source_image_db_id=repo_image.internal_db_id,) def _blob(blob_record): @@ -336,8 +189,8 @@ def _blob(blob_record): size=blob_record.image_size, uncompressed_size=blob_record.uncompressed_size, uploading=blob_record.uploading, - locations=locations, - ) + locations=locations,) + def _repository_for_repo(repo): """ Returns a Repository object representing the Pre-OCI data model repo instance given. 
""" @@ -347,5 +200,4 @@ def _repository_for_repo(repo): namespace_name=repo.namespace_user.username, description=repo.description, is_public=model.repository.is_repository_public(repo), - kind=model.repository.get_repo_kind_name(repo), - ) + kind=model.repository.get_repo_kind_name(repo),) diff --git a/endpoints/verbs/test/test_security.py b/endpoints/verbs/test/test_security.py new file mode 100644 index 000000000..eeb79c567 --- /dev/null +++ b/endpoints/verbs/test/test_security.py @@ -0,0 +1,74 @@ +import pytest + +from flask import url_for +from endpoints.test.shared import conduct_call, gen_basic_auth +from test.fixtures import * + +NO_ACCESS_USER = 'freshuser' +READ_ACCESS_USER = 'reader' +ADMIN_ACCESS_USER = 'devtable' +CREATOR_ACCESS_USER = 'creator' + +PUBLIC_REPO = 'public/publicrepo' +PRIVATE_REPO = 'devtable/shared' +ORG_REPO = 'buynlarge/orgrepo' +ANOTHER_ORG_REPO = 'buynlarge/anotherorgrepo' + +ACI_ARGS = { + 'server': 'someserver', + 'tag': 'fake', + 'os': 'linux', + 'arch': 'x64',} + + +@pytest.mark.parametrize('user', [ + (0, None), + (1, NO_ACCESS_USER), + (2, READ_ACCESS_USER), + (3, CREATOR_ACCESS_USER), + (4, ADMIN_ACCESS_USER),]) +@pytest.mark.parametrize( + 'endpoint,method,repository,single_repo_path,params,expected_statuses', + [ + ('get_aci_signature', 'GET', PUBLIC_REPO, False, ACI_ARGS, (404, 404, 404, 404, 404)), + ('get_aci_signature', 'GET', PRIVATE_REPO, False, ACI_ARGS, (403, 403, 404, 403, 404)), + ('get_aci_signature', 'GET', ORG_REPO, False, ACI_ARGS, (403, 403, 404, 403, 404)), + ('get_aci_signature', 'GET', ANOTHER_ORG_REPO, False, ACI_ARGS, (403, 403, 403, 403, 404)), + + # get_aci_image + ('get_aci_image', 'GET', PUBLIC_REPO, False, ACI_ARGS, (404, 404, 404, 404, 404)), + ('get_aci_image', 'GET', PRIVATE_REPO, False, ACI_ARGS, (403, 403, 404, 403, 404)), + ('get_aci_image', 'GET', ORG_REPO, False, ACI_ARGS, (403, 403, 404, 403, 404)), + ('get_aci_image', 'GET', ANOTHER_ORG_REPO, False, ACI_ARGS, (403, 403, 403, 403, 404)), + + # get_squashed_tag + ('get_squashed_tag', 'GET', PUBLIC_REPO, False, dict(tag='fake'), (404, 404, 404, 404, 404)), + ('get_squashed_tag', 'GET', PRIVATE_REPO, False, dict(tag='fake'), (403, 403, 404, 403, 404)), + ('get_squashed_tag', 'GET', ORG_REPO, False, dict(tag='fake'), (403, 403, 404, 403, 404)), + ('get_squashed_tag', 'GET', ANOTHER_ORG_REPO, False, dict(tag='fake'), (403, 403, 403, 403, + 404)), + + # get_tag_torrent + ('get_tag_torrent', 'GET', PUBLIC_REPO, True, dict(digest='sha256:1234'), (404, 404, 404, 404, + 404)), + ('get_tag_torrent', 'GET', PRIVATE_REPO, True, dict(digest='sha256:1234'), (403, 403, 404, 403, + 404)), + ('get_tag_torrent', 'GET', ORG_REPO, True, dict(digest='sha256:1234'), (403, 403, 404, 403, + 404)), + ('get_tag_torrent', 'GET', ANOTHER_ORG_REPO, True, dict(digest='sha256:1234'), (403, 403, 403, + 403, 404)),]) +def test_verbs_security(user, endpoint, method, repository, single_repo_path, params, + expected_statuses, app, client): + headers = {} + if user[1] is not None: + headers['Authorization'] = gen_basic_auth(user[1], 'password') + + if single_repo_path: + params['repository'] = repository + else: + (namespace, repo_name) = repository.split('/') + params['namespace'] = namespace + params['repository'] = repo_name + + conduct_call(client, 'verbs.' 
+ endpoint, url_for, method, params, + expected_code=expected_statuses[user[0]], headers=headers) diff --git a/endpoints/web.py b/endpoints/web.py index cd158bbc6..499cc181c 100644 --- a/endpoints/web.py +++ b/endpoints/web.py @@ -1,3 +1,4 @@ +import os import json import logging @@ -38,6 +39,8 @@ from util.saas.useranalytics import build_error_callback from util.systemlogs import build_logs_archive from util.useremails import send_email_changed from util.registry.gzipinputstream import GzipInputStream +from _init import ROOT_DIR + PGP_KEY_MIMETYPE = 'application/pgp-keys' @@ -287,7 +290,7 @@ def dbrevision_health(): db_revision = result[0] # Find the local revision from the file system. - with open('ALEMBIC_HEAD', 'r') as f: + with open(os.path.join(ROOT_DIR, 'ALEMBIC_HEAD'), 'r') as f: local_revision = f.readline().split(' ')[0] data = { @@ -479,11 +482,8 @@ def build_status_badge(namespace_name, repo_name): if not repo or token != repo.badge_token: abort(404) - # Lookup the tags for the repository. - tags = model.tag.list_repository_tags(namespace_name, repo_name) - is_empty = len(list(tags)) == 0 + is_empty = model.repository.is_empty(namespace_name, repo_name) recent_build = model.build.get_recent_repository_build(namespace_name, repo_name) - if not is_empty and (not recent_build or recent_build.phase == 'complete'): status_name = 'ready' elif recent_build and recent_build.phase == 'error': @@ -677,8 +677,6 @@ def attach_bitbucket_trigger(namespace_name, repo_name): abort(404, message=msg) elif repo.kind.name != 'image': abort(501) - elif repo.trust_enabled: - abort(400) trigger = model.build.create_build_trigger(repo, BitbucketBuildTrigger.service_name(), None, current_user.db_user()) @@ -714,8 +712,6 @@ def attach_custom_build_trigger(namespace_name, repo_name): abort(404, message=msg) elif repo.kind.name != 'image': abort(501) - elif repo.trust_enabled: - abort(400) trigger = model.build.create_build_trigger(repo, CustomBuildTrigger.service_name(), None, current_user.db_user()) diff --git a/endpoints/webhooks.py b/endpoints/webhooks.py index e5f4c60b6..370f5d20e 100644 --- a/endpoints/webhooks.py +++ b/endpoints/webhooks.py @@ -46,8 +46,10 @@ def stripe_webhook(): change_type = '' if event_type.endswith('.deleted'): plan_id = request_data['data']['object']['plan']['id'] - change_type = 'canceled %s' % plan_id - send_subscription_change(change_type, customer_id, cust_email, quay_username) + requested = bool(request_data.get('request')) + if requested: + change_type = 'canceled %s' % plan_id + send_subscription_change(change_type, customer_id, cust_email, quay_username) elif event_type.endswith('.created'): plan_id = request_data['data']['object']['plan']['id'] change_type = 'subscribed %s' % plan_id @@ -89,8 +91,6 @@ def build_trigger_webhook(trigger_uuid, **kwargs): if trigger.repository.kind.name != 'image': abort(501, 'Build triggers cannot be invoked on application repositories') - elif trigger.repository.trust_enabled: - abort(400, 'Build triggers cannot be invoked on repositories with trust enabled') logger.debug('Passing webhook request to handler %s', handler) try: diff --git a/external_libraries.py b/external_libraries.py index 0ace4a47f..f286abd2d 100644 --- a/external_libraries.py +++ b/external_libraries.py @@ -2,7 +2,8 @@ import urllib2 import re import os -LOCAL_DIRECTORY = '/static/ldn/' +from _init import STATIC_FONTS_DIR, STATIC_LDN_DIR +LOCAL_PATH = '/static/ldn/' EXTERNAL_JS = [ 'code.jquery.com/jquery.js', @@ -56,14 +57,14 @@ EXTERNAL_CSS_FONTS = [ def 
get_external_javascript(local=False): if local: - return [LOCAL_DIRECTORY + format_local_name(src) for src in EXTERNAL_JS] + return [LOCAL_PATH + format_local_name(src) for src in EXTERNAL_JS] return ['//' + src for src in EXTERNAL_JS] def get_external_css(local=False): if local: - return [LOCAL_DIRECTORY + format_local_name(src) for src in EXTERNAL_CSS] + return [LOCAL_PATH + format_local_name(src) for src in EXTERNAL_CSS] return ['//' + src for src in EXTERNAL_CSS] @@ -88,7 +89,7 @@ if __name__ == '__main__': filename = format_local_name(url) print 'Writing %s' % filename - with open(LOCAL_DIRECTORY + filename, 'w') as f: + with open(STATIC_LDN_DIR + filename, 'w') as f: f.write(contents) for url in EXTERNAL_CSS_FONTS: @@ -96,7 +97,7 @@ if __name__ == '__main__': response = urllib2.urlopen('https://' + url) filename = os.path.basename(url).split('?')[0] - with open('static/ldn/' + filename, "wb") as local_file: + with open(STATIC_LDN_DIR + filename, "wb") as local_file: local_file.write(response.read()) for url in EXTERNAL_FONTS: @@ -104,5 +105,5 @@ if __name__ == '__main__': response = urllib2.urlopen('https://' + url) filename = os.path.basename(url).split('?')[0] - with open('static/fonts/' + filename, "wb") as local_file: + with open(STATIC_FONTS_DIR + filename, "wb") as local_file: local_file.write(response.read()) diff --git a/features/__init__.py b/features/__init__.py index b5822681e..ca8c2880a 100644 --- a/features/__init__.py +++ b/features/__init__.py @@ -27,5 +27,3 @@ class FeatureNameValue(object): def __nonzero__(self): return self.value.__nonzero__() - - diff --git a/grunt/Gruntfile.js b/grunt/Gruntfile.js deleted file mode 100644 index f0f8c35f2..000000000 --- a/grunt/Gruntfile.js +++ /dev/null @@ -1,101 +0,0 @@ -module.exports = function(grunt) { - - // Project configuration. - grunt.initConfig({ - pkg: grunt.file.readJSON('package.json'), - concat: { - options: { - process: function(src, filepath) { - var unwraps = ['/js/']; - - var shouldWrap = true; - for (var i = 0; i < unwraps.length; ++i) { - if (filepath.indexOf(unwraps[i]) >= 0) { - shouldWrap = false; - break; - } - } - - if (shouldWrap) { - return '// Source: ' + filepath + '\n' + - '(function() {\n' + src + '\n})();\n'; - } else { - return '// Source: ' + filepath + '\n' + src + '\n\n'; - } - }, - }, - build: { - src: [ - '../static/lib/**/*.js', - '../static/build/*.js', - '../static/js/**/*.js', - '../static/dist/template-cache.js', - '!../static/js/**/*.spec.js' - ], - dest: '../static/dist/<%= pkg.name %>.js' - } - }, - - cssmin: { - '../static/dist/<%= pkg.name %>.css': ['../static/lib/**/*.css', '../static/css/**/*.css'] - }, - - uglify: { - options: { - mangle: false, - sourceMap: false - }, - js_min: { - files: { - '../static/dist/<%= pkg.name %>.min.js': ['../static/dist/<%= pkg.name %>.js'] - } - } - }, - - ngtemplates: { - options: { - url: function(path) { - return '/' + path.substr(3); // remove the ../ - }, - htmlmin: { - collapseBooleanAttributes: false, - collapseWhitespace: true, - removeAttributeQuotes: true, - removeComments: true, // Only if you don't use comment directives! 
- removeEmptyAttributes: true, - removeRedundantAttributes: true, - removeScriptTypeAttributes: true, - removeStyleLinkTypeAttributes: true, - keepClosingSlash: true // For inline SVG - } - }, - quay: { - src: ['../static/partials/*.html', '../static/directives/*.html', '../static/directives/*.html', - '../static/directives/config/*.html', '../static/tutorial/*.html', - '../static/js/directives/ui/**/*.html'], - dest: '../static/dist/template-cache.js' - } - }, - - cachebuster: { - build: { - options: { - format: 'json', - basedir: '../static/' - }, - src: [ '../static/dist/template-cache.js', '../static/dist/<%= pkg.name %>.min.js', - '../static/dist/<%= pkg.name %>.css' ], - dest: '../static/dist/cachebusters.json' - } - } - }); - - grunt.loadNpmTasks('grunt-contrib-uglify'); - grunt.loadNpmTasks('grunt-contrib-concat'); - grunt.loadNpmTasks('grunt-contrib-cssmin'); - grunt.loadNpmTasks('grunt-angular-templates'); - grunt.loadNpmTasks('grunt-cachebuster'); - - // Default task(s). - grunt.registerTask('default', ['ngtemplates', 'concat', 'cssmin', 'uglify', 'cachebuster']); -}; diff --git a/grunt/package.json b/grunt/package.json deleted file mode 100644 index 95a69c285..000000000 --- a/grunt/package.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "name": "quay-frontend", - "version": "0.1.0", - "devDependencies": { - "grunt": "~0.4.4", - "grunt-contrib-concat": "~0.4.0", - "grunt-contrib-cssmin": "~0.9.0", - "grunt-angular-templates": "~1.1.0", - "grunt-contrib-uglify": "~0.4.0", - "grunt-cachebuster": "~0.1.5" - } -} diff --git a/health/healthcheck.py b/health/healthcheck.py index 4208a0a62..cfb427a31 100644 --- a/health/healthcheck.py +++ b/health/healthcheck.py @@ -4,6 +4,7 @@ from health.services import check_all_services logger = logging.getLogger(__name__) + def get_healthchecker(app, config_provider, instance_keys): """ Returns a HealthCheck instance for the given app. """ return HealthCheck.get_checker(app, config_provider, instance_keys) @@ -62,7 +63,6 @@ class HealthCheck(object): return (data, 200 if is_healthy else 503) - @classmethod def get_checker(cls, app, config_provider, instance_keys): name = app.config['HEALTH_CHECKER'][0] @@ -77,8 +77,8 @@ class HealthCheck(object): class LocalHealthCheck(HealthCheck): def __init__(self, app, config_provider, instance_keys): - super(LocalHealthCheck, self).__init__(app, config_provider, instance_keys, - ['redis', 'storage']) + super(LocalHealthCheck, self).__init__(app, config_provider, instance_keys, [ + 'redis', 'storage']) @classmethod def check_names(cls): @@ -88,8 +88,8 @@ class LocalHealthCheck(HealthCheck): class RDSAwareHealthCheck(HealthCheck): def __init__(self, app, config_provider, instance_keys, access_key, secret_key, db_instance='quay', region='us-east-1'): - super(RDSAwareHealthCheck, self).__init__(app, config_provider, instance_keys, - ['redis', 'storage']) + super(RDSAwareHealthCheck, self).__init__(app, config_provider, instance_keys, [ + 'redis', 'storage']) self.access_key = access_key self.secret_key = secret_key @@ -121,7 +121,6 @@ class RDSAwareHealthCheck(HealthCheck): return self.calculate_overall_health(service_statuses, skip=skip, notes=notes) - def _get_rds_status(self): """ Returns the status of the RDS instance as reported by AWS. 
""" try: @@ -130,7 +129,8 @@ class RDSAwareHealthCheck(HealthCheck): response = region.describe_db_instances()['DescribeDBInstancesResponse'] result = response['DescribeDBInstancesResult'] - instances = [i for i in result['DBInstances'] if i['DBInstanceIdentifier'] == self.db_instance] + instances = [ + i for i in result['DBInstances'] if i['DBInstanceIdentifier'] == self.db_instance] if not instances: return 'error' diff --git a/health/models_interface.py b/health/models_interface.py new file mode 100644 index 000000000..ff49a4dde --- /dev/null +++ b/health/models_interface.py @@ -0,0 +1,14 @@ +from abc import ABCMeta, abstractmethod +from six import add_metaclass + + +@add_metaclass(ABCMeta) +class HealthCheckDataInterface(object): + """ + Interface that represents all data store interactions required by health checks. + """ + + @abstractmethod + def check_health(self, app_config): + """ Returns True if the connection to the database is healthy and False otherwise. """ + pass diff --git a/health/models_pre_oci.py b/health/models_pre_oci.py new file mode 100644 index 000000000..9f50b55eb --- /dev/null +++ b/health/models_pre_oci.py @@ -0,0 +1,10 @@ +from data.model import health +from health.models_interface import HealthCheckDataInterface + + +class PreOCIModel(HealthCheckDataInterface): + def check_health(self, app_config): + return health.check_health(app_config) + + +pre_oci_model = PreOCIModel() diff --git a/health/services.py b/health/services.py index 66a8b4033..49ba07199 100644 --- a/health/services.py +++ b/health/services.py @@ -1,7 +1,6 @@ import logging -from data.model import health from app import build_logs, storage - +from health.models_pre_oci import pre_oci_model as model logger = logging.getLogger(__name__) @@ -30,12 +29,14 @@ def _check_registry_gunicorn(app): def _check_database(app): """ Returns the status of the database, as accessed from this instance. """ - return health.check_health(app.config) + return model.check_health(app.config) + def _check_redis(app): """ Returns the status of Redis, as accessed from this instance. """ return build_logs.check_health() + def _check_storage(app): """ Returns the status of storage, as accessed from this instance. 
""" try: diff --git a/image/appc/test/test_appc.py b/image/appc/test/test_appc.py index de829b4e8..06f1e8a8d 100644 --- a/image/appc/test/test_appc.py +++ b/image/appc/test/test_appc.py @@ -1,7 +1,7 @@ import pytest from image.appc import DockerV1ToACIManifestTranslator -from data.interfaces.verbs import RepositoryReference, ImageWithBlob +from endpoints.verbs.models_interface import RepositoryReference, ImageWithBlob from util.dict_wrappers import JSONPathDict diff --git a/initdb.py b/initdb.py index c196bfff0..387cc50f8 100644 --- a/initdb.py +++ b/initdb.py @@ -332,6 +332,7 @@ def initialize_database(): LogEntryKind.create(name='add_repo_notification') LogEntryKind.create(name='delete_repo_notification') + LogEntryKind.create(name='reset_repo_notification') LogEntryKind.create(name='regenerate_robot_token') diff --git a/karma.conf.js b/karma.conf.js index 220ed46e0..83c9e9b79 100644 --- a/karma.conf.js +++ b/karma.conf.js @@ -38,7 +38,6 @@ module.exports = function(config) { ], exclude: [], preprocessors: { - 'static/lib/ngReact/react.ngReact.min.js': ['webpack'], 'static/lib/angular-moment.min.js': ['webpack'], 'node_modules/core-js/index.js': ['webpack'], 'static/test/test-index.ts': ['webpack'], @@ -59,8 +58,11 @@ module.exports = function(config) { colors: true, logLevel: config.LOG_INFO, autoWatch: true, - browsers: ['PhantomJS', 'Chrome'], + browsers: ['ChromeHeadless'], singleRun: false, - concurrency: Infinity + concurrency: Infinity, + mime: { + 'text/x-typescript': ['ts','tsx'] + } }); }; diff --git a/loghandler.py b/loghandler.py new file mode 100755 index 000000000..d3d9948cb --- /dev/null +++ b/loghandler.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +import datetime +import json +import logging +import re +import traceback + + +LOG_FORMAT_REGEXP = re.compile(r'\((.+?)\)', re.IGNORECASE) + + +def _json_default(obj): + """ + Coerce everything to strings. + All objects representing time get output as ISO8601. + """ + if isinstance(obj, (datetime.date, datetime.time, datetime.datetime)): + return obj.isoformat() + + elif isinstance(obj, Exception): + return "Exception: %s" % str(obj) + + return str(obj) + + +# skip natural LogRecord attributes +# http://docs.python.org/library/logging.html#logrecord-attributes +RESERVED_ATTRS = set([ + 'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename', 'funcName', 'levelname', + 'levelno', 'lineno', 'module', 'msecs', 'message', 'msg', 'name', 'pathname', 'process', + 'processName', 'relativeCreated', 'stack_info', 'thread', 'threadName' +]) + + +class JsonFormatter(logging.Formatter): + """ + A custom formatter to format logging records as json strings. + extra values will be formatted as str() if nor supported by + json default encoder + """ + + def __init__(self, *args, **kwargs): + """ + :param json_default: a function for encoding non-standard objects + as outlined in http://docs.python.org/2/library/json.html + :param json_encoder: optional custom encoder + :param json_serializer: a :meth:`json.dumps`-compatible callable + that will be used to serialize the log record. 
+ :param prefix: an optional key prefix to nest logs + """ + self.json_default = kwargs.pop("json_default", _json_default) + self.json_encoder = kwargs.pop("json_encoder", None) + self.json_serializer = kwargs.pop("json_serializer", json.dumps) + self.default_values = kwargs.pop("default_extra", {}) + self.prefix_key = kwargs.pop("prefix_key", "data") + + logging.Formatter.__init__(self, *args, **kwargs) + + self._fmt_parameters = self._parse_format_string() + self._skip_fields = set(self._fmt_parameters) + self._skip_fields.update(RESERVED_ATTRS) + + def _parse_format_string(self): + """Parses format string looking for substitutions""" + standard_formatters = LOG_FORMAT_REGEXP + return standard_formatters.findall(self._fmt) + + def add_fields(self, log_record, record, message_dict): + """ + Override this method to implement custom logic for adding fields. + """ + + target = log_record + if self.prefix_key: + log_record[self.prefix_key] = {} + target = log_record[self.prefix_key] + + for field, value in record.__dict__.iteritems(): + if field in self._fmt_parameters and field in RESERVED_ATTRS: + log_record[field] = value + elif field not in RESERVED_ATTRS: + target[field] = value + + target.update(message_dict) + target.update(self.default_values) + + def format(self, record): + """Formats a log record and serializes to json""" + message_dict = {} + if isinstance(record.msg, dict): + message_dict = record.msg + record.message = None + if "message" in message_dict: + record.message = message_dict.pop("message", "") + else: + record.message = record.getMessage() + + # only format time if needed + if "asctime" in self._fmt_parameters: + record.asctime = self.formatTime(record, self.datefmt) + + # Display formatted exception, but allow overriding it in the + # user-supplied dict. 
+ if record.exc_info and not message_dict.get('exc_info'): + message_dict['exc_info'] = traceback.format_list(traceback.extract_tb(record.exc_info[2])) + log_record = {} + + self.add_fields(log_record, record, message_dict) + + return self.json_serializer(log_record, default=self.json_default, cls=self.json_encoder) diff --git a/package.json b/package.json index 12d938a2d..9d36e33a3 100644 --- a/package.json +++ b/package.json @@ -4,11 +4,13 @@ "private": true, "version": "1.0.0", "scripts": { - "dev": "./node_modules/.bin/karma start --browsers PhantomJS", - "test": "./node_modules/.bin/karma start --single-run --browsers PhantomJS", + "dev": "./node_modules/.bin/karma start --browsers ChromeHeadless", + "test": "./node_modules/.bin/karma start --single-run --browsers ChromeHeadless", "test:node": "JASMINE_CONFIG_PATH=static/test/jasmine.json ./node_modules/.bin/jasmine-ts './static/js/**/*.spec.ts'", - "build": "./node_modules/.bin/webpack --progress", - "watch": "./node_modules/.bin/webpack --watch" + "e2e": "./node_modules/.bin/ts-node ./node_modules/.bin/protractor static/test/protractor.conf.ts", + "build": "NODE_ENV=production ./node_modules/.bin/webpack --progress", + "watch": "./node_modules/.bin/webpack --watch", + "lint": "./node_modules/.bin/tslint --type-check -p tsconfig.json -e **/*.spec.ts" }, "repository": { "type": "git", @@ -25,17 +27,21 @@ "bootstrap": "^3.3.2", "bootstrap-datepicker": "^1.6.4", "cal-heatmap": "^3.3.10", + "clipboard": "^1.6.1", "core-js": "^2.4.1", "d3": "^3.3.3", "eonasdan-bootstrap-datetimepicker": "^4.17.43", + "file-saver": "^1.3.3", "jquery": "1.12.4", "ng-metadata": "^4.0.1", "raven-js": "^3.1.0", - "react": "^15.3.2", - "react-dom": "^15.3.2", "restangular": "^1.2.0", "rxjs": "^5.0.1", - "underscore": "^1.5.2" + "showdown": "^1.6.4", + "showdown-highlightjs-extension": "^0.1.2", + "underscore": "^1.5.2", + "urijs": "^1.18.10", + "zeroclipboard": "^2.3.0" }, "devDependencies": { "@types/angular": "1.6.2", @@ -47,27 +53,28 @@ "@types/jquery": "^2.0.40", "@types/react": "0.14.39", "@types/react-dom": "0.14.17", + "@types/showdown": "^1.4.32", "angular-mocks": "1.6.2", - "angular-ts-decorators": "0.0.19", "css-loader": "0.25.0", + "html-loader": "^0.4.5", "jasmine-core": "^2.5.2", "jasmine-ts": "0.0.3", - "karma": "^0.13.22", - "karma-chrome-launcher": "^2.0.0", + "karma": "^1.7.0", + "karma-chrome-launcher": "^2.1.1", "karma-coverage": "^0.5.5", "karma-es6-shim": "^1.0.0", "karma-jasmine": "^0.3.8", - "karma-phantomjs-launcher": "^1.0.0", "karma-webpack": "^1.8.1", - "node-sass": "3.10.1", - "phantomjs-prebuilt": "^2.1.7", - "sass-loader": "4.0.2", + "ngtemplate-loader": "^1.3.1", + "protractor": "^5.1.2", + "script-loader": "^0.7.0", "source-map-loader": "0.1.5", "style-loader": "0.13.1", "ts-loader": "^0.9.5", "ts-mocks": "^0.2.2", + "ts-node": "^3.0.6", + "tslint": "^5.4.3", "typescript": "^2.2.1", - "typings": "1.4.0", "webpack": "^2.2" } } diff --git a/quay-base.dockerfile b/quay-base.dockerfile new file mode 100644 index 000000000..996469cde --- /dev/null +++ b/quay-base.dockerfile @@ -0,0 +1,103 @@ +# vim:ft=dockerfile + +FROM phusion/baseimage:0.9.19 + +ENV DEBIAN_FRONTEND noninteractive +ENV HOME /root +ENV QUAYDIR /quay-registry +ENV QUAYCONF /quay-registry/conf +ENV QUAYPATH "." + +RUN mkdir $QUAYDIR +WORKDIR $QUAYDIR + +# This is so we don't break http golang/go#17066 +# When Ubuntu has nginx >= 1.11.0 we can switch back. 
+RUN add-apt-repository ppa:nginx/development + +# Add Yarn repository until it is officially added to Ubuntu +RUN curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \ + && echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list + +# Install system packages +RUN apt-get update && apt-get upgrade -y \ + && apt-get install -y \ + dnsmasq \ + g++ \ + gdb \ + gdebi-core \ + git \ + jpegoptim \ + libevent-2.0.5 \ + libevent-dev \ + libffi-dev \ + libfreetype6-dev \ + libgpgme11 \ + libgpgme11-dev \ + libjpeg62 \ + libjpeg62-dev \ + libjpeg8 \ + libldap-2.4-2 \ + libldap2-dev \ + libmagic1 \ + libpq-dev \ + libpq5 \ + libsasl2-dev \ + libsasl2-modules \ + monit \ + nginx \ + nodejs \ + optipng \ + openssl \ + python-dbg \ + python-dev \ + python-pip \ + python-virtualenv \ + yarn=0.22.0-1 \ + w3m # 26MAY2017 + +# Install cfssl +RUN mkdir /gocode +ENV GOPATH /gocode +RUN curl -O https://storage.googleapis.com/golang/go1.6.linux-amd64.tar.gz && \ + tar -xvf go1.6.linux-amd64.tar.gz && \ + mv go /usr/local && \ + rm -rf go1.6.linux-amd64.tar.gz && \ + /usr/local/go/bin/go get -u github.com/cloudflare/cfssl/cmd/cfssl && \ + /usr/local/go/bin/go get -u github.com/cloudflare/cfssl/cmd/cfssljson && \ + cp /gocode/bin/cfssljson /bin/cfssljson && \ + cp /gocode/bin/cfssl /bin/cfssl && \ + rm -rf /gocode && rm -rf /usr/local/go + +# Install jwtproxy +RUN curl -L -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.1/jwtproxy-linux-x64 \ + && chmod +x /usr/local/bin/jwtproxy + +# Install prometheus-aggregator +RUN curl -L -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator \ + && chmod +x /usr/local/bin/prometheus-aggregator + +# Install python dependencies +COPY requirements.txt requirements-tests.txt ./ +RUN virtualenv --distribute venv \ + && venv/bin/pip install -r requirements.txt \ + && venv/bin/pip install -r requirements-tests.txt \ + && venv/bin/pip freeze + + +# Install front-end dependencies +RUN ln -s /usr/bin/nodejs /usr/bin/node +COPY static/ package.json tsconfig.json webpack.config.js tslint.json yarn.lock ./ +RUN yarn install --ignore-engines + + +RUN mkdir -p /etc/my_init.d /etc/systlog-ng /usr/local/bin /etc/monit $QUAYDIR/static/fonts $QUAYDIR/static/ldn /usr/local/nginx/logs/ + +COPY external_libraries.py _init.py ./ + +RUN venv/bin/python -m external_libraries + +RUN rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /root/.cache +VOLUME ["/var/log", "/datastorage", "/tmp"] + +EXPOSE 443 8443 80 diff --git a/registry.py b/registry.py index 861420f08..c2ba7c8ed 100644 --- a/registry.py +++ b/registry.py @@ -1,7 +1,3 @@ -import logging -import logging.config -import os - import endpoints.decorated # Note: We need to import this module to make sure the decorators are registered. 
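A minimal usage sketch for the JsonFormatter introduced in loghandler.py above — the format string, logger name, and request_id extra are illustrative assumptions, not part of this change:

    import logging

    from loghandler import JsonFormatter

    # Parenthesized tokens name the LogRecord attributes surfaced at the top
    # level of the JSON document; anything passed via `extra` nests under the
    # default "data" prefix key.
    handler = logging.StreamHandler()
    handler.setFormatter(JsonFormatter('(asctime) (name) (levelname) (message)'))

    logger = logging.getLogger('quay')
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)

    logger.info('build started', extra={'request_id': 'abc123'})
    # -> {"data": {"request_id": "abc123"}, "name": "quay", "levelname": "INFO",
    #    "message": "build started", "asctime": "..."}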
import features @@ -11,8 +7,6 @@ from endpoints.appr import appr_bp, registry # registry needed to ensure routes from endpoints.v1 import v1_bp from endpoints.v2 import v2_bp -if os.environ.get('DEBUGLOG') == 'true': - logging.config.fileConfig('conf/logging_debug.conf', disable_existing_loggers=False) application.register_blueprint(v1_bp, url_prefix='/v1') application.register_blueprint(v2_bp, url_prefix='/v2') diff --git a/requirements-dev.txt b/requirements-dev.txt index 0cdbc4aea..6165eb863 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -3,3 +3,4 @@ pylint ipdb tqdm yapf==0.15.2 +ffctl>=0.1.2 diff --git a/requirements-tests.txt b/requirements-tests.txt index bb613f9cc..435f7c1fe 100644 --- a/requirements-tests.txt +++ b/requirements-tests.txt @@ -1,4 +1,3 @@ -pytest pytest-cov python-coveralls pytest-flask @@ -6,3 +5,4 @@ pytest-runner pytest-xdist pytest-timeout -e git+https://github.com/ant31/pytest-sugar.git#egg=pytest-sugar +-e git+https://github.com/ant31/pytest.git#egg=pytest diff --git a/secscan.py b/secscan.py index a2ea9753f..489a58efc 100644 --- a/secscan.py +++ b/secscan.py @@ -1,11 +1,5 @@ -import os -import logging.config - from app import app as application - from endpoints.secscan import secscan -if os.environ.get('DEBUGLOG') == 'true': - logging.config.fileConfig('conf/logging_debug.conf', disable_existing_loggers=False) application.register_blueprint(secscan, url_prefix='/secscan') diff --git a/setup.cfg b/setup.cfg index 7a0fb2639..c59b93489 100644 --- a/setup.cfg +++ b/setup.cfg @@ -10,3 +10,11 @@ branch = True [coverage:report] omit = test/* + +[pep8] +ignore = E111,E114 +max-line-length = 100 + +[flake8] +ignore = E111,E114 +max-line-length = 100 \ No newline at end of file diff --git a/static/css/core-ui.css b/static/css/core-ui.css index ef80ab7d7..d28c74129 100644 --- a/static/css/core-ui.css +++ b/static/css/core-ui.css @@ -973,13 +973,18 @@ a:focus { table-layout: fixed; } -.co-fixed-table .co-flowing-col{ +.co-fixed-table .co-flowing-col { overflow: hidden; text-overflow: ellipsis; padding-left: 16px; vertical-align: middle; } +.co-fixed-table .nowrap-col { + white-space: nowrap; + overflow: hidden; +} + .co-table td { border-bottom: 1px solid #eee; padding: 10px; @@ -1239,6 +1244,7 @@ a:focus { .co-top-bar { height: 50px; + padding-bottom: 40px; } .co-check-bar .co-checked-actions .btn { diff --git a/static/css/directives/components/pages/_mixins.scss b/static/css/directives/components/pages/_mixins.scss deleted file mode 100644 index c3aef8b2b..000000000 --- a/static/css/directives/components/pages/_mixins.scss +++ /dev/null @@ -1,10 +0,0 @@ -/* - A list of useful mixins -*/ - -@mixin box-shadow($args...) 
{ - -webkit-box-shadow: $args; - -moz-box-shadow: $args; - box-shadow: $args; - -o-box-shadow: $args; -} diff --git a/static/css/directives/components/pages/repo-page/body.scss b/static/css/directives/components/pages/repo-page/body.scss deleted file mode 100644 index 30b2101d5..000000000 --- a/static/css/directives/components/pages/repo-page/body.scss +++ /dev/null @@ -1,50 +0,0 @@ -.rp-description { - font-size: 16px; -} - -.rp-throbber { - position: relative; -} - -.rp-panelBody { - padding: 15px 30px; -} - -.rp-tabs { - border-bottom: 1px solid #DDD; -} -.rp-tabs > li.active > a, -.rp-tabs > li.active > a:focus, -.rp-tabs > li.active > a:hover { - border-width: 0; -} - -.rp-tabs { - padding: 0 15px; - font-size: 20px; - - li.active a { - color: #51a3d9; - border-bottom: 1px solid #51a3d9; - - &:hover { - color: #51a3d9; - border-bottom: 1px solid #51a3d9; - } - } - - li a { - color: #333; - border-bottom: 1px solid #DDD; - &:focus, - &:hover { - border: 1px solid #fff; - border-bottom: 1px solid #ddd; - background-color: #fff; - } - } -} - -.rp-tagSpan { - margin: 0 2px; -} diff --git a/static/css/directives/components/pages/repo-page/header.scss b/static/css/directives/components/pages/repo-page/header.scss deleted file mode 100644 index afa9450e2..000000000 --- a/static/css/directives/components/pages/repo-page/header.scss +++ /dev/null @@ -1,30 +0,0 @@ -.rp-button { - float: right; - margin-right: 30px; -} - -.rp-button__dropdown { - background-color: #fff; - border-radius: 4px; - box-shadow: 0 1px 2px 0 rgba(0, 0, 0, 0.25), 0 0 1px 0 rgba(0, 0, 0, 0.5), inset 0 1px 0 0 rgba(255, 255, 255, 0.2); -} - -.rp-button__text { - margin-right: 10px; -} - -.rp-button__text--bold { - font-weight: 600; -} - - -.rp-header__row { - margin: 0; -} - -.rp-title { - font-size: 24px; - color: #333; - float: left; -} - diff --git a/static/css/directives/components/pages/repo-page/repo-page.scss b/static/css/directives/components/pages/repo-page/repo-page.scss deleted file mode 100644 index 476b41f8f..000000000 --- a/static/css/directives/components/pages/repo-page/repo-page.scss +++ /dev/null @@ -1,33 +0,0 @@ -// Repo Page specific styles here -@import "../mixins"; -@import "body"; -@import "header"; -@import "sidebar"; - -.rp-header { - padding: 30px; -} - -.rp-mainPanel { - margin-bottom: 20px; - background-color: #fff; - @include box-shadow(0px 2px 2px rgba(0, 0, 0, 0.4)); - overflow: hidden; - display: table; - - [class*="col-"] { - float: none; - display: table-cell; - vertical-align: top; - } -} - -.rp-main { - padding: 0; - border-right: 1px solid #ddd; -} - -.rp-sidebar { - padding: 30px 30px 0 30px; -} - diff --git a/static/css/directives/components/pages/repo-page/sidebar.scss b/static/css/directives/components/pages/repo-page/sidebar.scss deleted file mode 100644 index 3e973822d..000000000 --- a/static/css/directives/components/pages/repo-page/sidebar.scss +++ /dev/null @@ -1,52 +0,0 @@ -.rp-badge { - float: left; - width: 100%; - margin-bottom: 20px; -} - -.rp-badge__icon { - float: left; - height: 25px; - font-size: 16px; - padding: 0 12px; - color: #ffffff; -} - -.rp-badge__icon--private { - @extend .rp-badge__icon; - background-color: #d64456; -} - -.rp-badge__icon--public { - @extend .rp-badge__icon; - background-color: #2fc98e; -} - -.rp-imagesHeader { - font-size: 18px; - margin-bottom: 30px; -} - -.rp-imagesTable { - margin-bottom: 30px; -} - -.rp-imagesTable__headerCell { - font-size: 13px; - font-weight: 300; - font-style: normal; - color: #999; - padding: 10px; - border-bottom: 
1px solid #ddd; -} - -.rp-imagesTable__tagIcon { - padding-right: 4px; -} - -.rp-sharing { - font-size: 16px; - color: #333; - margin-bottom: 30px; -} - diff --git a/static/css/directives/repo-view/repo-panel-tags.css b/static/css/directives/repo-view/repo-panel-tags.css index 7e03596bf..c5ca2543a 100644 --- a/static/css/directives/repo-view/repo-panel-tags.css +++ b/static/css/directives/repo-view/repo-panel-tags.css @@ -43,7 +43,7 @@ .repo-panel-tags-element .image-track-line.start { top: 18px; - height: 25px; + height: 28px; display: block; } @@ -144,6 +144,10 @@ padding-top: 0px; } +.repo-panel-tags-element .signing-delegations-list { + margin-top: 8px; +} + @media (max-width: 1000px) { .repo-panel-tags-element .image-track { display: none; diff --git a/static/css/directives/ui/app-public-view.css b/static/css/directives/ui/app-public-view.css index 39a5456cd..1436e99a6 100644 --- a/static/css/directives/ui/app-public-view.css +++ b/static/css/directives/ui/app-public-view.css @@ -72,4 +72,10 @@ .app-public-view-element .co-panel .co-panel-heading i.fa { display: none; +} + +.app-public-view-element .co-tab-panel { + margin: 0px; + box-shadow: none; + border: none; } \ No newline at end of file diff --git a/static/css/directives/ui/cor-tabs.css b/static/css/directives/ui/cor-tabs.css index e7f405d9c..d49094965 100644 --- a/static/css/directives/ui/cor-tabs.css +++ b/static/css/directives/ui/cor-tabs.css @@ -1,4 +1,4 @@ -cor-tabs { +.vertical cor-tabs { display: table-cell; float: none; vertical-align: top; @@ -6,6 +6,37 @@ cor-tabs { border-right: 1px solid #DDE7ED; } +@media (min-width: 768px) { + .horizontal-label { + color: #666; + } + + .vertical .horizontal-label { + display: none; + } + + .horizontal .horizontal-label { + display: inline-block; + vertical-align: middle; + } + + .horizontal .horizontal-label { + margin-left: 10px; + display: inline-block; + } + + .horizontal .cor-tab-itself { + font-size: 18px; + padding: 6px; + padding-left: 10px; + padding-right: 10px; + } + + .horizontal .cor-tab-itself i.fa { + display: none; + } +} + .co-tab-container { padding: 0px; } @@ -18,7 +49,14 @@ cor-tabs { vertical-align: top; } -.co-tab-content { +.horizontal .co-tab-content { + width: 100%; + display: block; + float: none; + padding: 30px; +} + +.vertical .co-tab-content { width: 100%; display: table-cell; float: none; @@ -68,6 +106,10 @@ cor-tabs { } @media (max-width: 767px) { + .vertical cor-tabs { + display: block; + } + .co-tabs { display: block; width: auto; @@ -78,6 +120,16 @@ cor-tabs { cor-tabs { position: relative; display: block; + float: none; + vertical-align: top; + background-color: #e8f1f6; + border-right: 1px solid #DDE7ED; + } + + .co-tab-element li.active { + background-color: white; + border-right: 1px solid white; + margin-right: -1px; } .co-tab-element .xs-toggle { @@ -106,24 +158,46 @@ cor-tabs { font-family: FontAwesome; } - .co-tab-element .xs-label { - line-height: 60px; + .co-tab-element .horizontal-label { font-size: 16px; margin-left: 16px; display: inline-block !important; color: gray; + vertical-align: middle; } - .co-tabs li a { + .cor-tab-itself { + width: 100%; + } + + .cor-tab-icon { + vertical-align: middle; + } + + .co-top-tab-bar { + padding: 0px; + } + + .co-top-tab-bar li { + padding: 0px; + } + + .co-tabs li a, .co-top-tab-bar li a { display: inline-block; height: 60px; - line-height: 60px; + line-height: 54px; white-space: nowrap; width: 100%; text-align: left; padding-left: 20px; text-decoration: none !important; font-size: 28px; + color: 
#666; + } + + .co-top-tab { + display: block !important; + border-bottom: 0px !important; } .co-tabs li a i { @@ -131,7 +205,7 @@ cor-tabs { font-size: 28px; } - .co-tabs li.active a .xs-label { + .co-tabs li.active a .horizontal-label { color: black; } @@ -145,6 +219,7 @@ cor-tabs { .co-tab-element.closed li { height: 0px; + padding: 0px; overflow: hidden; } diff --git a/static/css/directives/ui/create-external-notification.css b/static/css/directives/ui/create-external-notification.css index 133cced84..5c7602a63 100644 --- a/static/css/directives/ui/create-external-notification.css +++ b/static/css/directives/ui/create-external-notification.css @@ -66,4 +66,10 @@ .create-external-notification-element .help-text { margin-top: 10px; color: #aaa; -} \ No newline at end of file +} + +#authorizeEmailModal .loading-container { + display: flex; + justify-content: center; + margin: 20px; +} diff --git a/static/css/directives/ui/manifest-label-list.css b/static/css/directives/ui/manifest-label-list.css index 840f1b737..8cf247d42 100644 --- a/static/css/directives/ui/manifest-label-list.css +++ b/static/css/directives/ui/manifest-label-list.css @@ -1,5 +1,17 @@ .manifest-label-list-element { padding-left: 6px; + display: inline-block; + position: relative; +} + +.manifest-label-list-element:before { + content: "\f02c"; + font-family: FontAwesome; + position: absolute; + left: -22px; + top: 0px; + font-size: 15px; + color: #888; } .manifest-label-list-element .none { diff --git a/static/css/directives/ui/markdown-editor.css b/static/css/directives/ui/markdown-editor.css deleted file mode 100644 index bf5602db7..000000000 --- a/static/css/directives/ui/markdown-editor.css +++ /dev/null @@ -1,31 +0,0 @@ -.markdown-editor-element .wmd-panel .btn { - background-color: #ddd; -} - -.markdown-editor-element .wmd-panel .btn:hover { - background-color: #eee; -} - -.markdown-editor-element .wmd-panel .btn:active { - background-color: #ccc; -} - -.markdown-editor-element .preview-btn { - float: right; -} - -.markdown-editor-element .preview-btn.active { - box-shadow: inset 0 3px 5px rgba(0,0,0,.125); -} - -.markdown-editor-element .preview-panel .markdown-view { - border: 1px solid #eee; - padding: 4px; - min-height: 150px; -} - -.markdown-editor-element .preview-top-bar { - height: 43px; - line-height: 43px; - color: #ddd; -} \ No newline at end of file diff --git a/static/css/directives/ui/quay-service-status.css b/static/css/directives/ui/quay-service-status.css index 367a7dede..11368969e 100644 --- a/static/css/directives/ui/quay-service-status.css +++ b/static/css/directives/ui/quay-service-status.css @@ -1,3 +1,8 @@ +.quay-service-status-element { + display: flex; + align-items: center; +} + .quay-service-status-indicator { display: inline-block; border-radius: 50%; @@ -9,7 +14,9 @@ } .quay-service-status-description { - vertical-align: middle; + display: flex; + justify-content: center; + align-items: center; } .quay-service-status-indicator.none { diff --git a/static/css/directives/ui/tag-signing-display.css b/static/css/directives/ui/tag-signing-display.css index d05d4608b..e450fcbb2 100644 --- a/static/css/directives/ui/tag-signing-display.css +++ b/static/css/directives/ui/tag-signing-display.css @@ -2,10 +2,11 @@ text-align: center; display: inline-block; cursor: default; + position: relative; } .tag-signing-display-element .fa { - font-size: 18px; + font-size: 24px; } .tag-signing-display-element .fa.fa-question-circle { @@ -22,34 +23,153 @@ color: #9B9B9B; } -.tag-signing-display-element 
.signing-valid .okay, -.tag-signing-display-element .signing-valid .expires-soon { +.tag-signing-display-element .signing-valid.okay-release { color: #2FC98E; } - -.tag-signing-display-element .signing-valid .expires-soon { - position: relative; +.tag-signing-display-element .signing-valid.okay { + color: #5f9dd0; } -.tag-signing-display-element .signing-valid .expires-soon:after { - border-radius: 50%; - width: 6px; - height: 6px; - position: absolute; - bottom: 0px; - right: 0px; - z-index: 1; - display: inline-block; - content: " "; - background-color: #FCA657; -} - - -.tag-signing-display-element .signing-valid .expired { +.tag-signing-display-element .signing-valid.partial-okay { color: #FCA657; } .tag-signing-display-element .signing-invalid { color: #D64456; -} \ No newline at end of file +} + +.tag-signing-display-element .indicator { + position: relative; +} + +.tag-signing-display-element .expiring-soon { + border-radius: 100%; + background-color: #fbab62; + + position: absolute; + right: 0px; + bottom: 0px; + width: 8px; + height: 8px; + z-index: 2; +} + +.tag-signing-display-element .expired { + border-radius: 100%; + background-color: #ec5266; + + position: absolute; + right: 0px; + bottom: 0px; + width: 8px; + height: 8px; + z-index: 3; +} + +.tag-signing-display-element .invalid { + border-radius: 100%; + background-color: #ec5266; + + position: absolute; + right: 0px; + bottom: 0px; + width: 8px; + height: 8px; + z-index: 4; +} + +.tag-signing-display-element.extended { + display: block; + position: relative; +} + +.tag-signing-display-element.extended .fa { + color: #888 !important; + font-size: 16px; +} + +.tag-signing-display-element.extended .indicator { + font-size: 16px; + position: absolute; + left: -22px; + top: 4px; +} + +.tag-signing-display-element.extended .delegations { + margin: 0px; + padding: 0px; + text-align: left; + padding-left: 6px; + padding-top: 6px; +} + +.tag-signing-display-element.extended .delegations td { + padding: 4px; + border: 0px; +} + +.tag-signing-display-element.extended .delegations .delegation { + padding: 4px; + background-color: #eee; + border-radius: 4px; + font-size: 13px; + padding-right: 6px; + display: inline-block; +} + +.tag-signing-display-element.extended .delegations .delegation:before { + content: "\f00c"; + font-family: FontAwesome; + margin-right: 2px; + margin-left: 2px; + display: inline-block; + font-size: 10px; +} + +.tag-signing-display-element.extended .delegations .delegation.okay { + background-color: #d0deea; +} + +.tag-signing-display-element.extended .delegations .delegation.okay:before { + color: #5f9dd0; +} + +.tag-signing-display-element.extended .delegations .delegation.default { + background-color: #bdf1dd; +} + +.tag-signing-display-element.extended .delegations .delegation.default:before { + color: #2FC98E; +} + +.tag-signing-display-element.extended .delegations .delegation.warning { + background-color: #ffe0c4; +} + +.tag-signing-display-element.extended .delegations .delegation.warning:before { + content: "\f12a"; + color: #FCA657; +} + +.tag-signing-display-element.extended .delegations .delegation.error { + background-color: #ffcad1; +} + +.tag-signing-display-element.extended .delegations .delegation.error:before { + content: "\f00d"; + color: #D64456; +} + +.tag-signing-display-element.extended .delegations .delegation-name { + font-size: 14px; +} + +.tag-signing-display-element.extended .delegations .delegation-info { + display: inline-block; + white-space: nowrap; + vertical-align: middle; + 
margin-left: 4px; + font-size: 12px; +} + diff --git a/static/css/pages/error-view.css b/static/css/pages/error-view.css index d140d7206..bee7921d6 100644 --- a/static/css/pages/error-view.css +++ b/static/css/pages/error-view.css @@ -7,6 +7,10 @@ margin-bottom: 10px; } +.error-view-element h2 .repo-circle { + margin-right: 16px; +} + .error-view-element h3 { font-size: 24px; } diff --git a/static/css/quay.css b/static/css/quay.css index bf3f07000..c7ffa24f2 100644 --- a/static/css/quay.css +++ b/static/css/quay.css @@ -1569,7 +1569,7 @@ p.editable:hover i { transition: color 0.5s ease-in-out; } -.copy-box-element .copy-container .copy-icon.zeroclipboard-is-hover { +.copy-box-element .copy-container .copy-icon:hover { color: #444; } diff --git a/static/directives/build-logs-view.html b/static/directives/build-logs-view.html index f1574a602..f939caa95 100644 --- a/static/directives/build-logs-view.html +++ b/static/directives/build-logs-view.html @@ -1,6 +1,7 @@
- diff --git a/static/directives/config/config-setup-tool.html b/static/directives/config/config-setup-tool.html index 205142beb..f40b2b40f 100644 --- a/static/directives/config/config-setup-tool.html +++ b/static/directives/config/config-setup-tool.html @@ -53,59 +53,6 @@
- - Anonymous Access: - -
- Enable Anonymous Access -
-
- If enabled, public repositories and search can be accessed by anyone that can - reach the registry, even if they are not authenticated. Disable to only allow - authenticated users to view and pull "public" resources. -
- - - - User Creation: - -
- Enable Open User Creation -
-
- If enabled, user accounts can be created by anyone. - Users can always be created in the users panel under this superuser view. -
- - - - Encrypted Client Password: - -
- Require Encrypted Client Passwords -
-
- If enabled, users will not be able to login from the Docker command - line with a non-encrypted password and must generate an encrypted - password to use. -
-
- This feature is highly recommended for setups with external authentication, as Docker currently stores passwords in plaintext on user's machines. -
- - - - Team Invitations: - -
- Require Team Invitations -
-
- If enabled, when adding a new user to a team, they will receive an invitation to join the team, with the option to decline. - Otherwise, users will be immediately part of a team when added by a team administrator. -
- - @@ -404,6 +351,47 @@ + +
+
+ Action Log Rotation and Archiving +
+
+
+

+ All actions performed in this registry are automatically logged. These logs are stored in a database table, which can become quite large. + Enabling log rotation and archiving will move all logs older than 30 days into storage. +

+
+
+ Enable Action Log Rotation +
+ + + + + + + + + + +
Storage location: + +
+ The storage location in which to place archived action logs. Logs will only be archived to this single location. +
+
Storage path: + +
+ The path under the configured storage engine in which to place the archived logs in JSON form. +
+
+
+
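The log rotation fields above presumably map onto registry configuration entries along these lines — a sketch only; the key names follow Quay's FEATURE_*/ACTION_LOG_* conventions and are not shown in this diff:

    # Hypothetical config.yaml entries, written as the equivalent Python dict:
    config = {
        'FEATURE_ACTION_LOG_ROTATION': True,
        # A configured storage engine ID; logs are archived to this single location only:
        'ACTION_LOG_ARCHIVE_LOCATION': 'local_us',
        # Path under that storage engine for the archived logs in JSON form:
        'ACTION_LOG_ARCHIVE_PATH': 'archives/actionlogs',
    }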
@@ -452,6 +440,22 @@
+ +
+
+ Application Registry +
+
+
+

If enabled, an additional registry API will be available for managing applications (Kubernetes manifests, Helm charts) via the App Registry specification. A great place to get started is to install the Helm Registry Plugin. +

+ +
+ Enable App Registry +
+
+
+
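Enabling the Application Registry above plausibly comes down to a single feature flag gating the appr blueprint registered in registry.py — the flag name is an assumption, not confirmed by this diff:

    # Hypothetical config entry turning on the App Registry API:
    config = {
        'FEATURE_APP_REGISTRY': True,
    }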
@@ -1149,6 +1153,95 @@
+ +
+
+ Access Settings +
+
+
+

Various settings around access and authentication to the registry.

+
+ + + + + + + + + + + + + + + + + + + + + + +
Basic Credentials Login: +
+ Login to User Interface via credentials +
+
+
+ Login to User Interface via credentials must be enabled. Click here to enable. +
+
+ Login to User Interface via credentials is enabled (requires at least one OIDC provider to disable) +
+
+
+ If enabled, users will be able to log in to the user interface via their username and password credentials. +
+
+ If disabled, users will only be able to log in to the user interface via one of the configured External Authentication providers. +
+
Anonymous Access: +
+ Enable Anonymous Access +
+
+ If enabled, public repositories and search can be accessed by anyone that can + reach the registry, even if they are not authenticated. Disable to only allow + authenticated users to view and pull "public" resources. +
+
User Creation: +
+ Enable Open User Creation +
+
+ If enabled, user accounts can be created by anyone. + Users can always be created in the users panel under this superuser view. +
+
Encrypted Client Password: +
+ Require Encrypted Client Passwords +
+
+ If enabled, users will not be able to log in from the Docker command + line with a non-encrypted password and must generate an encrypted + password to use. +
+
+ This feature is highly recommended for setups with external authentication, as Docker currently stores passwords in plaintext on users' machines. +
+
Team Invitations: +
+ Require Team Invitations +
+
+ If enabled, when adding a new user to a team, they will receive an invitation to join the team, with the option to decline. + Otherwise, users will be immediately part of a team when added by a team administrator. +
+
+
+
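The Access Settings panel above groups the existing toggles with the new credentials-login switch; in configuration terms these plausibly correspond to feature flags like the following — the names are assumed from Quay's FEATURE_* convention, not shown in this diff:

    # Hypothetical feature flags behind the Access Settings panel:
    config = {
        'FEATURE_DIRECT_LOGIN': True,                   # Login to User Interface via credentials
        'FEATURE_ANONYMOUS_ACCESS': True,               # Enable Anonymous Access
        'FEATURE_USER_CREATION': True,                  # Enable Open User Creation
        'FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH': False,  # Require Encrypted Client Passwords
        'FEATURE_REQUIRE_TEAM_INVITE': True,            # Require Team Invitations
    }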
diff --git a/static/directives/copy-box.html b/static/directives/copy-box.html index 7532a6d68..48685daf4 100644 --- a/static/directives/copy-box.html +++ b/static/directives/copy-box.html @@ -1,11 +1,12 @@
- - + +
diff --git a/static/directives/create-entity-dialog.html b/static/directives/create-entity-dialog.html index a3ef2d966..5e8bec9bf 100644 --- a/static/directives/create-entity-dialog.html +++ b/static/directives/create-entity-dialog.html @@ -29,7 +29,7 @@ ng-if="entity">
- -
diff --git a/static/directives/repo-view/repo-panel-builds.html b/static/directives/repo-view/repo-panel-builds.html index 554ae3c54..fcca2174b 100644 --- a/static/directives/repo-view/repo-panel-builds.html +++ b/static/directives/repo-view/repo-panel-builds.html @@ -1,14 +1,14 @@
-

Repository Builds

-
- Builds cannot be performed on this repository because Quay Content Trust is +
+ Builds cannot be performed on this repository because Quay Trust is enabled, which requires that all operations be signed by a user.
@@ -83,7 +83,7 @@
-
+
diff --git a/static/directives/repo-view/repo-panel-info.html b/static/directives/repo-view/repo-panel-info.html index a0f77fa71..5e2b47d51 100644 --- a/static/directives/repo-view/repo-panel-info.html +++ b/static/directives/repo-view/repo-panel-info.html @@ -32,7 +32,7 @@
No builds have been run for this repository.
-
+
Click on the Builds tab to start a new build.
@@ -57,11 +57,11 @@

Description

-
+
+
diff --git a/static/directives/repo-view/repo-panel-tags.html b/static/directives/repo-view/repo-panel-tags.html index 154dcfd97..0e572a568 100644 --- a/static/directives/repo-view/repo-panel-tags.html +++ b/static/directives/repo-view/repo-panel-tags.html @@ -53,7 +53,7 @@
  • + ng-class="repository.tag_operations_disabled ? 'disabled-option' : ''"> Delete Tags
  • @@ -131,7 +131,7 @@ - + @@ -243,7 +243,7 @@ + ng-class="repository.tag_operations_disabled ? 'disabled-option' : ''"> Add New Tag Edit Labels + ng-class="repository.tag_operations_disabled ? 'disabled-option' : ''"> Delete Tag @@ -261,9 +261,15 @@ - + +
    + + +
    + +
    diff --git a/static/directives/repository-events-table.html b/static/directives/repository-events-table.html index e8e0c963a..4a97cda02 100644 --- a/static/directives/repository-events-table.html +++ b/static/directives/repository-events-table.html @@ -29,6 +29,7 @@ Title Event Notification + Enabled @@ -70,6 +71,11 @@
    + + Disabled due to 3 failed attempts in a row + Enabled + + @@ -93,6 +99,9 @@ Delete Notification + + Re-enable Notification + diff --git a/static/directives/request-service-key-dialog.html b/static/directives/request-service-key-dialog.html index 40ab659f4..27ba1ba13 100644 --- a/static/directives/request-service-key-dialog.html +++ b/static/directives/request-service-key-dialog.html @@ -76,7 +76,10 @@ -
    + Optional notes for additional human-readable information about why the key was created. diff --git a/static/directives/service-keys-manager.html b/static/directives/service-keys-manager.html index 964523dab..1935846dc 100644 --- a/static/directives/service-keys-manager.html +++ b/static/directives/service-keys-manager.html @@ -167,7 +167,7 @@
    Approval notes
    -
    +
    @@ -251,7 +251,10 @@
  • {{ getKeyTitle(key) }}
  • -
    + Enter optional notes for additional human-readable information about why the keys were approved. @@ -268,7 +271,10 @@
    Approve service key {{ getKeyTitle(approvalKeyInfo.key) }}?
    -
    + Enter optional notes for additional human-readable information about why the key was approved. @@ -344,7 +350,10 @@ -
    + Optional notes for additional human-readable information about why the key was added. diff --git a/static/directives/tag-operations-dialog.html b/static/directives/tag-operations-dialog.html index 9c294ad82..5f5f7b2be 100644 --- a/static/directives/tag-operations-dialog.html +++ b/static/directives/tag-operations-dialog.html @@ -144,16 +144,17 @@ manifest-digest="restoreTagInfo.manifest_digest">?
    - -