Merge branch 'master' into no-signing-whitelist
commit 45bf7efc84
434 changed files with 10877 additions and 11061 deletions
@@ -1,5 +1,6 @@
 ./ci/
 conf/stack
+conf/stack/**
 screenshots
 tools
 test/data/registry
@@ -23,3 +24,5 @@ coverage
 .npm-debug.log
 test/__pycache__
 __pycache__
+**/__pycache__
+static/build/**
2  .gitignore  vendored
@@ -11,7 +11,6 @@ static/fonts
 static/build
 stack_local
 test/data/registry/
-typings
 GIT_HEAD
 .idea
 .python-version
@@ -24,3 +23,4 @@ htmlcov
 .npm-debug.log
 Dockerfile-e
 build/
+.vscode
99  .gitlab-ci.jsonnet  Normal file
@@ -0,0 +1,99 @@
local utils = import '.gitlab-ci/utils.libsonnet';
local vars = import '.gitlab-ci/vars.libsonnet';
local mergeJob = utils.ci.mergeJob;
local images = vars.images;
local baseJob = (import '.gitlab-ci/base_jobs.libsonnet')(vars);

local stages_list = [
  // gitlab-ci stages
  'docker_base',
  'docker_build',
  'unit_tests',
  'integration',
  'docker_release',
  'teardown',
];

local stages = utils.set(stages_list);

// List CI jobs
local jobs = {
  // Helpers
  local onlyMaster = {
    only: ['master', 'tags'],
  },
  local onlyBranch = {
    only: ['branches'],
  },

  'container-base-build': baseJob.dockerBuild + onlyMaster {
    // ! Only master/tags
    // Update the base container
    stage: stages.docker_base,
    script: [
      'docker build --cache-from quay.io/quay/quay-base:latest' +
      ' -t %s -f quay-base.dockerfile .' % images.base.name,
      'docker push %s' % images.base.name,
    ],
  },

  'container-build': baseJob.dockerBuild {
    // Build and push the quay container.
    // Docker Tag is the branch/tag name
    stage: stages.docker_build,
    script: [
      'docker build -t %s -f Dockerfile .' % images.quayci.name,
      'docker push %s' % images.quayci.name],
  },

  'container-release': baseJob.dockerBuild + onlyMaster {
    // ! Only master/tags
    // push the container to the 'prod' repository
    local repo_with_sha = images.release.name,
    stage: stages.docker_release,
    script: [
      'docker pull %s' % images.quayci.name,
      'docker tag %s %s' % [images.quayci.name, repo_with_sha],
      'docker push %s' % [repo_with_sha],  # @TODO(ant31) add signing
    ],
  },

  // Unit-tests
  local unittest_stage = baseJob.QuayTest {
    stage: stages.unit_tests },
  'unit-tests': unittest_stage {
    script: [
      'py.test --timeout=7200 --verbose --show-count ./ --color=no -x'] },

  'registry-tests': unittest_stage {
    script: [
      'py.test --timeout=7200 --verbose --show-count ./test/registry_tests.py --color=no -x'] },

  // UI tests
  'karma-tests': unittest_stage {
    script: [
      'curl -Ss https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add -',
      'echo "deb http://dl.google.com/linux/chrome/deb/ stable main" > /etc/apt/sources.list.d/google.list',
      'apt-get update -yqqq',
      'apt-get install -y google-chrome-stable',
      'yarn test'
    ] },

  // Unit-tests with real databases
  local db_stage = { stage: stages.unit_tests },
  local dbname = 'quay',
  postgres: db_stage + baseJob.dbTest('postgresql',
                                      image='postgres:9.6',
                                      env={ POSTGRES_PASSWORD: dbname, POSTGRES_USER: dbname }),

  mysql: db_stage + baseJob.dbTest('mysql+pymysql',
                                   image='mysql:latest',
                                   env={ [key]: dbname for key in ['MYSQL_ROOT_PASSWORD', 'MYSQL_DATABASE',
                                                                   'MYSQL_USER', 'MYSQL_PASSWORD'] }),
};

{
  stages: stages_list,
  variables: vars.global,
} + jobs
155  .gitlab-ci.yml  Normal file
@@ -0,0 +1,155 @@
# Generated from .gitlab-ci.jsonnet
# DO NOT EDIT THIS FILE BY HAND -- YOUR CHANGES WILL BE OVERWRITTEN
---
container-base-build:
  before_script:
  - docker login -u $DOCKER_USER -p $DOCKER_PASS quay.io
  image: docker:git
  only:
  - master
  - tags
  script:
  - docker build --cache-from quay.io/quay/quay-base:latest -t quay.io/quay/quay-base:latest -f quay-base.dockerfile .
  - docker push quay.io/quay/quay-base:latest
  stage: docker_base
  tags:
  - kubernetes
  variables:
    DOCKER_DRIVER: overlay
    DOCKER_HOST: tcp://docker-host.gitlab-runner.svc.cluster.local:2375
container-build:
  before_script:
  - docker login -u $DOCKER_USER -p $DOCKER_PASS quay.io
  image: docker:git
  script:
  - docker build -t quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG} -f Dockerfile .
  - docker push quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  stage: docker_build
  tags:
  - kubernetes
  variables:
    DOCKER_DRIVER: overlay
    DOCKER_HOST: tcp://docker-host.gitlab-runner.svc.cluster.local:2375
container-release:
  before_script:
  - docker login -u $DOCKER_USER -p $DOCKER_PASS quay.io
  image: docker:git
  only:
  - master
  - tags
  script:
  - docker pull quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  - docker tag quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG} quay.io/quay/quay:${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHA}
  - docker push quay.io/quay/quay:${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHA}
  stage: docker_release
  tags:
  - kubernetes
  variables:
    DOCKER_DRIVER: overlay
    DOCKER_HOST: tcp://docker-host.gitlab-runner.svc.cluster.local:2375
karma-tests:
  before_script:
  - cd $QUAYDIR
  - source $QUAYDIR/venv/bin/activate
  image: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  script:
  - curl -Ss https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add -
  - echo "deb http://dl.google.com/linux/chrome/deb/ stable main" > /etc/apt/sources.list.d/google.list
  - apt-get update -yqqq
  - apt-get install -y google-chrome-stable
  - yarn test
  stage: unit_tests
  tags:
  - kubernetes
  variables:
    GIT_STRATEGY: none
    PYTHONPATH: .
    QUAYDIR: /quay-registry
    TEST: 'true'
mysql:
  before_script:
  - cd $QUAYDIR
  - source $QUAYDIR/venv/bin/activate
  image: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  script:
  - sleep 30
  - alembic upgrade head
  - PYTHONPATH="." TEST="true" py.test --timeout=7200 --verbose --show-count ./ --color=no --ignore=endpoints/appr/test/ -x
  services:
  - mysql:latest
  stage: unit_tests
  tags:
  - kubernetes
  variables:
    GIT_STRATEGY: none
    MYSQL_DATABASE: quay
    MYSQL_PASSWORD: quay
    MYSQL_ROOT_PASSWORD: quay
    MYSQL_USER: quay
    PYTHONPATH: .
    QUAYDIR: /quay-registry
    SKIP_DB_SCHEMA: 'true'
    TEST: 'true'
    TEST_DATABASE_URI: mysql+pymysql://quay:quay@localhost/quay
postgres:
  before_script:
  - cd $QUAYDIR
  - source $QUAYDIR/venv/bin/activate
  image: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  script:
  - sleep 30
  - alembic upgrade head
  - PYTHONPATH="." TEST="true" py.test --timeout=7200 --verbose --show-count ./ --color=no --ignore=endpoints/appr/test/ -x
  services:
  - postgres:9.6
  stage: unit_tests
  tags:
  - kubernetes
  variables:
    GIT_STRATEGY: none
    POSTGRES_PASSWORD: quay
    POSTGRES_USER: quay
    PYTHONPATH: .
    QUAYDIR: /quay-registry
    SKIP_DB_SCHEMA: 'true'
    TEST: 'true'
    TEST_DATABASE_URI: postgresql://quay:quay@localhost/quay
registry-tests:
  before_script:
  - cd $QUAYDIR
  - source $QUAYDIR/venv/bin/activate
  image: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  script:
  - py.test --timeout=7200 --verbose --show-count ./test/registry_tests.py --color=no -x
  stage: unit_tests
  tags:
  - kubernetes
  variables:
    GIT_STRATEGY: none
    PYTHONPATH: .
    QUAYDIR: /quay-registry
    TEST: 'true'
stages:
- docker_base
- docker_build
- unit_tests
- integration
- docker_release
- teardown
unit-tests:
  before_script:
  - cd $QUAYDIR
  - source $QUAYDIR/venv/bin/activate
  image: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  script:
  - py.test --timeout=7200 --verbose --show-count ./ --color=no -x
  stage: unit_tests
  tags:
  - kubernetes
  variables:
    GIT_STRATEGY: none
    PYTHONPATH: .
    QUAYDIR: /quay-registry
    TEST: 'true'
variables:
  FAILFASTCI_NAMESPACE: quay
50  .gitlab-ci/base_jobs.libsonnet  Normal file
@@ -0,0 +1,50 @@
function(vars={})
{
  dockerBuild: {
    // base job to manage containers (build / push)
    variables: {
      DOCKER_DRIVER: "overlay",
      DOCKER_HOST: "tcp://docker-host.gitlab-runner.svc.cluster.local:2375"
    },

    image: "docker:git",
    before_script: [
      "docker login -u $DOCKER_USER -p $DOCKER_PASS quay.io",
    ],

    tags: [
      "kubernetes",
    ],
  },

  QuayTest: {
    // base job to test the container
    image: vars.images.quayci.name,
    variables: {
      TEST: "true",
      PYTHONPATH: ".",
      QUAYDIR: "/quay-registry",
      GIT_STRATEGY: "none",
    },
    before_script: [
      "cd $QUAYDIR",
      "source $QUAYDIR/venv/bin/activate",
    ],
    tags: [
      "kubernetes",
    ],
  },

  dbTest(scheme, image, env):: self.QuayTest {
    variables+: {
      SKIP_DB_SCHEMA: 'true',
      TEST_DATABASE_URI: '%s://quay:quay@localhost/quay' % scheme,
    } + env,
    services: [image],
    script: [
      "sleep 30",
      "alembic upgrade head",
      'PYTHONPATH="." TEST="true" py.test --timeout=7200 --verbose --show-count ./ --color=no --ignore=endpoints/appr/test/ -x',
    ],
  },
}
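For orientation, a minimal sketch (not part of the commit) of how the `dbTest` helper above is composed by `.gitlab-ci.jsonnet`; the argument values are copied from that file, and the inline stub object stands in for the real `.gitlab-ci/vars.libsonnet` import:

```jsonnet
// Hypothetical stand-alone evaluation of the postgres job; the image name
// stub mirrors vars.libsonnet from this same commit.
local baseJob = (import '.gitlab-ci/base_jobs.libsonnet')(
  { images: { quayci: { name: 'quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}' } } }
);

baseJob.dbTest('postgresql',
               image='postgres:9.6',
               env={ POSTGRES_PASSWORD: 'quay', POSTGRES_USER: 'quay' })
// Evaluating this yields the services/variables/script seen in the generated
// .gitlab-ci.yml "postgres" job.
```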
66  .gitlab-ci/utils.libsonnet  Normal file
@@ -0,0 +1,66 @@
{
  local topSelf = self,
  # Generate a sequence array from 1 to i
  seq(i):: (
    [x for x in std.range(1, i)]
  ),

  objectFieldsHidden(obj):: (
    std.setDiff(std.objectFieldsAll(obj), std.objectFields(obj))
  ),

  objectFlatten(obj):: (
    // Merge 1 level dict depth into toplevel
    local visible = { [k]: obj[j][k]
                      for j in std.objectFieldsAll(obj)
                      for k in std.objectFieldsAll(obj[j]) };

    visible
  ),

  compact(array):: (
    [x for x in array if x != null]
  ),

  objectValues(obj):: (
    local fields = std.objectFields(obj);
    [obj[key] for key in fields]
  ),

  objectMap(func, obj):: (
    local fields = std.objectFields(obj);
    { [key]: func(obj[key]) for key in fields }
  ),

  capitalize(str):: (
    std.char(std.codepoint(str[0]) - 32) + str[1:]
  ),

  test: self.capitalize("test"),

  set(array)::
    { [key]: key for key in array },

  containerName(repo, tag):: "%s:%s" % [repo, tag],

  ci: {

    mergeJob(base_job, jobs, stage=null):: {
      [job_name]: base_job + jobs[job_name] +
                  if stage != null then { stage: stage } else {}
      for job_name in std.objectFields(jobs)
    },

    only(key):: (
      if key == "master"
      then { only: ['master', 'tags'] }
      else { only: ['branches'] }
    ),

    setManual(key, values):: (
      if std.objectHas(topSelf.set(values), key)
      then { when: 'manual' }
      else { only: ['branches'] }
    ),
  },
}
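As a quick illustration (mine, not part of the commit) of the two helpers the CI config leans on most, `set` and `containerName`; the example values mirror how `.gitlab-ci.jsonnet` and `vars.libsonnet` use them:

```jsonnet
// Assumes this snippet sits next to utils.libsonnet.
local utils = import 'utils.libsonnet';
{
  stages: utils.set(['docker_base', 'docker_build']),
  // -> { docker_base: 'docker_base', docker_build: 'docker_build' },
  // which is why jobs can write `stage: stages.docker_base`.
  image: utils.containerName('quay.io/quay/quay-ci', '${CI_COMMIT_REF_SLUG}'),
  // -> 'quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}'
}
```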
27  .gitlab-ci/vars.libsonnet  Normal file
@@ -0,0 +1,27 @@
local utils = import "utils.libsonnet";

{
  global: {
    // .gitlab-ci.yaml top `variables` key
    FAILFASTCI_NAMESPACE: "quay",
  },

  // internal variables
  images: {
    // Quay initial image, used in the FROM clause
    base: { repo: "quay.io/quay/quay-base", tag: "latest",
            name: utils.containerName(self.repo, self.tag),
    },

    // @TODO(ant31) release should use quay/quay
    // release is a copy of the quayci image to the 'prod' repository
    release: { repo: "quay.io/quay/quay",
               tag: "${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHA}",
               name: utils.containerName(self.repo, self.tag),
    },

    quayci: { repo: "quay.io/quay/quay-ci", tag: "${CI_COMMIT_REF_SLUG}",
              name: utils.containerName(self.repo, self.tag),
    },
  },
}
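A small sketch (again mine, assuming only what the files above define) of how these variables surface in the generated `.gitlab-ci.yml`: `vars.global` becomes the top-level `variables:` block, and the composed image names become the tags pushed by the docker jobs.

```jsonnet
local vars = import '.gitlab-ci/vars.libsonnet';
{
  variables: vars.global,
  // -> { FAILFASTCI_NAMESPACE: 'quay' }
  release_image: vars.images.release.name,
  // -> 'quay.io/quay/quay:${CI_COMMIT_REF_SLUG}-${CI_COMMIT_SHA}'
}
```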
23  CHANGELOG.md
@@ -1,3 +1,26 @@
+### v2.4.0
+
+- Added: Kubernetes Applications Support
+- Added: Full-page search UI (#2529)
+- Added: Always generate V2 manifests for tag operations in UI (#2608)
+- Added: Option to enable public repositories in v2 catalog API (#2654)
+- Added: Disable repository notifications after 3 failures (#2652)
+- Added: Remove requirement for flash for copy button in UI (#2667)
+
+- Fixed: Upgrade support for Markdown (#2624)
+- Fixed: Kubernetes secret generation with secrets with CAPITAL names (#2640)
+- Fixed: Content-Length reporting on HEAD requests (#2616)
+- Fixed: Use configured email address as the sender in email notifications (#2635)
+- Fixed: Better performance on permissions lookup (#2628)
+- Fixed: Disable federated login for new users if user creation is disabled (#2623)
+- Fixed: Show build logs timestamps by default (#2647)
+- Fixed: Custom TLS certificates tooling in superuser panel under Kubernetes (#2646, #2663)
+- Fixed: Disable debug logs in superuser panel when under multiple instances (#2663)
+- Fixed: External Notification Modal UI bug (#2650)
+- Fixed: Security worker thrashing when security scanner not available
+- Fixed: Torrent validation in superuser config panel (#2694)
+- Fixed: Expensive database call in build badges (#2688)
+
 ### v2.3.4
 
 - Added: Always show tag expiration options in superuser panel
206  Dockerfile
@@ -1,60 +1,10 @@
 # vim:ft=dockerfile
 
-FROM phusion/baseimage:0.9.19
+FROM quay.io/quay/quay-base:latest
 
-ENV DEBIAN_FRONTEND noninteractive
-ENV HOME /root
+WORKDIR $QUAYDIR
 
-# This is so we don't break http golang/go#17066
-# When Ubuntu has nginx >= 1.11.0 we can switch back.
-RUN add-apt-repository ppa:nginx/development
+COPY requirements.txt requirements-tests.txt ./
 
-# Add Yarn repository until it is officially added to Ubuntu
-RUN curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add -
-RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list
-
-# Install system packages
-RUN apt-get update && apt-get upgrade -y # 27APR2017
-RUN apt-get install -y \
-    dnsmasq \
-    g++ \
-    gdb \
-    gdebi-core \
-    git \
-    jpegoptim \
-    libevent-2.0.5 \
-    libevent-dev \
-    libffi-dev \
-    libfreetype6-dev \
-    libgpgme11 \
-    libgpgme11-dev \
-    libjpeg62 \
-    libjpeg62-dev \
-    libjpeg8 \
-    libldap-2.4-2 \
-    libldap2-dev \
-    libmagic1 \
-    libpq-dev \
-    libpq5 \
-    libsasl2-dev \
-    libsasl2-modules \
-    monit \
-    nginx \
-    nodejs \
-    optipng \
-    openssl \
-    python-dbg \
-    python-dev \
-    python-pip \
-    python-virtualenv \
-    yarn=0.22.0-1 \
-    w3m
-
-# Install python dependencies
-ADD requirements.txt requirements.txt
-RUN virtualenv --distribute venv
-RUN venv/bin/pip install -r requirements.txt # 07SEP2016
-RUN venv/bin/pip freeze
 
 # Check python dependencies for the GPL
 # Due to the following bug, pip results must be piped to a file before grepping:
@@ -63,131 +13,43 @@ RUN cat requirements.txt | grep -v "^-e" | awk -F'==' '{print $1}' | xargs venv/
 test -z $(cat pipinfo.txt | grep GPL | grep -v LGPL) && \
 rm pipinfo.txt
 
-# Install cfssl
-RUN mkdir /gocode
-ENV GOPATH /gocode
-RUN curl -O https://storage.googleapis.com/golang/go1.6.linux-amd64.tar.gz && \
-    tar -xvf go1.6.linux-amd64.tar.gz && \
-    mv go /usr/local && \
-    rm -rf go1.6.linux-amd64.tar.gz && \
-    /usr/local/go/bin/go get -u github.com/cloudflare/cfssl/cmd/cfssl && \
-    /usr/local/go/bin/go get -u github.com/cloudflare/cfssl/cmd/cfssljson && \
-    cp /gocode/bin/cfssljson /bin/cfssljson && \
-    cp /gocode/bin/cfssl /bin/cfssl && \
-    rm -rf /gocode && rm -rf /usr/local/go
-
-# Install jwtproxy
-RUN curl -L -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.1/jwtproxy-linux-x64
-RUN chmod +x /usr/local/bin/jwtproxy
-
-# Install prometheus-aggregator
-RUN curl -L -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator
-RUN chmod +x /usr/local/bin/prometheus-aggregator
+RUN virtualenv --distribute venv \
+    && venv/bin/pip install -r requirements.txt \
+    && venv/bin/pip install -r requirements-tests.txt \
+    && venv/bin/pip freeze
 
 # Install front-end dependencies
-RUN ln -s /usr/bin/nodejs /usr/bin/node
-ADD package.json package.json
-ADD tsconfig.json tsconfig.json
-ADD webpack.config.js webpack.config.js
-ADD typings.json typings.json
-ADD yarn.lock yarn.lock
-RUN yarn install --ignore-engines
+# JS depedencies
+COPY yarn.lock ./
+RUN yarn install --ignore-engines
 
-# Add static files
-ADD static static
+# JS compile
+COPY static static
+COPY package.json tsconfig.json webpack.config.js tslint.json ./
+RUN yarn build \
+    && jpegoptim static/img/**/*.jpg \
+    && optipng -clobber -quiet static/img/**/*.png
 
-# Run Webpack
-RUN node_modules/.bin/webpack --progress
-
-# Run front-end tests
-ARG RUN_TESTS=true
-ENV RUN_TESTS ${RUN_TESTS}
-
-ADD karma.conf.js karma.conf.js
-RUN if [ "$RUN_TESTS" = true ]; then \
-    yarn test; \
-    fi
-
-# Install Grunt and Grunt depenencies
-RUN yarn global add grunt-cli
-ADD grunt grunt
-RUN cd grunt && yarn install
-
-# Run Grunt
-RUN cd grunt && grunt
-
-# Optimize our images
-ADD static/img static/img
-RUN jpegoptim static/img/**/*.jpg
-RUN optipng -clobber -quiet static/img/**/*.png
-
-RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev libgpgme11-dev nodejs jpegoptim optipng w3m
-RUN apt-get autoremove -y
-RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+COPY . .
 
 # Set up the init system
-ADD conf/init/copy_config_files.sh /etc/my_init.d/
-ADD conf/init/doupdatelimits.sh /etc/my_init.d/
-ADD conf/init/copy_syslog_config.sh /etc/my_init.d/
-ADD conf/init/certs_create.sh /etc/my_init.d/
-ADD conf/init/certs_install.sh /etc/my_init.d/
-ADD conf/init/nginx_conf_create.sh /etc/my_init.d/
-ADD conf/init/runmigration.sh /etc/my_init.d/
-ADD conf/init/syslog-ng.conf /etc/syslog-ng/
-ADD conf/init/zz_boot.sh /etc/my_init.d/
-ADD conf/init/service/ /etc/service/
-RUN rm -rf /etc/service/syslog-forwarder
-
-ADD conf/kill-buildmanager.sh /usr/local/bin/kill-buildmanager.sh
-ADD conf/monitrc /etc/monit/monitrc
-RUN chmod 0600 /etc/monit/monitrc
-
-# remove after phusion/baseimage-docker#338 is fixed
-ADD conf/init/logrotate.conf /etc/logrotate.conf
-
-# Download any external libs.
-RUN mkdir static/fonts static/ldn
-ADD external_libraries.py external_libraries.py
-RUN venv/bin/python -m external_libraries
-RUN mkdir -p /usr/local/nginx/logs/
-
-# TODO(ssewell): only works on a detached head, make work with ref
-ADD .git/HEAD GIT_HEAD
-
-# Add all of the files!
-ADD . .
-RUN pyclean .
+RUN mkdir -p /etc/my_init.d /etc/systlog-ng /usr/local/bin /etc/monit static/fonts static/ldn /usr/local/nginx/logs/ \
+    && cp $QUAYCONF/init/*.sh /etc/my_init.d/ \
+    && cp $QUAYCONF/init/syslog-ng.conf /etc/syslog-ng/ \
+    && cp -r $QUAYCONF/init/service/* /etc/service \
+    && cp $QUAYCONF/kill-buildmanager.sh /usr/local/bin/kill-buildmanager.sh \
+    && cp $QUAYCONF/monitrc /etc/monit/monitrc \
+    && chmod 0600 /etc/monit/monitrc \
+    && cp $QUAYCONF/init/logrotate.conf /etc/logrotate.conf \
+    && cp .git/HEAD GIT_HEAD \
+    && rm -rf /etc/service/syslog-forwarder
+
+RUN ln -s $QUAYCONF /conf
 
 # Cleanup any NPM-related stuff.
-RUN rm -rf /root/.npm
-RUN rm -rf /.npm
-RUN rm -rf /usr/local/lib/node_modules
-RUN rm -rf /root/node_modules
-RUN rm -rf /node_modules
-RUN rm -rf /grunt
-RUN rm package.json yarn.lock
-
-# Run the tests
-ENV RUN_ACI_TESTS False
-ADD requirements-tests.txt requirements-tests.txt
-
-RUN if [ "$RUN_TESTS" = true ]; then \
-    venv/bin/pip install -r requirements-tests.txt ;\
-    fi
-
-RUN if [ "$RUN_TESTS" = true ]; then \
-    TEST=true PYTHONPATH="." venv/bin/py.test --timeout=7200 --verbose \
-    --show-count -x --color=no ./; \
-    fi
-
-RUN if [ "$RUN_TESTS" = true ]; then \
-    TEST=true PYTHONPATH="." venv/bin/py.test --timeout=7200 --verbose \
-    --show-count -x --color=no test/registry_tests.py ; \
-    fi
-
-RUN PYTHONPATH=. venv/bin/alembic heads | grep -E '^[0-9a-f]+ \(head\)$' > ALEMBIC_HEAD
+# RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev libgpgme11-dev nodejs jpegoptim optipng w3m \
+# && apt-get autoremove -y \
+# && apt-get clean
+# && rm -rf /root/.npm /.npm /usr/local/lib/node_modules /usr/share/yarn/node_modules \
+# /root/node_modules /node_modules /grunt
+RUN PYTHONPATH=$QUAYPATH venv/bin/alembic heads | grep -E '^[0-9a-f]+ \(head\)$' > ALEMBIC_HEAD
 
 VOLUME ["/conf/stack", "/var/log", "/datastorage", "/tmp", "/conf/etcd"]
 
 EXPOSE 443 8443 80
189  Dockerfile.old  Normal file
@@ -0,0 +1,189 @@
# vim:ft=dockerfile

FROM phusion/baseimage:0.9.19

ENV DEBIAN_FRONTEND noninteractive
ENV HOME /root
ENV QUAYCONF /quay/conf
ENV QUAYDIR /quay
ENV QUAYPATH "."

RUN mkdir $QUAYDIR
WORKDIR $QUAYDIR

# This is so we don't break http golang/go#17066
# When Ubuntu has nginx >= 1.11.0 we can switch back.
RUN add-apt-repository ppa:nginx/development

# Add Yarn repository until it is officially added to Ubuntu
RUN curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add -
RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list

# Install system packages
RUN apt-get update && apt-get upgrade -y # 26MAY2017
RUN apt-get install -y \
    dnsmasq \
    g++ \
    gdb \
    gdebi-core \
    git \
    jpegoptim \
    libevent-2.0.5 \
    libevent-dev \
    libffi-dev \
    libfreetype6-dev \
    libgpgme11 \
    libgpgme11-dev \
    libjpeg62 \
    libjpeg62-dev \
    libjpeg8 \
    libldap-2.4-2 \
    libldap2-dev \
    libmagic1 \
    libpq-dev \
    libpq5 \
    libsasl2-dev \
    libsasl2-modules \
    monit \
    nginx \
    nodejs \
    optipng \
    openssl \
    python-dbg \
    python-dev \
    python-pip \
    python-virtualenv \
    yarn=0.22.0-1 \
    w3m

# Install python dependencies

ADD requirements.txt requirements.txt
RUN virtualenv --distribute venv
RUN venv/bin/pip install -r requirements.txt # 07SEP2016
RUN venv/bin/pip freeze

# Check python dependencies for the GPL
# Due to the following bug, pip results must be piped to a file before grepping:
# https://github.com/pypa/pip/pull/3304
RUN cat requirements.txt | grep -v "^-e" | awk -F'==' '{print $1}' | xargs venv/bin/pip --disable-pip-version-check show > pipinfo.txt && \
    test -z $(cat pipinfo.txt | grep GPL | grep -v LGPL) && \
    rm pipinfo.txt

# Install cfssl
RUN mkdir /gocode
ENV GOPATH /gocode
RUN curl -O https://storage.googleapis.com/golang/go1.6.linux-amd64.tar.gz && \
    tar -xvf go1.6.linux-amd64.tar.gz && \
    mv go /usr/local && \
    rm -rf go1.6.linux-amd64.tar.gz && \
    /usr/local/go/bin/go get -u github.com/cloudflare/cfssl/cmd/cfssl && \
    /usr/local/go/bin/go get -u github.com/cloudflare/cfssl/cmd/cfssljson && \
    cp /gocode/bin/cfssljson /bin/cfssljson && \
    cp /gocode/bin/cfssl /bin/cfssl && \
    rm -rf /gocode && rm -rf /usr/local/go

# Install jwtproxy
RUN curl -L -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.1/jwtproxy-linux-x64
RUN chmod +x /usr/local/bin/jwtproxy

# Install prometheus-aggregator
RUN curl -L -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator
RUN chmod +x /usr/local/bin/prometheus-aggregator

# Install front-end dependencies
RUN ln -s /usr/bin/nodejs /usr/bin/node
ADD package.json package.json
ADD tsconfig.json tsconfig.json
ADD webpack.config.js webpack.config.js
ADD yarn.lock yarn.lock
RUN yarn install --ignore-engines

# Add static files
ADD static static

# Run Webpack
RUN yarn build

# Optimize our images
ADD static/img static/img
RUN jpegoptim static/img/**/*.jpg
RUN optipng -clobber -quiet static/img/**/*.png

RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev libgpgme11-dev nodejs jpegoptim optipng w3m
RUN apt-get autoremove -y
RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# Set up the init system
ADD conf/init/copy_config_files.sh /etc/my_init.d/
ADD conf/init/doupdatelimits.sh /etc/my_init.d/
ADD conf/init/copy_syslog_config.sh /etc/my_init.d/
ADD conf/init/certs_create.sh /etc/my_init.d/
ADD conf/init/certs_install.sh /etc/my_init.d/
ADD conf/init/nginx_conf_create.sh /etc/my_init.d/
ADD conf/init/runmigration.sh /etc/my_init.d/
ADD conf/init/syslog-ng.conf /etc/syslog-ng/
ADD conf/init/zz_boot.sh /etc/my_init.d/
ADD conf/init/service/ /etc/service/
RUN rm -rf /etc/service/syslog-forwarder

ADD conf/kill-buildmanager.sh /usr/local/bin/kill-buildmanager.sh
ADD conf/monitrc /etc/monit/monitrc
RUN chmod 0600 /etc/monit/monitrc

# remove after phusion/baseimage-docker#338 is fixed
ADD conf/init/logrotate.conf /etc/logrotate.conf

# TODO(ssewell): only works on a detached head, make work with ref
ADD .git/HEAD GIT_HEAD

# Add all of the files!
ADD . .
RUN mkdir static/fonts static/ldn

# Download any external libs.
RUN venv/bin/python -m external_libraries
RUN mkdir -p /usr/local/nginx/logs/


RUN pyclean .

# Cleanup any NPM-related stuff.
RUN rm -rf /root/.npm
RUN rm -rf .npm
RUN rm -rf /usr/local/lib/node_modules
RUN rm -rf /usr/share/yarn/node_modules
RUN rm -rf /root/node_modules
RUN rm -rf node_modules
RUN rm -rf grunt
RUN rm package.json yarn.lock

# Run the tests
ARG RUN_TESTS=true
ENV RUN_TESTS ${RUN_TESTS}

ENV RUN_ACI_TESTS False
ADD requirements-tests.txt requirements-tests.txt

RUN if [ "$RUN_TESTS" = true ]; then \
    venv/bin/pip install -r requirements-tests.txt ;\
    fi

RUN if [ "$RUN_TESTS" = true ]; then \
    TEST=true PYTHONPATH="." venv/bin/py.test --timeout=7200 --verbose \
    --show-count -x --color=no ./ && rm -rf /var/tmp/; \
    fi

RUN if [ "$RUN_TESTS" = true ]; then \
    TEST=true PYTHONPATH="." venv/bin/py.test --timeout=7200 --verbose \
    --show-count -x --color=no test/registry_tests.py && rm -rf /var/tmp/;\
    fi

RUN rm -rf /root/.cache

RUN PYTHONPATH=. venv/bin/alembic heads | grep -E '^[0-9a-f]+ \(head\)$' > ALEMBIC_HEAD

VOLUME ["/conf/stack", "/var/log", "/datastorage", "/tmp", "/conf/etcd"]

EXPOSE 443 8443 80
27  README.md
@@ -55,12 +55,13 @@ High-level features include:
 2. [Local Scripts](#local-scripts)
 3. [Development inside Docker](#development-inside-docker)
 4. [Adding a Python Dependency](#adding-a-python-dependency)
-5. [Running the Build System](#running-the-build-system)
-6. [To run individual tests](#to-run-individual-tests)
+5. [Adding a Yarn Dependency](#adding-a-yarn-dependency)
+6. [Running the Build System](#running-the-build-system)
+7. [To run individual tests](#to-run-individual-tests)
    1. [Pytest](#pytest)
    2. [Tox](#tox)
-7. [Running Migrations](#running-migrations)
-8. [How to run a build with tests for a push or merge](#how-to-run-a-build-with-tests-for-a-push-or-merge)
+8. [Running Migrations](#running-migrations)
+9. [How to run a build with tests for a push or merge](#how-to-run-a-build-with-tests-for-a-push-or-merge)
 4. **[Documentation](#documentation)**
    1. [Architecture at a Glance](#architecture-at-a-glance)
    2. [Terminology](#terminology)
@@ -95,6 +96,7 @@ docker-machine create -d virtualbox default
 eval "$(pyenv virtualenv-init -)"
 eval "$(pyenv init -)"
 eval $(/usr/local/bin/docker-machine env default)
+export PYTHONPATH="."
 
 # Some installs don't have /usr/include, required for finding SASL header files
 # This command might fail because of the rootfs is read-only. Refer to the following:
@@ -206,6 +208,23 @@ pip freeze > requirements.txt
 pyenv uninstall quay-deps
 ```
 
+### Adding a Yarn Dependency
+
+We use [Yarn](https://yarnpkg.com/) for frontend dependency management. The `yarn.lock` file ensures
+that we get consistent version installs using the `yarn install` command. However, new dependencies
+should be added using `yarn add <npm package>`. This will add an entry to `package.json` and `yarn.lock`.
+
+Occasionally there will be merge conflicts with `yarn.lock`. To resolve them, use the following (taken
+from [here](https://github.com/yarnpkg/yarn/issues/1776#issuecomment-269539948)).
+
+```sh
+git rebase origin/master
+git checkout origin/master -- yarn.lock
+yarn install
+git add yarn.lock
+git rebase --continue
+```
+
 ### Running the Build System
 
 TODO
35  _init.py  Normal file
@@ -0,0 +1,35 @@
import os
import re
import subprocess


ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
CONF_DIR = os.getenv("QUAYCONF", os.path.join(ROOT_DIR, "conf/"))
STATIC_DIR = os.path.join(ROOT_DIR, 'static/')
STATIC_LDN_DIR = os.path.join(STATIC_DIR, 'ldn/')
STATIC_FONTS_DIR = os.path.join(STATIC_DIR, 'fonts/')
TEMPLATE_DIR = os.path.join(ROOT_DIR, 'templates/')


def _get_version_number_changelog():
  try:
    with open(os.path.join(ROOT_DIR, 'CHANGELOG.md')) as f:
      return re.search(r'(v[0-9]+\.[0-9]+\.[0-9]+)', f.readline()).group(0)
  except IOError:
    return ''


def _get_git_sha():
  if os.path.exists("GIT_HEAD"):
    with open(os.path.join(ROOT_DIR, "GIT_HEAD")) as f:
      return f.read()
  else:
    try:
      return subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()[0:8]
    except (OSError, subprocess.CalledProcessError):
      pass
  return "unknown"


__version__ = _get_version_number_changelog()
__gitrev__ = _get_git_sha()
72  app.py
@@ -1,3 +1,4 @@
+import hashlib
 import json
 import logging
 import os
@@ -13,7 +14,8 @@ from jwkest.jwk import RSAKey
 from werkzeug.routing import BaseConverter
 
 import features
+from _init import CONF_DIR
+from auth.auth_context import get_authenticated_user
 from avatars.avatars import Avatar
 from buildman.manager.buildcanceller import BuildCanceller
 from data import database
@@ -31,6 +33,7 @@ from oauth.services.github import GithubOAuthService
 from oauth.services.gitlab import GitLabOAuthService
 from oauth.loginmanager import OAuthLoginManager
 from storage import Storage
+from util.log import filter_logs
 from util import get_app_url
 from util.saas.analytics import Analytics
 from util.saas.useranalytics import UserAnalytics
@@ -49,9 +52,10 @@ from util.tufmetadata.api import TUFMetadataAPI
 from util.security.instancekeys import InstanceKeys
 from util.security.signing import Signer
 
-OVERRIDE_CONFIG_DIRECTORY = 'conf/stack/'
-OVERRIDE_CONFIG_YAML_FILENAME = 'conf/stack/config.yaml'
-OVERRIDE_CONFIG_PY_FILENAME = 'conf/stack/config.py'
+OVERRIDE_CONFIG_DIRECTORY = os.path.join(CONF_DIR, 'stack/')
+OVERRIDE_CONFIG_YAML_FILENAME = os.path.join(CONF_DIR, 'stack/config.yaml')
+OVERRIDE_CONFIG_PY_FILENAME = os.path.join(CONF_DIR, 'stack/config.py')
 
 OVERRIDE_CONFIG_KEY = 'QUAY_OVERRIDE_CONFIG'
 
@@ -102,6 +106,10 @@ if (app.config['PREFERRED_URL_SCHEME'] == 'https' and
 # Load features from config.
 features.import_features(app.config)
 
+CONFIG_DIGEST = hashlib.sha256(json.dumps(app.config, default=str)).hexdigest()[0:8]
+
+logger.debug("Loaded config", extra={"config": app.config})
+
 
 class RequestWithId(Request):
   request_gen = staticmethod(urn_generator(['request']))
@@ -114,26 +122,60 @@ class RequestWithId(Request):
 @app.before_request
 def _request_start():
   logger.debug('Starting request: %s', request.path)
+  logger.info("request-start", extra={"request_id": request.request_id})
+
+
+DEFAULT_FILTER = lambda x: '[FILTERED]'
+FILTERED_VALUES = [
+  {'key': ['password'], 'fn': DEFAULT_FILTER},
+  {'key': ['user', 'password'], 'fn': DEFAULT_FILTER},
+  {'key': ['blob'], 'fn': lambda x: x[0:8]}
+]
 
 
 @app.after_request
-def _request_end(r):
+def _request_end(resp):
+  jsonbody = request.get_json(force=True, silent=True)
+  values = request.values.to_dict()
+
+  if jsonbody and not isinstance(jsonbody, dict):
+    jsonbody = {'_parsererror': jsonbody}
+
+  if isinstance(values, dict):
+    filter_logs(values, FILTERED_VALUES)
+
+  extra = {
+    "endpoint": request.endpoint,
+    "request_id" : request.request_id,
+    "remote_addr": request.remote_addr,
+    "http_method": request.method,
+    "original_url": request.url,
+    "path": request.path,
+    "parameters": values,
+    "json_body": jsonbody,
+    "confsha": CONFIG_DIGEST,
+  }
+
+  if request.user_agent is not None:
+    extra["user-agent"] = request.user_agent.string
+
+  user = get_authenticated_user()
+
+  if user:
+    extra['user'] = {'email': user.email,
+                     'uuid': user.uuid,
+                     'org': user.organization,
+                     'robot': user.robot}
+
+  logger.info("request-end", extra=extra)
+
   logger.debug('Ending request: %s', request.path)
-  return r
+  return resp
 
 
-class InjectingFilter(logging.Filter):
-  def filter(self, record):
-    if _request_ctx_stack.top is not None:
-      record.msg = '[%s] %s' % (request.request_id, record.msg)
-    return True
-
 root_logger = logging.getLogger()
 
-# Add the request id filter to all handlers of the root logger
-for handler in root_logger.handlers:
-  handler.addFilter(InjectingFilter())
-
 app.request_class = RequestWithId
 
 # Register custom converters.
@@ -1,6 +1,8 @@
+import os
 import logging
 import logging.config
 
+from util.log import logfile_path
 from app import app as application
 
 
@@ -12,5 +14,5 @@ import secscan
 
 
 if __name__ == '__main__':
-  logging.config.fileConfig('conf/logging_debug.conf', disable_existing_loggers=False)
+  logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False)
   application.run(port=5000, debug=True, threaded=True, host='0.0.0.0')
@@ -7,10 +7,10 @@ from flask import request, url_for
 from flask_principal import identity_changed, Identity
 
 from app import app, get_app_url, instance_keys
-from .auth_context import set_grant_context, get_grant_context
-from .permissions import repository_read_grant, repository_write_grant, repository_admin_grant
-from util.names import parse_namespace_repository
+from auth.auth_context import (set_grant_context, get_grant_context)
+from auth.permissions import repository_read_grant, repository_write_grant, repository_admin_grant
 from util.http import abort
+from util.names import parse_namespace_repository
 from util.security.registry_jwt import (ANONYMOUS_SUB, decode_bearer_header,
                                         InvalidBearerTokenException)
 from data import model
@@ -18,8 +18,10 @@ from data import model
 
 logger = logging.getLogger(__name__)
 
+
 CONTEXT_KINDS = ['user', 'token', 'oauth']
+
 
 ACCESS_SCHEMA = {
   'type': 'array',
   'description': 'List of access granted to the subject',
8  boot.py  Normal file → Executable file
@@ -13,6 +13,7 @@ from app import app
 from data.model.release import set_region_release
 from util.config.database import sync_database_with_config
 from util.generatepresharedkey import generate_key
+from _init import CONF_DIR
 
 
 @lru_cache(maxsize=1)
@@ -42,7 +43,7 @@ def setup_jwt_proxy():
   """
   Creates a service key for quay to use in the jwtproxy and generates the JWT proxy configuration.
   """
-  if os.path.exists('conf/jwtproxy_conf.yaml'):
+  if os.path.exists(os.path.join(CONF_DIR, 'jwtproxy_conf.yaml')):
     # Proxy is already setup.
     return
 
@@ -65,16 +66,17 @@ def setup_jwt_proxy():
   registry = audience + '/keys'
   security_issuer = app.config.get('SECURITY_SCANNER_ISSUER_NAME', 'security_scanner')
 
-  with open("conf/jwtproxy_conf.yaml.jnj") as f:
+  with open(os.path.join(CONF_DIR, 'jwtproxy_conf.yaml.jnj')) as f:
     template = Template(f.read())
   rendered = template.render(
+    conf_dir=CONF_DIR,
     audience=audience,
     registry=registry,
     key_id=quay_key_id,
     security_issuer=security_issuer,
   )
 
-  with open('conf/jwtproxy_conf.yaml', 'w') as f:
+  with open(os.path.join(CONF_DIR, 'jwtproxy_conf.yaml'), 'w') as f:
     f.write(rendered)
@@ -19,7 +19,7 @@ from buildman.asyncutil import AsyncWrapper
 from container_cloud_config import CloudConfigContext
 from app import metric_queue, app
 from util.metrics.metricqueue import duration_collector_async
+from _init import ROOT_DIR
 
 logger = logging.getLogger(__name__)
 
@@ -29,7 +29,7 @@ ONE_HOUR = 60*60
 _TAG_RETRY_COUNT = 3 # Number of times to retry adding tags.
 _TAG_RETRY_SLEEP = 2 # Number of seconds to wait between tag retries.
 
-ENV = Environment(loader=FileSystemLoader('buildman/templates'))
+ENV = Environment(loader=FileSystemLoader(os.path.join(ROOT_DIR, "buildman/templates")))
 TEMPLATE = ENV.get_template('cloudconfig.yaml')
 CloudConfigContext().populate_jinja_environment(ENV)
@@ -1,10 +1,16 @@
+import sys
+import os
+sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
+
+from util.log import logfile_path
 from Crypto import Random
 
+
+logconfig = logfile_path(debug=True)
 bind = '0.0.0.0:5000'
 workers = 2
 worker_class = 'gevent'
 daemon = False
-logconfig = 'conf/logging_debug.conf'
 pythonpath = '.'
 preload_app = True
@@ -1,12 +1,19 @@
+import sys
+import os
+sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
+
+from util.log import logfile_path
 from Crypto import Random
 
+
+logconfig = logfile_path(debug=False)
 bind = 'unix:/tmp/gunicorn_registry.sock'
 workers = 8
 worker_class = 'gevent'
-logconfig = 'conf/logging.conf'
 pythonpath = '.'
 preload_app = True
 
 
 def post_fork(server, worker):
   # Reset the Random library to ensure it won't raise the "PID check failed." error after
   # gunicorn forks.
@@ -1,12 +1,19 @@
+import sys
+import os
+sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
+
+from util.log import logfile_path
 from Crypto import Random
 
+
+logconfig = logfile_path(debug=False)
 bind = 'unix:/tmp/gunicorn_secscan.sock'
 workers = 2
 worker_class = 'gevent'
-logconfig = 'conf/logging.conf'
 pythonpath = '.'
 preload_app = True
 
 
 def post_fork(server, worker):
   # Reset the Random library to ensure it won't raise the "PID check failed." error after
   # gunicorn forks.
@@ -1,12 +1,20 @@
+import sys
+import os
+sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
+
+from util.log import logfile_path
 from Crypto import Random
 
+
+logconfig = logfile_path(debug=False)
+
 bind = 'unix:/tmp/gunicorn_verbs.sock'
 workers = 4
-logconfig = 'conf/logging.conf'
 pythonpath = '.'
 preload_app = True
 timeout = 2000 # Because sync workers
 
 
 def post_fork(server, worker):
   # Reset the Random library to ensure it won't raise the "PID check failed." error after
   # gunicorn forks.
@@ -1,9 +1,16 @@
+import sys
+import os
+sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
+
+from util.log import logfile_path
 from Crypto import Random
 
+
+logconfig = logfile_path(debug=False)
+
 bind = 'unix:/tmp/gunicorn_web.sock'
 workers = 2
 worker_class = 'gevent'
-logconfig = 'conf/logging.conf'
 pythonpath = '.'
 preload_app = True
@@ -1,8 +1,10 @@
 #! /bin/bash
 set -e
+QUAYPATH=${QUAYPATH:-"."}
+QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
+cd ${QUAYDIR:-"/"}
 # Create certs for jwtproxy to mitm outgoing TLS connections
 echo '{"CN":"CA","key":{"algo":"rsa","size":2048}}' | cfssl gencert -initca - | cfssljson -bare mitm
-cp mitm-key.pem /conf/mitm.key
-cp mitm.pem /conf/mitm.cert
+cp mitm-key.pem $QUAYCONF/mitm.key
+cp mitm.pem $QUAYCONF/mitm.cert
 cp mitm.pem /usr/local/share/ca-certificates/mitm.crt
@@ -1,27 +1,39 @@
 #! /bin/bash
 set -e
+QUAYPATH=${QUAYPATH:-"."}
+QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
+
+cd ${QUAYDIR:-"/"}
+
 # Add the custom LDAP certificate
-if [ -e /conf/stack/ldap.crt ]
+if [ -e $QUAYCONF/stack/ldap.crt ]
 then
-  cp /conf/stack/ldap.crt /usr/local/share/ca-certificates/ldap.crt
+  cp $QUAYCONF/stack/ldap.crt /usr/local/share/ca-certificates/ldap.crt
 fi
 
 # Add extra trusted certificates (as a directory)
-if [ -d /conf/stack/extra_ca_certs ]; then
-  if test "$(ls -A "/conf/stack/extra_ca_certs")"; then
-    echo "Installing extra certificates found in /conf/stack/extra_ca_certs directory"
-    cp /conf/stack/extra_ca_certs/* /usr/local/share/ca-certificates/
-    cat /conf/stack/extra_ca_certs/* >> /venv/lib/python2.7/site-packages/requests/cacert.pem
+if [ -d $QUAYCONF/stack/extra_ca_certs ]; then
+  if test "$(ls -A "$QUAYCONF/stack/extra_ca_certs")"; then
+    echo "Installing extra certificates found in $QUAYCONF/stack/extra_ca_certs directory"
+    cp $QUAYCONF/stack/extra_ca_certs/* /usr/local/share/ca-certificates/
+    cat $QUAYCONF/stack/extra_ca_certs/* >> venv/lib/python2.7/site-packages/requests/cacert.pem
   fi
 fi
 
 # Add extra trusted certificates (as a file)
-if [ -f /conf/stack/extra_ca_certs ]; then
-  echo "Installing extra certificates found in /conf/stack/extra_ca_certs file"
-  csplit -z -f /usr/local/share/ca-certificates/extra-ca- /conf/stack/extra_ca_certs '/-----BEGIN CERTIFICATE-----/' '{*}'
-  cat /conf/stack/extra_ca_certs >> /venv/lib/python2.7/site-packages/requests/cacert.pem
+if [ -f $QUAYCONF/stack/extra_ca_certs ]; then
+  echo "Installing extra certificates found in $QUAYCONF/stack/extra_ca_certs file"
+  csplit -z -f /usr/local/share/ca-certificates/extra-ca- $QUAYCONF/stack/extra_ca_certs '/-----BEGIN CERTIFICATE-----/' '{*}'
+  cat $QUAYCONF/stack/extra_ca_certs >> venv/lib/python2.7/site-packages/requests/cacert.pem
 fi
 
+# Add extra trusted certificates (prefixed)
+for f in $(find $QUAYCONF/stack/ -maxdepth 1 -type f -name "extra_ca*")
+do
+  echo "Installing extra cert $f"
+  cp "$f" /usr/local/share/ca-certificates/
+  cat "$f" >> venv/lib/python2.7/site-packages/requests/cacert.pem
+done
+
 # Update all CA certificates.
 update-ca-certificates
@ -1,11 +1,16 @@
|
||||||
#! /bin/sh
|
#! /bin/sh
|
||||||
|
QUAYPATH=${QUAYPATH:-"."}
|
||||||
|
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
|
||||||
|
|
||||||
if [ -e /conf/stack/robots.txt ]
|
cd ${QUAYDIR:-"/"}
|
||||||
|
|
||||||
|
|
||||||
|
if [ -e $QUAYCONF/stack/robots.txt ]
|
||||||
then
|
then
|
||||||
cp /conf/stack/robots.txt /templates/robots.txt
|
cp $QUAYCONF/stack/robots.txt $QUAYPATH/templates/robots.txt
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ -e /conf/stack/favicon.ico ]
|
if [ -e $QUAYCONF/stack/favicon.ico ]
|
||||||
then
|
then
|
||||||
cp /conf/stack/favicon.ico /static/favicon.ico
|
cp $QUAYCONF/stack/favicon.ico $QUAYPATH/static/favicon.ico
|
||||||
fi
|
fi
|
|
@ -1,6 +1,10 @@
|
||||||
#! /bin/sh
|
#! /bin/sh
|
||||||
|
QUAYPATH=${QUAYPATH:-"."}
|
||||||
|
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
|
||||||
|
|
||||||
if [ -e /conf/stack/syslog-ng-extra.conf ]
|
cd ${QUAYDIR:-"/"}
|
||||||
|
|
||||||
|
if [ -e $QUAYCONF/stack/syslog-ng-extra.conf ]
|
||||||
then
|
then
|
||||||
cp /conf/stack/syslog-ng-extra.conf /etc/syslog-ng/conf.d/
|
cp $QUAYCONF/stack/syslog-ng-extra.conf /etc/syslog-ng/conf.d/
|
||||||
fi
|
fi
|
||||||
|
|
51  conf/init/nginx_conf_create.py  Normal file
@@ -0,0 +1,51 @@
import os
import os.path

import yaml
import jinja2

QUAYPATH = os.getenv("QUAYPATH", ".")
QUAYDIR = os.getenv("QUAYDIR", "/")
QUAYCONF_DIR = os.getenv("QUAYCONF", os.path.join(QUAYDIR, QUAYPATH, "conf"))
STATIC_DIR = os.path.join(QUAYDIR, 'static/')

def write_config(filename, **kwargs):
  with open(filename + ".jnj") as f:
    template = jinja2.Template(f.read())
    rendered = template.render(kwargs)

  with open(filename, 'w') as f:
    f.write(rendered)


def generate_nginx_config():
  """
  Generates nginx config from the app config
  """
  use_https = os.path.exists(os.path.join(QUAYCONF_DIR, 'stack/ssl.key'))
  write_config(os.path.join(QUAYCONF_DIR, 'nginx/nginx.conf'), use_https=use_https)


def generate_server_config(config):
  """
  Generates server config from the app config
  """
  config = config or {}
  tuf_server = config.get('TUF_SERVER', None)
  tuf_host = config.get('TUF_HOST', None)
  signing_enabled = config.get('FEATURE_SIGNING', False)
  maximum_layer_size = config.get('MAXIMUM_LAYER_SIZE', '20G')

  write_config(
    os.path.join(QUAYCONF_DIR, 'nginx/server-base.conf'), tuf_server=tuf_server, tuf_host=tuf_host,
    signing_enabled=signing_enabled, maximum_layer_size=maximum_layer_size, static_dir=STATIC_DIR)


if __name__ == "__main__":
  if os.path.exists(os.path.join(QUAYCONF_DIR, 'stack/config.yaml')):
    with open(os.path.join(QUAYCONF_DIR, 'stack/config.yaml'), 'r') as f:
      config = yaml.load(f)
  else:
    config = None
  generate_server_config(config)
  generate_nginx_config()
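For context, write_config() above pairs each target path with a Jinja template stored beside it under the same name plus a ".jnj" suffix. A minimal sketch of that convention follows; the template text here is hypothetical and only illustrates the rendering step, it is not part of this commit:

# Hypothetical illustration of the filename + ".jnj" convention used by write_config().
import jinja2

template_text = "listen {{ 443 if use_https else 80 }};"  # e.g. a fragment of nginx.conf.jnj
rendered = jinja2.Template(template_text).render(use_https=True)
print(rendered)  # -> "listen 443;"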
@@ -1,51 +1,8 @@
-#!/venv/bin/python
-
-import os.path
-
-import yaml
-import jinja2
-
-
-def write_config(filename, **kwargs):
-with open(filename + ".jnj") as f:
-template = jinja2.Template(f.read())
-rendered = template.render(kwargs)
-
-with open(filename, 'w') as f:
-f.write(rendered)
-
-
-def generate_nginx_config():
-"""
-Generates nginx config from the app config
-"""
-use_https = os.path.exists('conf/stack/ssl.key')
-write_config('conf/nginx/nginx.conf',
-use_https=use_https)
-
-
-def generate_server_config(config):
-"""
-Generates server config from the app config
-"""
-config = config or {}
-tuf_server = config.get('TUF_SERVER', None)
-tuf_host = config.get('TUF_HOST', None)
-signing_enabled = config.get('FEATURE_SIGNING', False)
-maximum_layer_size = config.get('MAXIMUM_LAYER_SIZE', '20G')
-
-write_config('conf/nginx/server-base.conf',
-tuf_server=tuf_server,
-tuf_host=tuf_host,
-signing_enabled=signing_enabled,
-maximum_layer_size=maximum_layer_size)
-
-
-if __name__ == "__main__":
-if os.path.exists('conf/stack/config.yaml'):
-with open('conf/stack/config.yaml', 'r') as f:
-config = yaml.load(f)
-else:
-config = None
-generate_server_config(config)
-generate_nginx_config()
+#!/bin/bash
+
+QUAYDIR=${QUAYDIR:-"/"}
+QUAYPATH=${QUAYPATH:-"."}
+QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
+
+cd $QUAYDIR
+venv/bin/python $QUAYCONF/init/nginx_conf_create.py
@@ -1,5 +1,6 @@
-#! /bin/bash
+#!/bin/bash
 set -e
+cd ${QUAYDIR:-"/"}

 # Run the database migration
-PYTHONPATH=. venv/bin/alembic upgrade head
+PYTHONPATH=${QUAYPATH:-"."} venv/bin/alembic upgrade head
@@ -2,7 +2,9 @@

 echo 'Starting Blob upload cleanup worker'

-cd /
-venv/bin/python -m workers.blobuploadcleanupworker 2>&1
+QUAYPATH=${QUAYPATH:-"."}
+
+cd ${QUAYDIR:-"/"}
+PYTHONPATH=$QUAYPATH venv/bin/python -m workers.blobuploadcleanupworker.blobuploadcleanupworker 2>&1

 echo 'Blob upload cleanup exited'

@@ -2,7 +2,8 @@

 echo 'Starting build logs archiver worker'

-cd /
-venv/bin/python -m workers.buildlogsarchiver 2>&1
+QUAYPATH=${QUAYPATH:-"."}
+cd ${QUAYDIR:-"/"}
+PYTHONPATH=$QUAYPATH venv/bin/python -m workers.buildlogsarchiver 2>&1

 echo 'Diffs worker exited'

@@ -6,7 +6,9 @@ echo 'Starting internal build manager'
 monit

 # Run the build manager.
-cd /
+QUAYPATH=${QUAYPATH:-"."}
+cd ${QUAYDIR:-"/"}
+export PYTHONPATH=$QUAYPATH
 exec venv/bin/python -m buildman.builder 2>&1

 echo 'Internal build manager exited'

@@ -2,7 +2,8 @@

 echo 'Starting chunk cleanup worker'

-cd /
-venv/bin/python -m workers.chunkcleanupworker 2>&1
+QUAYPATH=${QUAYPATH:-"."}
+cd ${QUAYDIR:-"/"}
+PYTHONPATH=$QUAYPATH venv/bin/python -m workers.chunkcleanupworker 2>&1

 echo 'Chunk cleanup worker exited'

@@ -2,7 +2,8 @@

 echo 'Starting GC worker'

-cd /
-venv/bin/python -m workers.gcworker 2>&1
+QUAYPATH=${QUAYPATH:-"."}
+cd ${QUAYDIR:-"/"}
+PYTHONPATH=$QUAYPATH venv/bin/python -m workers.gc.gcworker 2>&1

 echo 'Repository GC exited'

@@ -2,7 +2,8 @@

 echo 'Starting global prometheus stats worker'

-cd /
-venv/bin/python -m workers.globalpromstats
+QUAYPATH=${QUAYPATH:-"."}
+cd ${QUAYDIR:-"/"}
+PYTHONPATH=$QUAYPATH venv/bin/python -m workers.globalpromstats

 echo 'Global prometheus stats exited'
@@ -2,7 +2,10 @@

 echo 'Starting gunicon'

-cd /
-nice -n 10 venv/bin/gunicorn -c conf/gunicorn_registry.py registry:application
+QUAYPATH=${QUAYPATH:-"."}
+QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
+
+cd ${QUAYDIR:-"/"}
+PYTHONPATH=$QUAYPATH nice -n 10 venv/bin/gunicorn -c $QUAYCONF/gunicorn_registry.py registry:application

 echo 'Gunicorn exited'

@@ -2,7 +2,10 @@

 echo 'Starting gunicon'

-cd /
-venv/bin/gunicorn -c conf/gunicorn_secscan.py secscan:application
+QUAYPATH=${QUAYPATH:-"."}
+QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
+
+cd ${QUAYDIR:-"/"}
+PYTHONPATH=$QUAYPATH venv/bin/gunicorn -c $QUAYCONF/gunicorn_secscan.py secscan:application

 echo 'Gunicorn exited'

@@ -2,7 +2,10 @@

 echo 'Starting gunicon'

-cd /
-nice -n 10 venv/bin/gunicorn -c conf/gunicorn_verbs.py verbs:application
+QUAYPATH=${QUAYPATH:-"."}
+QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
+
+cd ${QUAYDIR:-"/"}
+PYTHONPATH=$QUAYPATH nice -n 10 venv/bin/gunicorn -c $QUAYCONF/gunicorn_verbs.py verbs:application

 echo 'Gunicorn exited'

@@ -2,7 +2,10 @@

 echo 'Starting gunicon'

-cd /
-venv/bin/gunicorn -c conf/gunicorn_web.py web:application
+QUAYPATH=${QUAYPATH:-"."}
+QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
+
+cd ${QUAYDIR:-"/"}
+PYTHONPATH=$QUAYPATH venv/bin/gunicorn -c $QUAYCONF/gunicorn_web.py web:application

 echo 'Gunicorn exited'
@@ -1,12 +1,16 @@
 #! /bin/bash
-cd /

-if [ -f conf/jwtproxy_conf.yaml ];
+QUAYPATH=${QUAYPATH:-"."}
+cd ${QUAYDIR:-"/"}
+PYTHONPATH=$QUAYPATH
+QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
+
+if [ -f $QUAYCONF/jwtproxy_conf.yaml ];
 then
 echo 'Starting jwtproxy'
-/usr/local/bin/jwtproxy --config conf/jwtproxy_conf.yaml
+/usr/local/bin/jwtproxy --config $QUAYCONF/jwtproxy_conf.yaml
 rm /tmp/jwtproxy_secscan.sock
 echo 'Jwtproxy exited'
 else
 sleep 1
 fi
@@ -2,7 +2,8 @@

 echo 'Starting log rotation worker'

-cd /
-venv/bin/python -m workers.logrotateworker
+QUAYPATH=${QUAYPATH:-"."}
+cd ${QUAYDIR:-"/"}
+PYTHONPATH=$QUAYPATH venv/bin/python -m workers.logrotateworker

 echo 'Log rotation worker exited'

@@ -2,6 +2,11 @@

 echo 'Starting nginx'

-/usr/sbin/nginx -c /conf/nginx/nginx.conf
+QUAYPATH=${QUAYPATH:-"."}
+cd ${QUAYDIR:-"/"}
+PYTHONPATH=$QUAYPATH
+QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
+
+/usr/sbin/nginx -c $QUAYCONF/nginx/nginx.conf

 echo 'Nginx exited'
@@ -2,7 +2,9 @@

 echo 'Starting notification worker'

-cd /
-venv/bin/python -m workers.notificationworker
+QUAYPATH=${QUAYPATH:-"."}
+cd ${QUAYDIR:-"/"}
+
+PYTHONPATH=$QUAYPATH venv/bin/python -m workers.notificationworker

 echo 'Notification worker exited'

@@ -2,7 +2,8 @@

 echo 'Starting Queue cleanup worker'

-cd /
-venv/bin/python -m workers.queuecleanupworker 2>&1
+QUAYPATH=${QUAYPATH:-"."}
+cd ${QUAYDIR:-"/"}
+PYTHONPATH=$QUAYPATH venv/bin/python -m workers.queuecleanupworker 2>&1

 echo 'Repository Queue cleanup exited'

@@ -2,7 +2,8 @@

 echo 'Starting repository action count worker'

-cd /
-venv/bin/python -m workers.repositoryactioncounter 2>&1
+QUAYPATH=${QUAYPATH:-"."}
+cd ${QUAYDIR:-"/"}
+PYTHONPATH=$QUAYPATH venv/bin/python -m workers.repositoryactioncounter 2>&1

 echo 'Repository action worker exited'

@@ -2,7 +2,8 @@

 echo 'Starting security scanner notification worker'

-cd /
-venv/bin/python -m workers.security_notification_worker 2>&1
+QUAYPATH=${QUAYPATH:-"."}
+cd ${QUAYDIR:-"/"}
+PYTHONPATH=$QUAYPATH venv/bin/python -m workers.security_notification_worker 2>&1

 echo 'Security scanner notification worker exited'

@@ -2,7 +2,8 @@

 echo 'Starting security scanner worker'

-cd /
-venv/bin/python -m workers.securityworker 2>&1
+QUAYPATH=${QUAYPATH:-"."}
+cd ${QUAYDIR:-"/"}
+PYTHONPATH=$QUAYPATH venv/bin/python -m workers.securityworker.securityworker 2>&1

 echo 'Security scanner worker exited'

@@ -2,7 +2,8 @@

 echo 'Starting service key worker'

-cd /
-venv/bin/python -m workers.service_key_worker 2>&1
+QUAYPATH=${QUAYPATH:-"."}
+cd ${QUAYDIR:-"/"}
+PYTHONPATH=$QUAYPATH venv/bin/python -m workers.service_key_worker 2>&1

 echo 'Service key worker exited'

@@ -2,7 +2,8 @@

 echo 'Starting storage replication worker'

-cd /
-venv/bin/python -m workers.storagereplication 2>&1
+QUAYPATH=${QUAYPATH:-"."}
+cd ${QUAYDIR:-"/"}
+PYTHONPATH=$QUAYPATH venv/bin/python -m workers.storagereplication 2>&1

 echo 'Repository storage replication exited'

@@ -2,7 +2,8 @@

 echo 'Starting team synchronization worker'

-cd /
-venv/bin/python -m workers.teamsyncworker 2>&1
+QUAYPATH=${QUAYPATH:-"."}
+cd ${QUAYDIR:-"/"}
+PYTHONPATH=$QUAYPATH venv/bin/python -m workers.teamsyncworker 2>&1

 echo 'Team synchronization worker exited'
@@ -1,3 +1,4 @@
 #!/bin/bash
+cd ${QUAYDIR:-"/"}

-/venv/bin/python /boot.py
+venv/bin/python ${QUAYPATH:-"."}/boot.py
@@ -2,8 +2,8 @@ jwtproxy:
 signer_proxy:
 enabled: true
 listen_addr: :8080
-ca_key_file: /conf/mitm.key
-ca_crt_file: /conf/mitm.cert
+ca_key_file: {{ conf_dir }}/mitm.key
+ca_crt_file: {{ conf_dir }}/mitm.cert

 signer:
 issuer: quay
@@ -13,7 +13,7 @@ jwtproxy:
 type: preshared
 options:
 key_id: {{ key_id }}
-private_key_path: /conf/quay.pem
+private_key_path: {{ conf_dir }}/quay.pem
 verifier_proxies:
 - enabled: true
 listen_addr: unix:/tmp/jwtproxy_secscan.sock
@@ -1,11 +1,11 @@
 [loggers]
-keys=root
+keys=root,gunicorn.error,gunicorn.access

 [handlers]
 keys=console

 [formatters]
-keys=generic
+keys=generic,json

 [logger_root]
 level=INFO
@@ -19,3 +19,18 @@ args=(sys.stdout, )
 [formatter_generic]
 format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
 class=logging.Formatter
+
+[formatter_json]
+class=loghandler.JsonFormatter
+
+[logger_gunicorn.error]
+level=ERROR
+handlers=console
+propagate=0
+qualname=gunicorn.error
+
+[logger_gunicorn.access]
+handlers=console
+propagate=0
+qualname=gunicorn.access
+level=DEBUG
@@ -1,11 +1,11 @@
 [loggers]
-keys=root,boto
+keys=root,boto,gunicorn.error,gunicorn.access

 [handlers]
 keys=console

 [formatters]
-keys=generic
+keys=generic,json

 [logger_root]
 level=DEBUG
@@ -16,11 +16,26 @@ level=INFO
 handlers=console
 qualname=boto

+[logger_gunicorn.access]
+handlers=console
+propagate=0
+qualname=gunicorn.access
+level=DEBUG
+
 [handler_console]
 class=StreamHandler
 formatter=generic
 args=(sys.stdout, )

+[logger_gunicorn.error]
+level=ERROR
+handlers=console
+propagate=0
+qualname=gunicorn.error
+
 [formatter_generic]
 format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
 class=logging.Formatter
+
+[formatter_json]
+class=loghandler.JsonFormatter
41  conf/logging_debug_json.conf  Normal file
@@ -0,0 +1,41 @@
[loggers]
keys=root,boto,gunicorn.error,gunicorn.access

[handlers]
keys=console

[formatters]
keys=generic,json

[logger_root]
level=DEBUG
handlers=console

[logger_boto]
level=INFO
handlers=console
qualname=boto

[logger_gunicorn.access]
handlers=console
propagate=0
qualname=gunicorn.access
level=DEBUG

[handler_console]
class=StreamHandler
formatter=json
args=(sys.stdout, )

[logger_gunicorn.error]
level=ERROR
handlers=console
propagate=0
qualname=gunicorn.error

[formatter_generic]
format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
class=logging.Formatter

[formatter_json]
class=loghandler.JsonFormatter
36  conf/logging_json.conf  Normal file
@@ -0,0 +1,36 @@
[loggers]
keys=root,gunicorn.error,gunicorn.access

[handlers]
keys=console

[formatters]
keys=json,generic

[logger_root]
level=INFO
handlers=console

[handler_console]
class=StreamHandler
formatter=json
args=(sys.stdout, )

[formatter_generic]
format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s
class=logging.Formatter

[formatter_json]
class=loghandler.JsonFormatter

[logger_gunicorn.error]
level=ERROR
handlers=console
propagate=0
qualname=gunicorn.error

[logger_gunicorn.access]
handlers=console
propagate=0
qualname=gunicorn.access
level=DEBUG
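These four files (logging.conf, logging_debug.conf, logging_debug_json.conf, logging_json.conf) line up with the logfile_path(debug=False) call introduced in the gunicorn configs above. A rough sketch of how such a selector could map its flags onto these filenames follows; the real util.log helper may differ in name and signature, so treat this only as an illustration:

import os

# Assumed helper: picks one of the four conf/logging*.conf variants.
def logfile_path(jsonfmt=False, debug=False, conf_dir='conf'):
  name = 'logging'
  if debug:
    name += '_debug'
  if jsonfmt:
    name += '_json'
  return os.path.join(conf_dir, name + '.conf')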
@@ -166,11 +166,11 @@ location /c1/ {

 location /static/ {
 # checks for static file, if not found proxy to app
-alias /static/;
+alias {{static_dir}};
 error_page 404 /404;
 }

-error_page 502 /static/502.html;
+error_page 502 {{static_dir}}/502.html;

 location ~ ^/b1/controller(/?)(.*) {
 proxy_pass http://build_manager_controller_server/$2;
16  config.py
@@ -3,6 +3,8 @@ from uuid import uuid4
 import os.path
 import requests

+from _init import ROOT_DIR, CONF_DIR
+

 def build_requests_session():
 sess = requests.Session()
@@ -45,7 +47,7 @@ class ImmutableConfig(object):
 # Status tag config
 STATUS_TAGS = {}
 for tag_name in ['building', 'failed', 'none', 'ready', 'cancelled']:
-tag_path = os.path.join('buildstatus', tag_name + '.svg')
+tag_path = os.path.join(ROOT_DIR, 'buildstatus', tag_name + '.svg')
 with open(tag_path) as tag_svg:
 STATUS_TAGS[tag_name] = tag_svg.read()

@@ -263,6 +265,10 @@ class DefaultConfig(ImmutableConfig):
 # Feature Flag: Whether to enable support for App repositories.
 FEATURE_APP_REGISTRY = False

+# Feature Flag: If set to true, the _catalog endpoint returns public repositories. Otherwise,
+# only private repositories can be returned.
+FEATURE_PUBLIC_CATALOG = False
+
 # The namespace to use for library repositories.
 # Note: This must remain 'library' until Docker removes their hard-coded namespace for libraries.
 # See: https://github.com/docker/docker/blob/master/registry/session.go#L320
@@ -296,7 +302,7 @@ class DefaultConfig(ImmutableConfig):
 # System logs.
 SYSTEM_LOGS_PATH = "/var/log/"
 SYSTEM_LOGS_FILE = "/var/log/syslog"
-SYSTEM_SERVICES_PATH = "conf/init/service/"
+SYSTEM_SERVICES_PATH = os.path.join(CONF_DIR, "init/service/")

 # Allow registry pulls when unable to write to the audit log
 ALLOW_PULLS_WITHOUT_STRICT_LOGGING = False
@@ -349,7 +355,7 @@ class DefaultConfig(ImmutableConfig):
 SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS = []

 # The indexing engine version running inside the security scanner.
-SECURITY_SCANNER_ENGINE_VERSION_TARGET = 2
+SECURITY_SCANNER_ENGINE_VERSION_TARGET = 3

 # The version of the API to use for the security scanner.
 SECURITY_SCANNER_API_VERSION = 'v1'
@@ -400,11 +406,11 @@ class DefaultConfig(ImmutableConfig):
 INSTANCE_SERVICE_KEY_SERVICE = 'quay'

 # The location of the key ID file generated for this instance.
-INSTANCE_SERVICE_KEY_KID_LOCATION = 'conf/quay.kid'
+INSTANCE_SERVICE_KEY_KID_LOCATION = os.path.join(CONF_DIR, 'quay.kid')

 # The location of the private key generated for this instance.
 # NOTE: If changed, jwtproxy_conf.yaml.jnj must also be updated.
-INSTANCE_SERVICE_KEY_LOCATION = 'conf/quay.pem'
+INSTANCE_SERVICE_KEY_LOCATION = os.path.join(CONF_DIR, 'quay.pem')

 # This instance's service key expiration in minutes.
 INSTANCE_SERVICE_KEY_EXPIRATION = 120
@@ -1006,6 +1006,7 @@ class RepositoryNotification(BaseModel):
 title = CharField(null=True)
 config_json = TextField()
 event_config_json = TextField(default='{}')
+number_of_failures = IntegerField(default=0)


 class RepositoryAuthorizedEmail(BaseModel):
@@ -10,8 +10,11 @@ from six import add_metaclass
 from app import storage, authentication
 from data import model, oci_model
 from data.database import Tag, Manifest, MediaType, Blob, Repository, Channel
+from util.audit import track_and_log
+from util.morecollections import AttrDict
 from util.names import parse_robot_username


 class BlobDescriptor(namedtuple('Blob', ['mediaType', 'size', 'digest', 'urls'])):
 """ BlobDescriptor describes a blob with its mediatype, size and digest.
 A BlobDescriptor is used to retrieves the actual blob.
@@ -55,10 +58,6 @@ class AppRegistryDataInterface(object):
 """ Interface that represents all data store interactions required by a App Registry.
 """
-
-@abstractmethod
-def _application(self, package_name):
-pass

 @abstractmethod
 def list_applications(self, namespace=None, media_type=None, search=None, username=None,
 with_channels=False):
@@ -175,6 +174,11 @@ class AppRegistryDataInterface(object):
 Raises: ChannelNotFound, PackageNotFound
 """

+@abstractmethod
+def log_action(self, event_name, namespace_name, repo_name=None, analytics_name=None,
+analytics_sample=1, **kwargs):
+""" Logs an action to the audit log. """
+

 def _split_package_name(package):
 """ Returns the namespace and package-name """
@@ -200,6 +204,22 @@ class OCIAppModel(AppRegistryDataInterface):
 raise_package_not_found(package)
 return repo

+def log_action(self, event_name, namespace_name, repo_name=None, analytics_name=None,
+analytics_sample=1, metadata=None):
+metadata = {} if metadata is None else metadata
+
+repo = None
+if repo_name is not None:
+db_repo = model.repository.get_repository(namespace_name, repo_name,
+kind_filter='application')
+repo = AttrDict({
+'id': db_repo.id,
+'name': db_repo.name,
+'namespace_name': db_repo.namespace_user.username,
+})
+track_and_log(event_name, repo, analytics_name=analytics_name,
+analytics_sample=analytics_sample, **metadata)
+
 def list_applications(self, namespace=None, media_type=None, search=None, username=None,
 with_channels=False):
 """ Lists all repositories that contain applications, with optional filtering to a specific
@@ -248,7 +268,7 @@ class OCIAppModel(AppRegistryDataInterface):
 def create_application(self, package_name, visibility, owner):
 """ Create a new app repository, owner is the user who creates it """
 ns, name = _split_package_name(package_name)
-model.repository.create_repository(ns, name, owner, visibility, "application")
+model.repository.create_repository(ns, name, owner, visibility, 'application')

 def application_exists(self, package_name):
 """ Create a new app repository, owner is the user who creates it """
@@ -0,0 +1,32 @@
"""add notification number of failures column

Revision ID: dc4af11a5f90
Revises: 53e2ac668296
Create Date: 2017-05-16 17:24:02.630365

"""

# revision identifiers, used by Alembic.
revision = 'dc4af11a5f90'
down_revision = '53e2ac668296'

import sqlalchemy as sa
from alembic import op


def upgrade(tables):
  op.add_column('repositorynotification', sa.Column('number_of_failures',
                                                    sa.Integer(),
                                                    nullable=False,
                                                    server_default='0'))
  op.bulk_insert(tables.logentrykind, [
    {'name': 'reset_repo_notification'},
  ])


def downgrade(tables):
  op.drop_column('repositorynotification', 'number_of_failures')
  op.execute(tables
             .logentrykind
             .delete()
             .where(tables.logentrykind.c.name == op.inline_literal('reset_repo_notification')))
@@ -54,9 +54,13 @@ def get_public_repo_visibility():
 return Visibility.get(name='public')


-@lru_cache(maxsize=3)
 def _lookup_team_role(name):
-return TeamRole.get(name=name)
+return _lookup_team_roles()[name]
+
+
+@lru_cache(maxsize=1)
+def _lookup_team_roles():
+return {role.name:role for role in TeamRole.select()}


 def filter_to_repos_for_user(query, username=None, namespace=None, repo_kind='image',
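The change above moves from caching individual TeamRole lookups (maxsize=3) to caching the whole name-to-role mapping (maxsize=1), so a single SELECT fills the cache for every role name. A small illustration of the same pattern, independent of the Quay models and shown only as a sketch:

from functools import lru_cache

@lru_cache(maxsize=1)
def _all_roles():
  # One lookup (here simulated with a literal) populates every entry at once.
  return {name: name.upper() for name in ('admin', 'creator', 'member')}

def lookup_role(name):
  return _all_roles()[name]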
@@ -88,6 +88,17 @@ def get_stale_blob_upload(stale_timespan):
 return None


+def get_blob_upload_by_uuid(upload_uuid):
+""" Loads the upload with the given UUID, if any. """
+try:
+return (BlobUpload
+.select()
+.where(BlobUpload.uuid == upload_uuid)
+.get())
+except BlobUpload.DoesNotExist:
+return None
+
+
 def get_blob_upload(namespace, repo_name, upload_uuid):
 """ Load the upload which is already in progress.
 """
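A short usage sketch of the new lookup; the caller shown here is hypothetical and not part of this commit:

# Hypothetical caller: delete an upload located only by its UUID.
def purge_upload(upload_uuid):
  upload = get_blob_upload_by_uuid(upload_uuid)
  if upload is not None:
    upload.delete_instance()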
@@ -62,7 +62,7 @@ def create_manifest_label(tag_manifest, key, value, source_type_name, media_type

 media_type_id = _get_media_type_id(media_type_name)
 if media_type_id is None:
-raise InvalidMediaTypeException
+raise InvalidMediaTypeException()

 source_type_id = _get_label_source_type_id(source_type_name)
@@ -1,9 +1,9 @@
 import json

-from data.model import InvalidNotificationException, db_transaction
 from data.database import (Notification, NotificationKind, User, Team, TeamMember, TeamRole,
 RepositoryNotification, ExternalNotificationEvent, Repository,
-ExternalNotificationMethod, Namespace)
+ExternalNotificationMethod, Namespace, db_for_update)
+from data.model import InvalidNotificationException, db_transaction


 def create_notification(kind_name, target, metadata={}, lookup_path=None):
@@ -125,6 +125,30 @@ def delete_matching_notifications(target, kind_name, **kwargs):
 notification.delete_instance()


+def increment_notification_failure_count(notification_id):
+""" This increments the number of failures by one """
+RepositoryNotification.update(number_of_failures=RepositoryNotification.number_of_failures + 1).where(
+RepositoryNotification.id == notification_id).execute()
+
+
+def reset_notification_number_of_failures(namespace_name, repository_name, uuid):
+""" This resets the number of failures for a repo notification to 0 """
+try:
+notification = RepositoryNotification.select().where(RepositoryNotification.uuid == uuid).get()
+if (notification.repository.namespace_user.username != namespace_name or
+notification.repository.name != repository_name):
+raise InvalidNotificationException('No repository notification found with uuid: %s' % uuid)
+reset_number_of_failures_to_zero(notification.id)
+return notification
+except RepositoryNotification.DoesNotExist:
+return None
+
+
+def reset_number_of_failures_to_zero(notification_id):
+""" This resets the number of failures for a repo notification to 0 """
+RepositoryNotification.update(number_of_failures=0).where(RepositoryNotification.id == notification_id).execute()
+
+
 def create_repo_notification(repo, event_name, method_name, method_config, event_config, title=None):
 event = ExternalNotificationEvent.get(ExternalNotificationEvent.name == event_name)
 method = ExternalNotificationMethod.get(ExternalNotificationMethod.name == method_name)
@@ -134,23 +158,34 @@ def create_repo_notification(repo, event_name, method_name, method_config, event
 event_config_json=json.dumps(event_config))


+def _base_get_notification(uuid):
+""" This is a base query for get statements """
+return (RepositoryNotification
+.select(RepositoryNotification, Repository, Namespace)
+.join(Repository)
+.join(Namespace, on=(Repository.namespace_user == Namespace.id))
+.where(RepositoryNotification.uuid == uuid))
+
+
+def get_enabled_notification(uuid):
+""" This returns a notification with less than 3 failures """
+try:
+return _base_get_notification(uuid).where(RepositoryNotification.number_of_failures < 3).get()
+except RepositoryNotification.DoesNotExist:
+raise InvalidNotificationException('No repository notification found with uuid: %s' % uuid)
+
+
 def get_repo_notification(uuid):
 try:
-return (RepositoryNotification
-.select(RepositoryNotification, Repository, Namespace)
-.join(Repository)
-.join(Namespace, on=(Repository.namespace_user == Namespace.id))
-.where(RepositoryNotification.uuid == uuid)
-.get())
+return _base_get_notification(uuid).get()
 except RepositoryNotification.DoesNotExist:
-raise InvalidNotificationException('No repository notification found with id: %s' % uuid)
+raise InvalidNotificationException('No repository notification found with uuid: %s' % uuid)


 def delete_repo_notification(namespace_name, repository_name, uuid):
 found = get_repo_notification(uuid)
-if (found.repository.namespace_user.username != namespace_name or
-found.repository.name != repository_name):
-raise InvalidNotificationException('No repository notifiation found with id: %s' % uuid)
+if found.repository.namespace_user.username != namespace_name or found.repository.name != repository_name:
+raise InvalidNotificationException('No repository notifiation found with uuid: %s' % uuid)
 found.delete_instance()
 return found
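Taken together, the helpers added above let a delivery path skip notifications that have already failed three or more times and reset the counter after a success. A plausible (not verbatim) usage pattern, sketched here only to show how the pieces fit:

# Sketch: how a delivery worker might combine the new helpers.
def deliver(uuid, send):
  try:
    notification = get_enabled_notification(uuid)  # raises once failures reach 3
  except InvalidNotificationException:
    return
  try:
    send(notification)
    reset_number_of_failures_to_zero(notification.id)
  except Exception:
    increment_notification_failure_count(notification.id)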
@@ -106,15 +106,28 @@ def remove_organization_member(org, user_obj):
 TeamMember.delete().where(TeamMember.id << members).execute()


-def get_organization_member_set(orgname):
+def get_organization_member_set(org, include_robots=False, users_filter=None):
+""" Returns the set of all member usernames under the given organization, with optional
+filtering by robots and/or by a specific set of User objects.
+"""
 Org = User.alias()
 org_users = (User
 .select(User.username)
 .join(TeamMember)
 .join(Team)
-.join(Org, on=(Org.id == Team.organization))
-.where(Org.username == orgname)
+.where(Team.organization == org)
 .distinct())

+if not include_robots:
+org_users = org_users.where(User.robot == False)
+
+if users_filter is not None:
+ids_list = [u.id for u in users_filter if u is not None]
+if not ids_list:
+return set()
+
+org_users = org_users.where(User.id << ids_list)
+
 return {user.username for user in org_users}
@@ -181,45 +181,59 @@ def garbage_collect_repo(repo, extra_candidate_set=None):
 logger.debug('No candidate images for GC for repo: %s', repo.id)
 return True

-candidates_orphans = list(candidate_orphan_image_set)
+all_images_removed = set()
+all_storage_id_whitelist = set()
+all_unreferenced_candidates = set()

-with db_transaction():
-Candidate = Image.alias()
-Tagged = Image.alias()
-ancestor_superset = Tagged.ancestors ** db_concat_func(Candidate.ancestors, Candidate.id, '/%')
+# Remove any images directly referenced by tags, to prune the working set.
+direct_referenced = (RepositoryTag
+.select(RepositoryTag.image)
+.where(RepositoryTag.repository == repo.id,
+RepositoryTag.image << list(candidate_orphan_image_set)))
+candidate_orphan_image_set.difference_update([t.image_id for t in direct_referenced])

-# We are going to compute all images which are being referenced in two ways:
-# First, we will find all images which have their ancestor paths appear in
-# another image. Secondly, we union in all of the candidate images which are
-# directly referenced by a tag. This can be used in a subquery to directly
-# find which candidates are being referenced without any client side
-# computation or extra round trips.
-ancestor_referenced = (Candidate
-.select(Candidate.id)
-.join(Tagged, on=ancestor_superset)
-.join(RepositoryTag, on=(Tagged.id == RepositoryTag.image))
-.where(RepositoryTag.repository == repo.id,
-Candidate.id << candidates_orphans))
-
-direct_referenced = (RepositoryTag
-.select(RepositoryTag.image)
-.where(RepositoryTag.repository == repo.id,
-RepositoryTag.image << candidates_orphans))
-
-referenced_candidates = (direct_referenced | ancestor_referenced)
+# Iteratively try to remove images from the database. The only images we can remove are those
+# that are not referenced by tags AND not the parents of other images. We continue removing images
+# until no changes are found.
+iteration = 0
+making_progress = True
+while candidate_orphan_image_set and making_progress:
+iteration = iteration + 1
+logger.debug('Starting iteration #%s for GC of repository %s with candidates: %s', iteration,
+repo.id, candidate_orphan_image_set)
+candidates_orphans = list(candidate_orphan_image_set)
+
+with db_transaction():
+# Any image directly referenced by a tag that still exists, cannot be GCed.
+direct_referenced = (RepositoryTag
+.select(RepositoryTag.image)
+.where(RepositoryTag.repository == repo.id,
+RepositoryTag.image << candidates_orphans))
+
+# Any image which is the parent of another image, cannot be GCed.
+parent_referenced = (Image
+.select(Image.parent)
+.where(Image.repository == repo.id,
+Image.parent << candidates_orphans))
+
+referenced_candidates = (direct_referenced | parent_referenced)

 # We desire a few pieces of information from the database from the following
 # query: all of the image ids which are associated with this repository,
 # and the storages which are associated with those images.
 unreferenced_candidates = (Image
 .select(Image.id, Image.docker_image_id,
 ImageStorage.id, ImageStorage.uuid)
 .join(ImageStorage)
 .where(Image.id << candidates_orphans,
 ~(Image.id << referenced_candidates)))

+image_ids_to_remove = [candidate.id for candidate in unreferenced_candidates]
+making_progress = bool(len(image_ids_to_remove))
+if len(image_ids_to_remove) == 0:
+# No more candidates to remove.
+break
+
-image_ids_to_remove = [candidate.id for candidate in unreferenced_candidates]
-if len(image_ids_to_remove) > 0:
 logger.info('Cleaning up unreferenced images: %s', image_ids_to_remove)
 storage_id_whitelist = set([candidate.storage_id for candidate in unreferenced_candidates])

@@ -249,15 +263,22 @@ def garbage_collect_repo(repo, extra_candidate_set=None):
 logger.info('Could not GC images %s; will try again soon', image_ids_to_remove)
 return False

+# Add the images to the removed set and remove them from the candidate set.
+all_images_removed.update(image_ids_to_remove)
+all_storage_id_whitelist.update(storage_id_whitelist)
+all_unreferenced_candidates.update(unreferenced_candidates)
+
+candidate_orphan_image_set.difference_update(image_ids_to_remove)
+
 # If any images were removed, GC any orphaned storages.
-if len(image_ids_to_remove) > 0:
-logger.info('Garbage collecting storage for images: %s', image_ids_to_remove)
-storage_ids_removed = set(storage.garbage_collect_storage(storage_id_whitelist))
+if len(all_images_removed) > 0:
+logger.info('Garbage collecting storage for images: %s', all_images_removed)
+storage_ids_removed = set(storage.garbage_collect_storage(all_storage_id_whitelist))

 # If any storages were removed and cleanup callbacks are registered, call them with
 # the images+storages removed.
 if storage_ids_removed and config.image_cleanup_callbacks:
-image_storages_removed = [candidate for candidate in unreferenced_candidates
+image_storages_removed = [candidate for candidate in all_unreferenced_candidates
 if candidate.storage_id in storage_ids_removed]
 for callback in config.image_cleanup_callbacks:
 callback(image_storages_removed)

@@ -616,3 +637,14 @@ def list_popular_public_repos(action_count_threshold, time_span, repo_kind='imag
 .group_by(RepositoryActionCount.repository, Repository.name, Namespace.username)
 .having(fn.Sum(RepositoryActionCount.count) >= action_count_threshold)
 .tuples())
+
+
+def is_empty(namespace_name, repository_name):
+""" Returns if the repository referenced by the given namespace and name is empty. If the repo
+doesn't exist, returns True.
+"""
+try:
+tag.list_repository_tags(namespace_name, repository_name).limit(1).get()
+return False
+except RepositoryTag.DoesNotExist:
+return True
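The reworked garbage_collect_repo above replaces the ancestor-path query with an iterative sweep: on each pass, any candidate that is neither referenced by a tag nor the parent of another image is removed, and the loop stops when a pass removes nothing. The same idea in miniature, on plain Python sets, shown only as an illustration of the loop structure:

def sweep(candidates, tagged, parent_of):
  """candidates: set of image ids; tagged: ids referenced by tags;
     parent_of: dict of child_id -> parent_id. Returns ids that would be GCed."""
  removed = set()
  making_progress = True
  while candidates and making_progress:
    # Parents of still-live children cannot be collected this pass.
    live_parents = {p for c, p in parent_of.items() if c not in removed}
    removable = {c for c in candidates if c not in tagged and c not in live_parents}
    making_progress = bool(removable)
    removed |= removable
    candidates -= removable
  return removed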
622  data/model/test/test_gc.py  Normal file
@@ -0,0 +1,622 @@
import hashlib
import pytest
import time

from mock import patch

from app import storage
from contextlib import contextmanager
from playhouse.test_utils import assert_query_count

from data import model, database
from data.database import Image, ImageStorage, DerivedStorageForImage, Label, TagManifestLabel, Blob
from test.fixtures import *


ADMIN_ACCESS_USER = 'devtable'
PUBLIC_USER = 'public'

REPO = 'somerepo'

def _set_tag_expiration_policy(namespace, expiration_s):
  namespace_user = model.user.get_user(namespace)
  model.user.change_user_tag_expiration(namespace_user, expiration_s)


@pytest.fixture()
def default_tag_policy(initialized_db):
  _set_tag_expiration_policy(ADMIN_ACCESS_USER, 0)
  _set_tag_expiration_policy(PUBLIC_USER, 0)


def create_image(docker_image_id, repository_obj, username):
  preferred = storage.preferred_locations[0]
  image = model.image.find_create_or_link_image(docker_image_id, repository_obj, username, {},
                                                preferred)
  image.storage.uploading = False
  image.storage.save()

  # Create derived images as well.
  model.image.find_or_create_derived_storage(image, 'squash', preferred)
  model.image.find_or_create_derived_storage(image, 'aci', preferred)

  # Add some torrent info.
  try:
    database.TorrentInfo.get(storage=image.storage)
  except database.TorrentInfo.DoesNotExist:
    model.storage.save_torrent_info(image.storage, 1, 'helloworld')

  # Add some additional placements to the image.
  for location_name in ['local_eu']:
    location = database.ImageStorageLocation.get(name=location_name)

    try:
      database.ImageStoragePlacement.get(location=location, storage=image.storage)
    except:
      continue

    database.ImageStoragePlacement.create(location=location, storage=image.storage)

  return image.storage


def create_repository(namespace=ADMIN_ACCESS_USER, name=REPO, **kwargs):
  user = model.user.get_user(namespace)
  repo = model.repository.create_repository(namespace, name, user)

  # Populate the repository with the tags.
  image_map = {}
  for tag_name in kwargs:
    image_ids = kwargs[tag_name]
    parent = None

    for image_id in image_ids:
      if not image_id in image_map:
        image_map[image_id] = create_image(image_id, repo, namespace)

      v1_metadata = {
        'id': image_id,
      }
      if parent is not None:
        v1_metadata['parent'] = parent.docker_image_id

      # Set the ancestors for the image.
      parent = model.image.set_image_metadata(image_id, namespace, name, '', '', '', v1_metadata,
                                              parent=parent)

    # Set the tag for the image.
    tag_manifest, _ = model.tag.store_tag_manifest(namespace, name, tag_name, image_ids[-1],
                                                   'sha:someshahere', '{}')

    # Add some labels to the tag.
    model.label.create_manifest_label(tag_manifest, 'foo', 'bar', 'manifest')
    model.label.create_manifest_label(tag_manifest, 'meh', 'grah', 'manifest')

  return repo


def gc_now(repository):
  assert model.repository.garbage_collect_repo(repository)


def delete_tag(repository, tag, perform_gc=True):
  model.tag.delete_tag(repository.namespace_user.username, repository.name, tag)
  if perform_gc:
    assert model.repository.garbage_collect_repo(repository)


def move_tag(repository, tag, docker_image_id):
  model.tag.create_or_update_tag(repository.namespace_user.username, repository.name, tag,
                                 docker_image_id)
  assert model.repository.garbage_collect_repo(repository)


def assert_not_deleted(repository, *args):
  for docker_image_id in args:
    assert model.image.get_image_by_id(repository.namespace_user.username, repository.name,
                                       docker_image_id)


def assert_deleted(repository, *args):
  for docker_image_id in args:
    try:
      # Verify the image is missing when accessed by the repository.
      model.image.get_image_by_id(repository.namespace_user.username, repository.name,
                                  docker_image_id)
    except model.DataModelException:
      return

    assert False, 'Expected image %s to be deleted' % docker_image_id


def _get_dangling_storage_count():
  storage_ids = set([current.id for current in ImageStorage.select()])
  referenced_by_image = set([image.storage_id for image in Image.select()])
  referenced_by_derived = set([derived.derivative_id
                               for derived in DerivedStorageForImage.select()])

  return len(storage_ids - referenced_by_image - referenced_by_derived)


def _get_dangling_label_count():
  label_ids = set([current.id for current in Label.select()])
  referenced_by_manifest = set([mlabel.label_id for mlabel in TagManifestLabel.select()])
  return len(label_ids - referenced_by_manifest)


@contextmanager
def assert_gc_integrity(expect_storage_removed=True):
  """ Specialized assertion for ensuring that GC cleans up all dangling storages
      and labels, invokes the callback for images removed and doesn't invoke the
      callback for images *not* removed.
  """
  # Add a callback for when images are removed.
  removed_image_storages = []
  model.config.register_image_cleanup_callback(removed_image_storages.extend)

  # Store the number of dangling storages and labels.
  existing_storage_count = _get_dangling_storage_count()
  existing_label_count = _get_dangling_label_count()
  yield

  # Ensure the number of dangling storages and labels has not changed.
  updated_storage_count = _get_dangling_storage_count()
  assert updated_storage_count == existing_storage_count

  updated_label_count = _get_dangling_label_count()
  assert updated_label_count == existing_label_count

  # Ensure that for each call to the image+storage cleanup callback, the image and its
  # storage is not found *anywhere* in the database.
  for removed_image_and_storage in removed_image_storages:
    with pytest.raises(Image.DoesNotExist):
      Image.get(id=removed_image_and_storage.id)

    with pytest.raises(ImageStorage.DoesNotExist):
      ImageStorage.get(id=removed_image_and_storage.storage_id)

    with pytest.raises(ImageStorage.DoesNotExist):
      ImageStorage.get(uuid=removed_image_and_storage.storage.uuid)

  assert expect_storage_removed == bool(removed_image_storages)

  # Ensure all CAS storage is in the storage engine.
  preferred = storage.preferred_locations[0]
  for storage_row in ImageStorage.select():
    if storage_row.cas_path:
      storage.get_content({preferred}, storage.blob_path(storage_row.content_checksum))

  for blob_row in Blob.select():
    storage.get_content({preferred}, storage.blob_path(blob_row.digest))


def test_has_garbage(default_tag_policy, initialized_db):
  """ Remove all existing repositories, then add one without garbage, check, then add one with
      garbage, and check again.
  """
  # Delete all existing repos.
  for repo in database.Repository.select().order_by(database.Repository.id):
    assert model.repository.purge_repository(repo.namespace_user.username, repo.name)

  # Change the time machine expiration on the namespace.
  (database.User
   .update(removed_tag_expiration_s=1000000000)
   .where(database.User.username == ADMIN_ACCESS_USER)
   .execute())

  # Create a repository without any garbage.
  repository = create_repository(latest=['i1', 'i2', 'i3'])

  # Ensure that no repositories are returned by the has garbage check.
  assert model.repository.find_repository_with_garbage(1000000000) is None

  # Delete a tag.
  delete_tag(repository, 'latest', perform_gc=False)

  # There should still not be any repositories with garbage, due to time machine.
  assert model.repository.find_repository_with_garbage(1000000000) is None

  # Change the time machine expiration on the namespace.
  (database.User
   .update(removed_tag_expiration_s=0)
   .where(database.User.username == ADMIN_ACCESS_USER)
   .execute())

  # Now we should find the repository for GC.
  repository = model.repository.find_repository_with_garbage(0)
  assert repository is not None
  assert repository.name == REPO

  # GC the repository.
  assert model.repository.garbage_collect_repo(repository)

  # There should now be no repositories with garbage.
  assert model.repository.find_repository_with_garbage(0) is None
|
||||||
|
|
||||||
|
|
||||||
|
def test_find_garbage_policy_functions(default_tag_policy, initialized_db):
|
||||||
|
with assert_query_count(1):
|
||||||
|
one_policy = model.repository.get_random_gc_policy()
|
||||||
|
all_policies = model.repository._get_gc_expiration_policies()
|
||||||
|
assert one_policy in all_policies
|
||||||
|
|
||||||
|
|
||||||
|
def test_one_tag(default_tag_policy, initialized_db):
|
||||||
|
""" Create a repository with a single tag, then remove that tag and verify that the repository
|
||||||
|
is now empty. """
|
||||||
|
with assert_gc_integrity():
|
||||||
|
repository = create_repository(latest=['i1', 'i2', 'i3'])
|
||||||
|
delete_tag(repository, 'latest')
|
||||||
|
assert_deleted(repository, 'i1', 'i2', 'i3')
|
||||||
|
|
||||||
|
|
||||||
|
def test_two_tags_unshared_images(default_tag_policy, initialized_db):
|
||||||
|
""" Repository has two tags with no shared images between them. """
|
||||||
|
with assert_gc_integrity():
|
||||||
|
repository = create_repository(latest=['i1', 'i2', 'i3'], other=['f1', 'f2'])
|
||||||
|
delete_tag(repository, 'latest')
|
||||||
|
assert_deleted(repository, 'i1', 'i2', 'i3')
|
||||||
|
assert_not_deleted(repository, 'f1', 'f2')
|
||||||
|
|
||||||
|
|
||||||
|
def test_two_tags_shared_images(default_tag_policy, initialized_db):
|
||||||
|
""" Repository has two tags with shared images. Deleting the tag should only remove the
|
||||||
|
unshared images.
|
||||||
|
"""
|
||||||
|
with assert_gc_integrity():
|
||||||
|
repository = create_repository(latest=['i1', 'i2', 'i3'], other=['i1', 'f1'])
|
||||||
|
delete_tag(repository, 'latest')
|
||||||
|
assert_deleted(repository, 'i2', 'i3')
|
||||||
|
assert_not_deleted(repository, 'i1', 'f1')
|
||||||
|
|
||||||
|
|
||||||
|
def test_unrelated_repositories(default_tag_policy, initialized_db):
|
||||||
|
""" Two repositories with different images. Removing the tag from one leaves the other's
|
||||||
|
images intact.
|
||||||
|
"""
|
||||||
|
with assert_gc_integrity():
|
||||||
|
repository1 = create_repository(latest=['i1', 'i2', 'i3'], name='repo1')
|
||||||
|
repository2 = create_repository(latest=['j1', 'j2', 'j3'], name='repo2')
|
||||||
|
|
||||||
|
delete_tag(repository1, 'latest')
|
||||||
|
|
||||||
|
assert_deleted(repository1, 'i1', 'i2', 'i3')
|
||||||
|
assert_not_deleted(repository2, 'j1', 'j2', 'j3')
|
||||||
|
|
||||||
|
|
||||||
|
def test_related_repositories(default_tag_policy, initialized_db):
|
||||||
|
""" Two repositories with shared images. Removing the tag from one leaves the other's
|
||||||
|
images intact.
|
||||||
|
"""
|
||||||
|
with assert_gc_integrity():
|
||||||
|
repository1 = create_repository(latest=['i1', 'i2', 'i3'], name='repo1')
|
||||||
|
repository2 = create_repository(latest=['i1', 'i2', 'j1'], name='repo2')
|
||||||
|
|
||||||
|
delete_tag(repository1, 'latest')
|
||||||
|
|
||||||
|
assert_deleted(repository1, 'i3')
|
||||||
|
assert_not_deleted(repository2, 'i1', 'i2', 'j1')
|
||||||
|
|
||||||
|
|
||||||
|
def test_inaccessible_repositories(default_tag_policy, initialized_db):
|
||||||
|
""" Two repositories under different namespaces should result in the images being deleted
|
||||||
|
but not completely removed from the database.
|
||||||
|
"""
|
||||||
|
with assert_gc_integrity():
|
||||||
|
repository1 = create_repository(namespace=ADMIN_ACCESS_USER, latest=['i1', 'i2', 'i3'])
|
||||||
|
repository2 = create_repository(namespace=PUBLIC_USER, latest=['i1', 'i2', 'i3'])
|
||||||
|
|
||||||
|
delete_tag(repository1, 'latest')
|
||||||
|
assert_deleted(repository1, 'i1', 'i2', 'i3')
|
||||||
|
assert_not_deleted(repository2, 'i1', 'i2', 'i3')
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def test_many_multiple_shared_images(default_tag_policy, initialized_db):
|
||||||
|
""" Repository has multiple tags with shared images. Delete all but one tag.
|
||||||
|
"""
|
||||||
|
with assert_gc_integrity():
|
||||||
|
repository = create_repository(latest=['i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8', 'j0'],
|
||||||
|
master=['i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8', 'j1'])
|
||||||
|
|
||||||
|
# Delete tag latest. Should only delete j0, since it is not shared.
|
||||||
|
delete_tag(repository, 'latest')
|
||||||
|
|
||||||
|
assert_deleted(repository, 'j0')
|
||||||
|
assert_not_deleted(repository, 'i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8', 'j1')
|
||||||
|
|
||||||
|
# Delete tag master. Should delete the rest of the images.
|
||||||
|
delete_tag(repository, 'master')
|
||||||
|
|
||||||
|
assert_deleted(repository, 'i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8', 'j1')
|
||||||
|
|
||||||
|
|
||||||
|
def test_multiple_shared_images(default_tag_policy, initialized_db):
|
||||||
|
""" Repository has multiple tags with shared images. Selectively deleting the tags, and
|
||||||
|
verifying at each step.
|
||||||
|
"""
|
||||||
|
with assert_gc_integrity():
|
||||||
|
repository = create_repository(latest=['i1', 'i2', 'i3'], other=['i1', 'f1', 'f2'],
|
||||||
|
third=['t1', 't2', 't3'], fourth=['i1', 'f1'])
|
||||||
|
|
||||||
|
# Current state:
|
||||||
|
# latest -> i3->i2->i1
|
||||||
|
# other -> f2->f1->i1
|
||||||
|
# third -> t3->t2->t1
|
||||||
|
# fourth -> f1->i1
|
||||||
|
|
||||||
|
# Delete tag other. Should delete f2, since it is not shared.
|
||||||
|
delete_tag(repository, 'other')
|
||||||
|
assert_deleted(repository, 'f2')
|
||||||
|
assert_not_deleted(repository, 'i1', 'i2', 'i3', 't1', 't2', 't3', 'f1')
|
||||||
|
|
||||||
|
# Current state:
|
||||||
|
# latest -> i3->i2->i1
|
||||||
|
# third -> t3->t2->t1
|
||||||
|
# fourth -> f1->i1
|
||||||
|
|
||||||
|
# Move tag fourth to i3. This should remove f1 since it is no longer referenced.
|
||||||
|
move_tag(repository, 'fourth', 'i3')
|
||||||
|
assert_deleted(repository, 'f1')
|
||||||
|
assert_not_deleted(repository, 'i1', 'i2', 'i3', 't1', 't2', 't3')
|
||||||
|
|
||||||
|
# Current state:
|
||||||
|
# latest -> i3->i2->i1
|
||||||
|
# third -> t3->t2->t1
|
||||||
|
# fourth -> i3->i2->i1
|
||||||
|
|
||||||
|
# Delete tag 'latest'. This should do nothing since fourth is on the same branch.
|
||||||
|
delete_tag(repository, 'latest')
|
||||||
|
assert_not_deleted(repository, 'i1', 'i2', 'i3', 't1', 't2', 't3')
|
||||||
|
|
||||||
|
# Current state:
|
||||||
|
# third -> t3->t2->t1
|
||||||
|
# fourth -> i3->i2->i1
|
||||||
|
|
||||||
|
# Delete tag 'third'. This should remove t1->t3.
|
||||||
|
delete_tag(repository, 'third')
|
||||||
|
assert_deleted(repository, 't1', 't2', 't3')
|
||||||
|
assert_not_deleted(repository, 'i1', 'i2', 'i3')
|
||||||
|
|
||||||
|
# Current state:
|
||||||
|
# fourth -> i3->i2->i1
|
||||||
|
|
||||||
|
# Add tag to i1.
|
||||||
|
move_tag(repository, 'newtag', 'i1')
|
||||||
|
assert_not_deleted(repository, 'i1', 'i2', 'i3')
|
||||||
|
|
||||||
|
# Current state:
|
||||||
|
# fourth -> i3->i2->i1
|
||||||
|
# newtag -> i1
|
||||||
|
|
||||||
|
# Delete tag 'fourth'. This should remove i2 and i3.
|
||||||
|
delete_tag(repository, 'fourth')
|
||||||
|
assert_deleted(repository, 'i2', 'i3')
|
||||||
|
assert_not_deleted(repository, 'i1')
|
||||||
|
|
||||||
|
# Current state:
|
||||||
|
# newtag -> i1
|
||||||
|
|
||||||
|
# Delete tag 'newtag'. This should remove the remaining image.
|
||||||
|
delete_tag(repository, 'newtag')
|
||||||
|
assert_deleted(repository, 'i1')
|
||||||
|
|
||||||
|
# Current state:
|
||||||
|
# (Empty)
|
||||||
|
|
||||||
|
|
||||||
|
def test_empty_gc(default_tag_policy, initialized_db):
|
||||||
|
with assert_gc_integrity(expect_storage_removed=False):
|
||||||
|
repository = create_repository(latest=['i1', 'i2', 'i3'], other=['i1', 'f1', 'f2'],
|
||||||
|
third=['t1', 't2', 't3'], fourth=['i1', 'f1'])
|
||||||
|
|
||||||
|
gc_now(repository)
|
||||||
|
assert_not_deleted(repository, 'i1', 'i2', 'i3', 't1', 't2', 't3', 'f1', 'f2')
|
||||||
|
|
||||||
|
|
||||||
|
def test_time_machine_no_gc(default_tag_policy, initialized_db):
|
||||||
|
""" Repository has two tags with shared images. Deleting the tag should not remove any images
|
||||||
|
"""
|
||||||
|
with assert_gc_integrity(expect_storage_removed=False):
|
||||||
|
repository = create_repository(latest=['i1', 'i2', 'i3'], other=['i1', 'f1'])
|
||||||
|
_set_tag_expiration_policy(repository.namespace_user.username, 60*60*24)
|
||||||
|
|
||||||
|
delete_tag(repository, 'latest')
|
||||||
|
assert_not_deleted(repository, 'i2', 'i3')
|
||||||
|
assert_not_deleted(repository, 'i1', 'f1')
|
||||||
|
|
||||||
|
|
||||||
|
def test_time_machine_gc(default_tag_policy, initialized_db):
|
||||||
|
""" Repository has two tags with shared images. Deleting the second tag should cause the images
|
||||||
|
for the first deleted tag to gc.
|
||||||
|
"""
|
||||||
|
with assert_gc_integrity():
|
||||||
|
repository = create_repository(latest=['i1', 'i2', 'i3'], other=['i1', 'f1'])
|
||||||
|
|
||||||
|
_set_tag_expiration_policy(repository.namespace_user.username, 1)
|
||||||
|
|
||||||
|
delete_tag(repository, 'latest')
|
||||||
|
assert_not_deleted(repository, 'i2', 'i3')
|
||||||
|
assert_not_deleted(repository, 'i1', 'f1')
|
||||||
|
|
||||||
|
time.sleep(2)
|
||||||
|
|
||||||
|
# This will cause the images associated with latest to gc
|
||||||
|
delete_tag(repository, 'other')
|
||||||
|
assert_deleted(repository, 'i2', 'i3')
|
||||||
|
assert_not_deleted(repository, 'i1', 'f1')
|
||||||
|
|
||||||
|
|
||||||
|
def test_images_shared_storage(default_tag_policy, initialized_db):
|
||||||
|
""" Repository with two tags, both with the same shared storage. Deleting the first
|
||||||
|
tag should delete the first image, but *not* its storage.
|
||||||
|
"""
|
||||||
|
with assert_gc_integrity(expect_storage_removed=False):
|
||||||
|
repository = create_repository()
|
||||||
|
|
||||||
|
# Add two tags, each with their own image, but with the same storage.
|
||||||
|
image_storage = model.storage.create_v1_storage(storage.preferred_locations[0])
|
||||||
|
|
||||||
|
first_image = Image.create(docker_image_id='i1',
|
||||||
|
repository=repository, storage=image_storage,
|
||||||
|
ancestors='/')
|
||||||
|
|
||||||
|
second_image = Image.create(docker_image_id='i2',
|
||||||
|
repository=repository, storage=image_storage,
|
||||||
|
ancestors='/')
|
||||||
|
|
||||||
|
model.tag.store_tag_manifest(repository.namespace_user.username, repository.name,
|
||||||
|
'first', first_image.docker_image_id,
|
||||||
|
'sha:someshahere', '{}')
|
||||||
|
|
||||||
|
model.tag.store_tag_manifest(repository.namespace_user.username, repository.name,
|
||||||
|
'second', second_image.docker_image_id,
|
||||||
|
'sha:someshahere', '{}')
|
||||||
|
|
||||||
|
# Delete the first tag.
|
||||||
|
delete_tag(repository, 'first')
|
||||||
|
assert_deleted(repository, 'i1')
|
||||||
|
assert_not_deleted(repository, 'i2')
|
||||||
|
|
||||||
|
|
||||||
|
def test_image_with_cas(default_tag_policy, initialized_db):
|
||||||
|
""" A repository with a tag pointing to an image backed by CAS. Deleting and GCing the tag
|
||||||
|
should result in the storage and its CAS data being removed.
|
||||||
|
"""
|
||||||
|
with assert_gc_integrity(expect_storage_removed=True):
|
||||||
|
repository = create_repository()
|
||||||
|
|
||||||
|
# Create an image storage record under CAS.
|
||||||
|
content = 'hello world'
|
||||||
|
digest = 'sha256:' + hashlib.sha256(content).hexdigest()
|
||||||
|
preferred = storage.preferred_locations[0]
|
||||||
|
storage.put_content({preferred}, storage.blob_path(digest), content)
|
||||||
|
|
||||||
|
image_storage = database.ImageStorage.create(content_checksum=digest, uploading=False)
|
||||||
|
location = database.ImageStorageLocation.get(name=preferred)
|
||||||
|
database.ImageStoragePlacement.create(location=location, storage=image_storage)
|
||||||
|
|
||||||
|
# Ensure the CAS path exists.
|
||||||
|
assert storage.exists({preferred}, storage.blob_path(digest))
|
||||||
|
|
||||||
|
# Create the image and the tag.
|
||||||
|
first_image = Image.create(docker_image_id='i1',
|
||||||
|
repository=repository, storage=image_storage,
|
||||||
|
ancestors='/')
|
||||||
|
|
||||||
|
model.tag.store_tag_manifest(repository.namespace_user.username, repository.name,
|
||||||
|
'first', first_image.docker_image_id,
|
||||||
|
'sha:someshahere1', '{}')
|
||||||
|
|
||||||
|
assert_not_deleted(repository, 'i1')
|
||||||
|
|
||||||
|
# Delete the tag.
|
||||||
|
delete_tag(repository, 'first')
|
||||||
|
assert_deleted(repository, 'i1')
|
||||||
|
|
||||||
|
# Ensure the CAS path is gone.
|
||||||
|
assert not storage.exists({preferred}, storage.blob_path(digest))
|
||||||
|
|
||||||
|
|
||||||
|
def test_images_shared_cas(default_tag_policy, initialized_db):
|
||||||
|
""" A repository, each two tags, pointing to the same image, which has image storage
|
||||||
|
with the same *CAS path*, but *distinct records*. Deleting the first tag should delete the
|
||||||
|
first image, and its storage, but not the file in storage, as it shares its CAS path.
|
||||||
|
"""
|
||||||
|
with assert_gc_integrity(expect_storage_removed=True):
|
||||||
|
repository = create_repository()
|
||||||
|
|
||||||
|
# Create two image storage records with the same content checksum.
|
||||||
|
content = 'hello world'
|
||||||
|
digest = 'sha256:' + hashlib.sha256(content).hexdigest()
|
||||||
|
preferred = storage.preferred_locations[0]
|
||||||
|
storage.put_content({preferred}, storage.blob_path(digest), content)
|
||||||
|
|
||||||
|
is1 = database.ImageStorage.create(content_checksum=digest, uploading=False)
|
||||||
|
is2 = database.ImageStorage.create(content_checksum=digest, uploading=False)
|
||||||
|
|
||||||
|
location = database.ImageStorageLocation.get(name=preferred)
|
||||||
|
|
||||||
|
database.ImageStoragePlacement.create(location=location, storage=is1)
|
||||||
|
database.ImageStoragePlacement.create(location=location, storage=is2)
|
||||||
|
|
||||||
|
# Ensure the CAS path exists.
|
||||||
|
assert storage.exists({preferred}, storage.blob_path(digest))
|
||||||
|
|
||||||
|
# Create two images in the repository, and two tags, each pointing to one of the storages.
|
||||||
|
first_image = Image.create(docker_image_id='i1',
|
||||||
|
repository=repository, storage=is1,
|
||||||
|
ancestors='/')
|
||||||
|
|
||||||
|
second_image = Image.create(docker_image_id='i2',
|
||||||
|
repository=repository, storage=is2,
|
||||||
|
ancestors='/')
|
||||||
|
|
||||||
|
model.tag.store_tag_manifest(repository.namespace_user.username, repository.name,
|
||||||
|
'first', first_image.docker_image_id,
|
||||||
|
'sha:someshahere1', '{}')
|
||||||
|
|
||||||
|
model.tag.store_tag_manifest(repository.namespace_user.username, repository.name,
|
||||||
|
'second', second_image.docker_image_id,
|
||||||
|
'sha:someshahere2', '{}')
|
||||||
|
|
||||||
|
assert_not_deleted(repository, 'i1', 'i2')
|
||||||
|
|
||||||
|
# Delete the first tag.
|
||||||
|
delete_tag(repository, 'first')
|
||||||
|
assert_deleted(repository, 'i1')
|
||||||
|
assert_not_deleted(repository, 'i2')
|
||||||
|
|
||||||
|
# Ensure the CAS path still exists.
|
||||||
|
assert storage.exists({preferred}, storage.blob_path(digest))
|
||||||
|
|
||||||
|
|
||||||
|
def test_images_shared_cas_with_new_blob_table(default_tag_policy, initialized_db):
|
||||||
|
""" A repository with a tag and image that shares its CAS path with a record in the new Blob
|
||||||
|
table. Deleting the first tag should delete the first image, and its storage, but not the
|
||||||
|
file in storage, as it shares its CAS path with the blob row.
|
||||||
|
"""
|
||||||
|
with assert_gc_integrity(expect_storage_removed=True):
|
||||||
|
repository = create_repository()
|
||||||
|
|
||||||
|
# Create two image storage records with the same content checksum.
|
||||||
|
content = 'hello world'
|
||||||
|
digest = 'sha256:' + hashlib.sha256(content).hexdigest()
|
||||||
|
preferred = storage.preferred_locations[0]
|
||||||
|
storage.put_content({preferred}, storage.blob_path(digest), content)
|
||||||
|
|
||||||
|
media_type = database.MediaType.get(name='text/plain')
|
||||||
|
|
||||||
|
is1 = database.ImageStorage.create(content_checksum=digest, uploading=False)
|
||||||
|
database.Blob.create(digest=digest, size=0, media_type=media_type)
|
||||||
|
|
||||||
|
location = database.ImageStorageLocation.get(name=preferred)
|
||||||
|
database.ImageStoragePlacement.create(location=location, storage=is1)
|
||||||
|
|
||||||
|
# Ensure the CAS path exists.
|
||||||
|
assert storage.exists({preferred}, storage.blob_path(digest))
|
||||||
|
|
||||||
|
# Create the image in the repository, and the tag.
|
||||||
|
first_image = Image.create(docker_image_id='i1',
|
||||||
|
repository=repository, storage=is1,
|
||||||
|
ancestors='/')
|
||||||
|
|
||||||
|
model.tag.store_tag_manifest(repository.namespace_user.username, repository.name,
|
||||||
|
'first', first_image.docker_image_id,
|
||||||
|
'sha:someshahere1', '{}')
|
||||||
|
|
||||||
|
assert_not_deleted(repository, 'i1')
|
||||||
|
|
||||||
|
# Delete the tag.
|
||||||
|
delete_tag(repository, 'first')
|
||||||
|
assert_deleted(repository, 'i1')
|
||||||
|
|
||||||
|
# Ensure the CAS path still exists, as it is referenced by the Blob table
|
||||||
|
assert storage.exists({preferred}, storage.blob_path(digest))
|
||||||
|
|
||||||
|
|
||||||
|
def test_purge_repo(app):
|
||||||
|
""" Test that app registers delete_metadata function on repository deletions """
|
||||||
|
with patch('app.tuf_metadata_api') as mock_tuf:
|
||||||
|
model.repository.purge_repository("ns", "repo")
|
||||||
|
assert mock_tuf.delete_metadata.called_with("ns", "repo")
|
|
@@ -2,7 +2,7 @@ import pytest

 from peewee import IntegrityError

-from data.model.repository import create_repository, purge_repository
+from data.model.repository import create_repository, purge_repository, is_empty
 from test.fixtures import *


 def test_duplicate_repository_different_kinds(initialized_db):
@@ -12,3 +12,10 @@ def test_duplicate_repository_different_kinds(initialized_db):
   # Try to create an app repo with the same name, which should fail.
   with pytest.raises(IntegrityError):
     create_repository('devtable', 'somenewrepo', None, repo_kind='application')
+
+
+def test_is_empty(initialized_db):
+  create_repository('devtable', 'somenewrepo', None, repo_kind='image')
+
+  assert is_empty('devtable', 'somenewrepo')
+  assert not is_empty('devtable', 'simple')
@@ -16,6 +16,9 @@ def list_packages_query(namespace=None, media_type=None, search_query=None, user
                                             username=username,
                                             search_fields=fields,
                                             limit=50)
+    if not repositories:
+      return []
+
   repo_query = (Repository
                 .select(Repository, Namespace.username)
                 .join(Namespace, on=(Repository.namespace_user == Namespace.id))
@@ -10,6 +10,8 @@ logger = logging.getLogger(__name__)

 UserInformation = namedtuple('UserInformation', ['username', 'email', 'id'])

+DISABLED_MESSAGE = 'User creation is disabled. Please contact your adminstrator to gain access.'
+
 class FederatedUsers(object):
   """ Base class for all federated users systems. """

@@ -96,7 +98,10 @@ class FederatedUsers(object):
   def _get_and_link_federated_user_info(self, username, email):
     db_user = model.user.verify_federated_login(self._federated_service, username)
     if not db_user:
-      # We must create the user in our db
+      # We must create the user in our db. Check to see if this is allowed.
+      if not features.USER_CREATION:
+        return (None, DISABLED_MESSAGE)
+
       valid_username = None
       for valid_username in generate_valid_usernames(username):
         if model.user.is_username_unique(valid_username):

36  data/users/test/test_users.py  Normal file
@@ -0,0 +1,36 @@
import pytest

from mock import patch

from data.database import model
from data.users.federated import DISABLED_MESSAGE
from test.test_ldap import mock_ldap
from test.test_keystone_auth import fake_keystone

from test.fixtures import *

@pytest.mark.parametrize('auth_system_builder, user1, user2', [
  (mock_ldap, ('someuser', 'somepass'), ('testy', 'password')),
  (fake_keystone, ('cool.user', 'password'), ('some.neat.user', 'foobar')),
])
def test_auth_createuser(auth_system_builder, user1, user2, config, app):
  with auth_system_builder() as auth:
    # Login as a user and ensure a row in the database is created for them.
    user, err = auth.verify_and_link_user(*user1)
    assert err is None
    assert user

    federated_info = model.user.lookup_federated_login(user, auth.federated_service)
    assert federated_info is not None

    # Disable user creation.
    with patch('features.USER_CREATION', False):
      # Ensure that the existing user can login.
      user_again, err = auth.verify_and_link_user(*user1)
      assert err is None
      assert user_again.id == user.id

      # Ensure that a new user cannot.
      new_user, err = auth.verify_and_link_user(*user2)
      assert new_user is None
      assert err == DISABLED_MESSAGE
@@ -387,23 +387,6 @@ def define_json_response(schema_name):
   return wrapper


-def disallow_under_trust(func):
-  """ Disallows the decorated operation for repository when it has trust enabled.
-  """
-  @wraps(func)
-  def wrapper(self, *args, **kwargs):
-    if features.SIGNING:
-      namespace = args[0]
-      repository = args[1]
-
-      repo = model.repository.get_repository(namespace, repository)
-      if repo is not None and repo.trust_enabled:
-        raise InvalidRequest('Cannot call this method on a repostory with trust enabled')
-
-    return func(self, *args, **kwargs)
-  return wrapper
-
-
 import endpoints.api.billing
 import endpoints.api.build
 import endpoints.api.discovery
@@ -429,4 +412,3 @@ import endpoints.api.trigger
 import endpoints.api.user
 import endpoints.api.secscan
 import endpoints.api.signing
-
@@ -19,8 +19,7 @@ from data.buildlogs import BuildStatusRetrievalError
 from endpoints.api import (RepositoryParamResource, parse_args, query_param, nickname, resource,
                            require_repo_read, require_repo_write, validate_json_request,
                            ApiResource, internal_only, format_date, api, path_param,
-                           require_repo_admin, abort, disallow_for_app_repositories,
-                           disallow_under_trust)
+                           require_repo_admin, abort, disallow_for_app_repositories)
 from endpoints.building import start_build, PreparedBuild, MaximumBuildsQueuedException
 from endpoints.exception import Unauthorized, NotFound, InvalidRequest
 from util.names import parse_robot_username
@@ -226,7 +225,6 @@ class RepositoryBuildList(RepositoryParamResource):
   @require_repo_write
   @nickname('requestRepoBuild')
   @disallow_for_app_repositories
-  @disallow_under_trust
   @validate_json_request('RepositoryBuildRequest')
   def post(self, namespace, repository):
     """ Request that a repository be built and pushed from the specified input. """
@@ -363,7 +361,6 @@ class RepositoryBuildResource(RepositoryParamResource):

   @require_repo_admin
   @nickname('cancelRepoBuild')
-  @disallow_under_trust
   @disallow_for_app_repositories
   def delete(self, namespace, repository, build_uuid):
     """ Cancels a repository build. """
@@ -58,6 +58,11 @@ class GlobalUserMessages(ApiResource):
       'message': {
         'type': 'object',
         'description': 'A single message',
+        'required': [
+          'content',
+          'media_type',
+          'severity',
+        ],
         'properties': {
           'content': {
             'type': 'string',
@@ -10,6 +10,7 @@ from endpoints.exception import NotFound
 from data import model

 from digest import digest_tools
+from util.validation import VALID_LABEL_KEY_REGEX

 BASE_MANIFEST_ROUTE = '/v1/repository/<apirepopath:repository>/manifest/<regex("{0}"):manifestref>'
 MANIFEST_DIGEST_ROUTE = BASE_MANIFEST_ROUTE.format(digest_tools.DIGEST_PATTERN)
@@ -92,9 +93,17 @@ class RepositoryManifestLabels(RepositoryParamResource):
     if label_validator.has_reserved_prefix(label_data['key']):
       abort(400, message='Label has a reserved prefix')

-    label = model.label.create_manifest_label(tag_manifest, label_data['key'],
-                                              label_data['value'], 'api',
-                                              media_type_name=label_data['media_type'])
+    label = None
+    try:
+      label = model.label.create_manifest_label(tag_manifest, label_data['key'],
+                                                label_data['value'], 'api',
+                                                media_type_name=label_data['media_type'])
+    except model.InvalidLabelKeyException:
+      abort(400, message='Label is of an invalid format or missing please use %s format for labels'.format(
+        VALID_LABEL_KEY_REGEX))
+    except model.InvalidMediaTypeException:
+      abort(400, message='Media type is invalid please use a valid media type of text/plain or application/json')

     metadata = {
       'id': label.uuid,
       'key': label_data['key'],
@@ -73,6 +73,9 @@ class RepositoryUserPermissionList(RepositoryParamResource):
       # This repository isn't under an org
       pass

+    # Load the permissions.
+    repo_perms = model.user.get_all_repo_users(namespace, repository)
+
     # Determine how to wrap the role(s).
     def wrapped_role_view(repo_perm):
       return wrap_role_view_user(role_view(repo_perm), repo_perm.user)
@@ -80,20 +83,17 @@ class RepositoryUserPermissionList(RepositoryParamResource):
     role_view_func = wrapped_role_view

     if org:
-      org_members = model.organization.get_organization_member_set(namespace)
+      users_filter = {perm.user for perm in repo_perms}
+      org_members = model.organization.get_organization_member_set(org, users_filter=users_filter)
       current_func = role_view_func

       def wrapped_role_org_view(repo_perm):
-        return wrap_role_view_org(current_func(repo_perm), repo_perm.user,
-                                  org_members)
+        return wrap_role_view_org(current_func(repo_perm), repo_perm.user, org_members)

       role_view_func = wrapped_role_org_view

-    # Load and return the permissions.
-    repo_perms = model.user.get_all_repo_users(namespace, repository)
     return {
-      'permissions': {perm.user.username: role_view_func(perm)
-                      for perm in repo_perms}
+      'permissions': {perm.user.username: role_view_func(perm) for perm in repo_perms}
     }

@@ -156,8 +156,8 @@ class RepositoryUserPermission(RepositoryParamResource):
       perm_view = wrap_role_view_user(role_view(perm), perm.user)

       try:
-        model.organization.get_organization(namespace)
-        org_members = model.organization.get_organization_member_set(namespace)
+        org = model.organization.get_organization(namespace)
+        org_members = model.organization.get_organization_member_set(org, users_filter={perm.user})
         perm_view = wrap_role_view_org(perm_view, perm.user, org_members)
       except model.InvalidOrganizationException:
         # This repository is not part of an organization
@@ -183,8 +183,8 @@ class RepositoryUserPermission(RepositoryParamResource):
       perm_view = wrap_role_view_user(role_view(perm), perm.user)

       try:
-        model.organization.get_organization(namespace)
-        org_members = model.organization.get_organization_member_set(namespace)
+        org = model.organization.get_organization(namespace)
+        org_members = model.organization.get_organization_member_set(org, users_filter={perm.user})
         perm_view = wrap_role_view_org(perm_view, perm.user, org_members)
       except model.InvalidOrganizationException:
         # This repository is not part of an organization
@@ -133,7 +133,10 @@ class PermissionPrototypeList(ApiResource):
         raise NotFound()

       permissions = model.permission.get_prototype_permissions(org)
-      org_members = model.organization.get_organization_member_set(orgname)
+
+      users_filter = ({p.activating_user for p in permissions} |
+                      {p.delegate_user for p in permissions})
+      org_members = model.organization.get_organization_member_set(org, users_filter=users_filter)
       return {'prototypes': [prototype_view(p, org_members) for p in permissions]}

     raise Unauthorized()
@@ -180,7 +183,9 @@ class PermissionPrototypeList(ApiResource):
       prototype = model.permission.add_prototype_permission(org, role_name, activating_user,
                                                             delegate_user, delegate_team)
       log_prototype_action('create_prototype_permission', orgname, prototype)
-      org_members = model.organization.get_organization_member_set(orgname)
+
+      users_filter = {prototype.activating_user, prototype.delegate_user}
+      org_members = model.organization.get_organization_member_set(org, users_filter=users_filter)
       return prototype_view(prototype, org_members)

     raise Unauthorized()
@@ -257,7 +262,9 @@ class PermissionPrototype(ApiResource):

       log_prototype_action('modify_prototype_permission', orgname, prototype,
                            original_role=existing.role.name)
-      org_members = model.organization.get_organization_member_set(orgname)
+
+      users_filter = {prototype.activating_user, prototype.delegate_user}
+      org_members = model.organization.get_organization_member_set(org, users_filter=users_filter)
       return prototype_view(prototype, org_members)

     raise Unauthorized()
@@ -2,6 +2,7 @@

 import json

+import logging
 from flask import request

 from app import notification_queue
@@ -14,7 +15,7 @@ from endpoints.notificationmethod import (NotificationMethod,
                                           CannotValidateNotificationMethodException)
 from endpoints.notificationhelper import build_notification_data
 from data import model

+logger = logging.getLogger(__name__)

 def notification_view(note):
   config = {}
@@ -36,6 +37,7 @@ def notification_view(note):
     'config': config,
     'title': note.title,
     'event_config': event_config,
+    'number_of_failures': note.number_of_failures,
   }

@@ -154,6 +156,20 @@ class RepositoryNotification(RepositoryParamResource):

     return 'No Content', 204

+  @require_repo_admin
+  @nickname('resetRepositoryNotificationFailures')
+  @disallow_for_app_repositories
+  def post(self, namespace, repository, uuid):
+    """ Resets repository notification to 0 failures. """
+    reset = model.notification.reset_notification_number_of_failures(namespace, repository, uuid)
+    if reset is not None:
+      log_action('reset_repo_notification', namespace,
+                 {'repo': repository, 'namespace': namespace, 'notification_id': uuid,
+                  'event': reset.event.name, 'method': reset.method.name},
+                 repo=model.repository.get_repository(namespace, repository))
+
+    return 'No Content', 204
+

 @resource('/v1/repository/<apirepopath:repository>/notification/<uuid>/test')
 @path_param('repository', 'The full path of the repository. e.g. namespace/name')
@@ -78,7 +78,7 @@ class UserRobotList(ApiResource):
   @nickname('getUserRobots')
   @parse_args()
   @query_param('permissions',
-               'Whether to include repostories and teams in which the robots have permission.',
+               'Whether to include repositories and teams in which the robots have permission.',
                type=truthy_bool, default=False)
   def get(self, parsed_args):
     """ List the available robots for the user. """
@@ -27,9 +27,4 @@ class RepositorySignatures(RepositoryParamResource):
     if repo is None or not repo.trust_enabled:
       raise NotFound()

-    tag_data, expiration = tuf_metadata_api.get_default_tags_with_expiration(namespace, repository)
-    return {
-      'tags': tag_data,
-      'expiration': expiration
-    }
+    return {'delegations': tuf_metadata_api.get_all_tags_with_expiration(namespace, repository)}
@@ -32,6 +32,7 @@ from util.useremails import send_confirmation_email, send_recovery_email
 from util.license import decode_license, LicenseDecodeError
 from util.security.ssl import load_certificate, CertInvalidException
 from util.config.validator import EXTRA_CA_DIRECTORY
+from _init import ROOT_DIR


 logger = logging.getLogger(__name__)
@@ -179,7 +180,7 @@ class ChangeLog(ApiResource):
   def get(self):
     """ Returns the change log for this installation. """
     if SuperUserPermission().can():
-      with open('CHANGELOG.md', 'r') as f:
+      with open(os.path.join(ROOT_DIR, 'CHANGELOG.md'), 'r') as f:
         return {
           'log': f.read()
         }
@@ -852,7 +853,7 @@ class SuperUserCustomCertificates(ApiResource):
     cert_views = []
     for extra_cert_path in extra_certs_found:
       try:
-        cert_full_path = os.path.join(EXTRA_CA_DIRECTORY, extra_cert_path)
+        cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, extra_cert_path)
         with config_provider.get_volume_file(cert_full_path) as f:
           certificate = load_certificate(f.read())
           cert_views.append({
@@ -900,7 +901,7 @@ class SuperUserCustomCertificate(ApiResource):
       abort(400)

     logger.debug('Saving custom certificate %s', certpath)
-    cert_full_path = os.path.join(EXTRA_CA_DIRECTORY, certpath)
+    cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, certpath)
     config_provider.save_volume_file(cert_full_path, uploaded_file)
     logger.debug('Saved custom certificate %s', certpath)

@@ -934,7 +935,7 @@ class SuperUserCustomCertificate(ApiResource):
   @verify_not_prod
   def delete(self, certpath):
     if SuperUserPermission().can():
-      cert_full_path = os.path.join(EXTRA_CA_DIRECTORY, certpath)
+      cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, certpath)
       config_provider.remove_volume_file(cert_full_path)
       return '', 204
@@ -2,18 +2,37 @@

 from flask import request, abort

-from endpoints.api import (
-  resource, nickname, require_repo_read, require_repo_write, RepositoryParamResource, log_action,
-  validate_json_request, path_param, parse_args, query_param, truthy_bool,
-  disallow_for_app_repositories, disallow_under_trust)
-from endpoints.exception import NotFound
-from endpoints.api.image import image_view
-from endpoints.v2.manifest import _generate_and_store_manifest
-from data import model
 from auth.auth_context import get_authenticated_user
+from data import model
+from endpoints.api import (resource, nickname, require_repo_read, require_repo_write,
+                           RepositoryParamResource, log_action, validate_json_request, path_param,
+                           parse_args, query_param, truthy_bool, disallow_for_app_repositories)
+from endpoints.api.image import image_view
+from endpoints.api.tag_models_pre_oci import pre_oci_model
+from endpoints.exception import NotFound
+from endpoints.v2.manifest import _generate_and_store_manifest
 from util.names import TAG_ERROR, TAG_REGEX


+def tag_view(tag):
+  tag_info = {
+    'name': tag.name,
+    'docker_image_id': tag.docker_image_id,
+    'reversion': tag.reversion,
+  }
+
+  if tag.lifetime_start_ts > 0:
+    tag_info['start_ts'] = tag.lifetime_start_ts
+
+  if tag.lifetime_end_ts > 0:
+    tag_info['end_ts'] = tag.lifetime_end_ts
+
+  if tag.manifest_list:
+    tag_info['manifest_digest'] = tag.manifest_list
+
+  return tag_info
+
+
 @resource('/v1/repository/<apirepopath:repository>/tag/')
 @path_param('repository', 'The full path of the repository. e.g. namespace/name')
 class ListRepositoryTags(RepositoryParamResource):
@@ -28,39 +47,21 @@ class ListRepositoryTags(RepositoryParamResource):
   @query_param('page', 'Page index for the results. Default 1.', type=int, default=1)
   @nickname('listRepoTags')
   def get(self, namespace, repository, parsed_args):
-    repo = model.repository.get_repository(namespace, repository)
-    if not repo:
-      raise NotFound()
-
-    def tag_view(tag):
-      tag_info = {
-        'name': tag.name,
-        'docker_image_id': tag.image.docker_image_id,
-        'reversion': tag.reversion,
-      }
-
-      if tag.lifetime_start_ts > 0:
-        tag_info['start_ts'] = tag.lifetime_start_ts
-
-      if tag.lifetime_end_ts > 0:
-        tag_info['end_ts'] = tag.lifetime_end_ts
-
-      if tag.id in manifest_map:
-        tag_info['manifest_digest'] = manifest_map[tag.id]
-
-      return tag_info
-
     specific_tag = parsed_args.get('specificTag') or None

     page = max(1, parsed_args.get('page', 1))
     limit = min(100, max(1, parsed_args.get('limit', 50)))
-    tags, manifest_map, more = model.tag.list_repository_tag_history(repo, page=page, size=limit,
-                                                                     specific_tag=specific_tag)
+    tag_history = pre_oci_model.list_repository_tag_history(namespace_name=namespace,
+                                                            repository_name=repository, page=page,
+                                                            size=limit, specific_tag=specific_tag)
+
+    if not tag_history:
+      raise NotFound()
+
     return {
-      'tags': [tag_view(tag) for tag in tags],
+      'tags': [tag_view(tag) for tag in tag_history.tags],
       'page': page,
-      'has_additional': more,
+      'has_additional': tag_history.more,
     }

@@ -73,9 +74,7 @@ class RepositoryTag(RepositoryParamResource):
     'MoveTag': {
       'type': 'object',
       'description': 'Description of to which image a new or existing tag should point',
-      'required': [
-        'image',
-      ],
+      'required': ['image',],
       'properties': {
         'image': {
           'type': 'string',
@@ -87,7 +86,6 @@ class RepositoryTag(RepositoryParamResource):

   @require_repo_write
   @disallow_for_app_repositories
-  @disallow_under_trust
   @nickname('changeTagImage')
   @validate_json_request('MoveTag')
   def put(self, namespace, repository, tag):
@@ -128,7 +126,6 @@ class RepositoryTag(RepositoryParamResource):

   @require_repo_write
   @disallow_for_app_repositories
-  @disallow_under_trust
   @nickname('deleteFullTag')
   def delete(self, namespace, repository, tag):
     """ Delete the specified repository tag. """
@@ -207,9 +204,7 @@ class RestoreTag(RepositoryParamResource):
     'RestoreTag': {
       'type': 'object',
       'description': 'Restores a tag to a specific image',
-      'required': [
-        'image',
-      ],
+      'required': ['image',],
       'properties': {
         'image': {
           'type': 'string',
@@ -225,7 +220,6 @@ class RestoreTag(RepositoryParamResource):

   @require_repo_write
   @disallow_for_app_repositories
-  @disallow_under_trust
   @nickname('restoreTag')
   @validate_json_request('RestoreTag')
   def post(self, namespace, repository, tag):
@@ -254,8 +248,8 @@ class RestoreTag(RepositoryParamResource):
     if existing_image is not None:
       log_data['original_image'] = existing_image.docker_image_id

-    log_action('revert_tag', namespace, log_data, repo=model.repository.get_repository(
-      namespace, repository))
+    log_action('revert_tag', namespace, log_data, repo=model.repository.get_repository(namespace,
+                                                                                        repository))

     return {
       'image_id': image_id,
43  endpoints/api/tag_models_interface.py  Normal file
@@ -0,0 +1,43 @@
from abc import ABCMeta, abstractmethod
from collections import namedtuple

from six import add_metaclass


class Tag(
    namedtuple('Tag', [
      'name', 'image', 'reversion', 'lifetime_start_ts', 'lifetime_end_ts', 'manifest_list',
      'docker_image_id'
    ])):
  """
  Tag represents a name to an image.
  :type name: string
  :type image: Image
  :type reversion: boolean
  :type lifetime_start_ts: int
  :type lifetime_end_ts: int
  :type manifest_list: [manifest_digest]
  :type docker_image_id: string
  """


class RepositoryTagHistory(namedtuple('RepositoryTagHistory', ['tags', 'more'])):
  """
  Tag represents a name to an image.
  :type tags: [Tag]
  :type more: boolean
  """


@add_metaclass(ABCMeta)
class TagDataInterface(object):
  """
  Interface that represents all data store interactions required by a Tag.
  """

  @abstractmethod
  def list_repository_tag_history(self, namespace_name, repository_name, page=1, size=100,
                                  specific_tag=None):
    """
    Returns a RepositoryTagHistory with a list of historic tags and whether there are more tags then returned.
    """
30  endpoints/api/tag_models_pre_oci.py  Normal file
@@ -0,0 +1,30 @@
from data import model
from endpoints.api.tag_models_interface import TagDataInterface, Tag, RepositoryTagHistory


class PreOCIModel(TagDataInterface):
  """
  PreOCIModel implements the data model for the Tags using a database schema
  before it was changed to support the OCI specification.
  """

  def list_repository_tag_history(self, namespace_name, repository_name, page=1, size=100,
                                  specific_tag=None):
    repository = model.repository.get_repository(namespace_name, repository_name)
    if repository is None:
      return None
    tags, manifest_map, more = model.tag.list_repository_tag_history(repository, page, size,
                                                                     specific_tag)
    repository_tag_history = []
    for tag in tags:
      manifest_list = None
      if tag.id in manifest_map:
        manifest_list = manifest_map[tag.id]
      repository_tag_history.append(
        Tag(name=tag.name, image=tag.image, reversion=tag.reversion,
            lifetime_start_ts=tag.lifetime_start_ts, lifetime_end_ts=tag.lifetime_end_ts,
            manifest_list=manifest_list, docker_image_id=tag.image.docker_image_id))
    return RepositoryTagHistory(tags=repository_tag_history, more=more)


pre_oci_model = PreOCIModel()
@@ -1,58 +1,10 @@
import datetime
from endpoints.test.shared import conduct_call
import json

from contextlib import contextmanager
from data import model
from endpoints.api import api

CSRF_TOKEN_KEY = '_csrf_token'
CSRF_TOKEN = '123csrfforme'


@contextmanager
def client_with_identity(auth_username, client):
  with client.session_transaction() as sess:
    if auth_username and auth_username is not None:
      loaded = model.user.get_user(auth_username)
      sess['user_id'] = loaded.uuid
      sess['login_time'] = datetime.datetime.now()
      sess[CSRF_TOKEN_KEY] = CSRF_TOKEN
    else:
      sess['user_id'] = 'anonymous'

  yield client

  with client.session_transaction() as sess:
    sess['user_id'] = None
    sess['login_time'] = None
    sess[CSRF_TOKEN_KEY] = None


def add_csrf_param(params):
  """ Returns a params dict with the CSRF parameter added. """
  params = params or {}
  params[CSRF_TOKEN_KEY] = CSRF_TOKEN
  return params


def conduct_api_call(client, resource, method, params, body=None, expected_code=200):
  """ Conducts an API call to the given resource via the given client, and ensures its returned
      status matches the code given.

      Returns the response.
  """
  params = add_csrf_param(params)
  return conduct_call(client, resource, api.url_for, method, params, body, expected_code)

  final_url = api.url_for(resource, **params)

  headers = {}
  headers.update({"Content-Type": "application/json"})

  if body is not None:
    body = json.dumps(body)

  rv = client.open(final_url, method=method, data=body, headers=headers)
  msg = '%s %s: got %s expected: %s | %s' % (method, final_url, rv.status_code, expected_code,
                                             rv.data)
  assert rv.status_code == expected_code, msg
  return rv
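The deleted body above is a good guide to what the shared conduct_call helper in endpoints/test/shared.py has to do; a plausible reconstruction (only the call signature with the injected url_for is visible in this diff, the rest is inferred from the removed code):

    # Assumed shape of endpoints.test.shared.conduct_call, inferred from the
    # deleted conduct_api_call body above; not the verbatim helper.
    import json

    def conduct_call(client, resource, url_for, method, params, body=None, expected_code=200):
      # Issues the request via the test client and asserts on the returned status code.
      params = params or {}
      final_url = url_for(resource, **params)
      headers = {'Content-Type': 'application/json'}
      if body is not None:
        body = json.dumps(body)
      rv = client.open(final_url, method=method, data=body, headers=headers)
      msg = '%s %s: got %s expected: %s | %s' % (method, final_url, rv.status_code,
                                                 expected_code, rv.data)
      assert rv.status_code == expected_code, msg
      return rv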
@@ -16,7 +16,8 @@ from endpoints.api.trigger import (BuildTriggerList, BuildTrigger, BuildTriggerS
                                   BuildTriggerActivate, BuildTriggerAnalyze, ActivateBuildTrigger,
                                   TriggerBuildList, BuildTriggerFieldValues, BuildTriggerSources,
                                   BuildTriggerSourceNamespaces)
from endpoints.api.test.shared import client_with_identity, conduct_api_call
from endpoints.api.test.shared import conduct_api_call
from endpoints.test.shared import client_with_identity
from test.fixtures import *

BUILD_ARGS = {'build_uuid': '1234'}
@@ -45,6 +46,7 @@ FIELD_ARGS = {'trigger_uuid': '1234', 'field_name': 'foobar'}
  (RepositoryNotificationList, 'post', None),
  (RepositoryNotification, 'get', NOTIFICATION_ARGS),
  (RepositoryNotification, 'delete', NOTIFICATION_ARGS),
  (RepositoryNotification, 'post', NOTIFICATION_ARGS),
  (TestRepositoryNotification, 'post', NOTIFICATION_ARGS),
  (RepositoryImageSecurity, 'get', IMAGE_ARGS),
  (RepositoryManifestSecurity, 'get', MANIFEST_ARGS),
@@ -1,50 +0,0 @@
import pytest

from data import model
from endpoints.api.build import RepositoryBuildList, RepositoryBuildResource
from endpoints.api.tag import RepositoryTag, RestoreTag
from endpoints.api.trigger import (BuildTrigger, BuildTriggerSubdirs,
                                   BuildTriggerActivate, BuildTriggerAnalyze, ActivateBuildTrigger,
                                   BuildTriggerFieldValues, BuildTriggerSources,
                                   BuildTriggerSourceNamespaces)
from endpoints.api.test.shared import client_with_identity, conduct_api_call
from test.fixtures import *

BUILD_ARGS = {'build_uuid': '1234'}
IMAGE_ARGS = {'imageid': '1234', 'image_id': 1234}
MANIFEST_ARGS = {'manifestref': 'sha256:abcd1234'}
LABEL_ARGS = {'manifestref': 'sha256:abcd1234', 'labelid': '1234'}
NOTIFICATION_ARGS = {'uuid': '1234'}
TAG_ARGS = {'tag': 'foobar'}
TRIGGER_ARGS = {'trigger_uuid': '1234'}
FIELD_ARGS = {'trigger_uuid': '1234', 'field_name': 'foobar'}


@pytest.mark.parametrize('resource, method, params', [
  (RepositoryBuildList, 'post', None),
  (RepositoryBuildResource, 'delete', BUILD_ARGS),
  (RepositoryTag, 'put', TAG_ARGS),
  (RepositoryTag, 'delete', TAG_ARGS),
  (RestoreTag, 'post', TAG_ARGS),
  (BuildTrigger, 'delete', TRIGGER_ARGS),
  (BuildTriggerSubdirs, 'post', TRIGGER_ARGS),
  (BuildTriggerActivate, 'post', TRIGGER_ARGS),
  (BuildTriggerAnalyze, 'post', TRIGGER_ARGS),
  (ActivateBuildTrigger, 'post', TRIGGER_ARGS),
  (BuildTriggerFieldValues, 'post', FIELD_ARGS),
  (BuildTriggerSources, 'post', TRIGGER_ARGS),
  (BuildTriggerSourceNamespaces, 'get', TRIGGER_ARGS),
])
def test_disallowed_for_apps(resource, method, params, client):
  namespace = 'devtable'
  repository = 'somerepo'

  devtable = model.user.get_user('devtable')
  repo = model.repository.create_repository(namespace, repository, devtable, repo_kind='image')
  model.repository.set_trust(repo, True)

  params = params or {}
  params['repository'] = '%s/%s' % (namespace, repository)

  with client_with_identity('devtable', client) as cl:
    conduct_api_call(cl, resource, method, params, None, 400)
103
endpoints/api/test/test_models_pre_oci.py
Normal file
@@ -0,0 +1,103 @@
import pytest
from endpoints.api.tag_models_interface import RepositoryTagHistory, Tag
from mock import Mock

from data import model
from endpoints.api.tag_models_pre_oci import pre_oci_model

EMPTY_REPOSITORY = 'empty_repository'
EMPTY_NAMESPACE = 'empty_namespace'
BAD_REPOSITORY_NAME = 'bad_repository_name'
BAD_NAMESPACE_NAME = 'bad_namespace_name'


@pytest.fixture
def get_monkeypatch(monkeypatch):
  return monkeypatch


def mock_out_get_repository(monkeypatch, namespace_name, repository_name):
  def return_none(namespace_name, repository_name):
    return None

  def return_repository(namespace_name, repository_name):
    return 'repository'

  if namespace_name == BAD_NAMESPACE_NAME or repository_name == BAD_REPOSITORY_NAME:
    return_function = return_none
  else:
    return_function = return_repository

  monkeypatch.setattr(model.repository, 'get_repository', return_function)


def create_mock_tag(name, reversion, lifetime_start_ts, lifetime_end_ts, mock_id, docker_image_id,
                    manifest_list):
  tag_mock = Mock()
  tag_mock.name = name
  image_mock = Mock()
  image_mock.docker_image_id = docker_image_id
  tag_mock.image = image_mock
  tag_mock.reversion = reversion
  tag_mock.lifetime_start_ts = lifetime_start_ts
  tag_mock.lifetime_end_ts = lifetime_end_ts
  tag_mock.id = mock_id
  tag_mock.manifest_list = manifest_list
  tag = Tag(name=name, reversion=reversion, image=image_mock, docker_image_id=docker_image_id,
            lifetime_start_ts=lifetime_start_ts, lifetime_end_ts=lifetime_end_ts,
            manifest_list=manifest_list)
  return tag_mock, tag


first_mock, first_tag = create_mock_tag('tag1', 'rev1', 'start1', 'end1', 'id1',
                                        'docker_image_id1', [])
second_mock, second_tag = create_mock_tag('tag2', 'rev2', 'start2', 'end2', 'id2',
                                          'docker_image_id2', ['manifest'])


def mock_out_list_repository_tag_history(monkeypatch, namespace_name, repository_name, page, size,
                                         specific_tag):
  def list_empty_tag_history(repository, page, size, specific_tag):
    return [], {}, False

  def list_filled_tag_history(repository, page, size, specific_tag):
    tags = [first_mock, second_mock]
    return tags, {
      first_mock.id: first_mock.manifest_list,
      second_mock.id: second_mock.manifest_list
    }, len(tags) > size

  def list_only_second_tag(repository, page, size, specific_tag):
    tags = [second_mock]
    return tags, {second_mock.id: second_mock.manifest_list}, len(tags) > size

  if namespace_name == EMPTY_NAMESPACE or repository_name == EMPTY_REPOSITORY:
    return_function = list_empty_tag_history
  else:
    if specific_tag == 'tag2':
      return_function = list_only_second_tag
    else:
      return_function = list_filled_tag_history

  monkeypatch.setattr(model.tag, 'list_repository_tag_history', return_function)


@pytest.mark.parametrize(
  'expected, namespace_name, repository_name, page, size, specific_tag', [
    (None, BAD_NAMESPACE_NAME, 'repository_name', 1, 100, None),
    (None, 'namespace_name', BAD_REPOSITORY_NAME, 1, 100, None),
    (RepositoryTagHistory(tags=[], more=False), EMPTY_NAMESPACE, EMPTY_REPOSITORY, 1, 100, None),
    (RepositoryTagHistory(tags=[first_tag, second_tag], more=False), 'namespace', 'repository', 1,
     100, None),
    (RepositoryTagHistory(tags=[first_tag, second_tag], more=True), 'namespace', 'repository', 1,
     1, None),
    (RepositoryTagHistory(tags=[second_tag], more=False), 'namespace', 'repository', 1, 100,
     'tag2'),
  ])
def test_list_repository_tag_history(expected, namespace_name, repository_name, page, size,
                                     specific_tag, get_monkeypatch):
  mock_out_get_repository(get_monkeypatch, namespace_name, repository_name)
  mock_out_list_repository_tag_history(get_monkeypatch, namespace_name, repository_name, page,
                                       size, specific_tag)
  assert pre_oci_model.list_repository_tag_history(namespace_name, repository_name, page, size,
                                                   specific_tag) == expected
@@ -2,8 +2,9 @@ import pytest
from data import model
from endpoints.api import api
from endpoints.api.test.shared import client_with_identity, conduct_api_call
from endpoints.api.test.shared import conduct_api_call
from endpoints.api.organization import Organization
from endpoints.test.shared import client_with_identity
from test.fixtures import *

@pytest.mark.parametrize('expiration, expected_code', [
@@ -2,8 +2,9 @@ import pytest
from mock import patch, ANY, MagicMock

from endpoints.api.test.shared import client_with_identity, conduct_api_call
from endpoints.api.test.shared import conduct_api_call
from endpoints.api.repository import RepositoryTrust, Repository
from endpoints.test.shared import client_with_identity
from features import FeatureNameValue

from test.fixtures import *
@@ -52,8 +53,8 @@ def test_signing_disabled(client):
    params = {'repository': 'devtable/simple'}
    response = conduct_api_call(cl, Repository, 'GET', params).json
    assert not response['trust_enabled']


def test_sni_support():
  import ssl
  assert ssl.HAS_SNI
@@ -4,32 +4,33 @@ from playhouse.test_utils import assert_query_count
from data.model import _basequery
from endpoints.api.search import ConductRepositorySearch, ConductSearch
from endpoints.api.test.shared import client_with_identity, conduct_api_call
from endpoints.api.test.shared import conduct_api_call
from endpoints.test.shared import client_with_identity
from test.fixtures import *

@pytest.mark.parametrize('query, expected_query_count', [
@pytest.mark.parametrize('query', [
  ('simple', 7),
  ('simple'),
  ('public', 6),
  ('public'),
  ('repository', 6),
  ('repository'),
])
def test_repository_search(query, expected_query_count, client):
def test_repository_search(query, client):
  with client_with_identity('devtable', client) as cl:
    params = {'query': query}
    with assert_query_count(expected_query_count):
    with assert_query_count(6):
      result = conduct_api_call(cl, ConductRepositorySearch, 'GET', params, None, 200).json
    assert result['start_index'] == 0
    assert result['page'] == 1
    assert len(result['results'])


@pytest.mark.parametrize('query, expected_query_count', [
@pytest.mark.parametrize('query', [
  ('simple', 8),
  ('simple'),
  ('public', 8),
  ('public'),
  ('repository', 8),
  ('repository'),
])
def test_search_query_count(query, expected_query_count, client):
def test_search_query_count(query, client):
  with client_with_identity('devtable', client) as cl:
    params = {'query': query}
    with assert_query_count(expected_query_count):
    with assert_query_count(8):
      result = conduct_api_call(cl, ConductSearch, 'GET', params, None, 200).json
    assert len(result['results'])
@@ -2,13 +2,15 @@ import pytest
from flask_principal import AnonymousIdentity

from endpoints.api import api
from endpoints.api.repositorynotification import RepositoryNotification
from endpoints.api.team import OrganizationTeamSyncing
from endpoints.api.test.shared import client_with_identity, conduct_api_call
from endpoints.api.test.shared import conduct_api_call
from endpoints.api.repository import RepositoryTrust
from endpoints.api.signing import RepositorySignatures
from endpoints.api.search import ConductRepositorySearch
from endpoints.api.superuser import SuperUserRepositoryBuildLogs, SuperUserRepositoryBuildResource
from endpoints.api.superuser import SuperUserRepositoryBuildStatus
from endpoints.test.shared import client_with_identity

from test.fixtures import *

@@ -16,6 +18,8 @@ TEAM_PARAMS = {'orgname': 'buynlarge', 'teamname': 'owners'}
BUILD_PARAMS = {'build_uuid': 'test-1234'}
REPO_PARAMS = {'repository': 'devtable/someapp'}
SEARCH_PARAMS = {'query': ''}
NOTIFICATION_PARAMS = {'namespace': 'devtable', 'repository': 'devtable/simple', 'uuid': 'some uuid'}


@pytest.mark.parametrize('resource,method,params,body,identity,expected', [
  (OrganizationTeamSyncing, 'POST', TEAM_PARAMS, {}, None, 403),
@@ -52,6 +56,11 @@ SEARCH_PARAMS = {'query': ''}
  (RepositorySignatures, 'GET', REPO_PARAMS, {}, 'reader', 403),
  (RepositorySignatures, 'GET', REPO_PARAMS, {}, 'devtable', 404),

  (RepositoryNotification, 'POST', NOTIFICATION_PARAMS, {}, None, 403),
  (RepositoryNotification, 'POST', NOTIFICATION_PARAMS, {}, 'freshuser', 403),
  (RepositoryNotification, 'POST', NOTIFICATION_PARAMS, {}, 'reader', 403),
  (RepositoryNotification, 'POST', NOTIFICATION_PARAMS, {}, 'devtable', 204),

  (RepositoryTrust, 'POST', REPO_PARAMS, {'trust_enabled': True}, None, 403),
  (RepositoryTrust, 'POST', REPO_PARAMS, {'trust_enabled': True}, 'freshuser', 403),
  (RepositoryTrust, 'POST', REPO_PARAMS, {'trust_enabled': True}, 'reader', 403),
@@ -3,42 +3,53 @@ import pytest
from collections import Counter
from mock import patch

from endpoints.api.test.shared import client_with_identity, conduct_api_call
from endpoints.api.test.shared import conduct_api_call
from endpoints.api.signing import RepositorySignatures
from endpoints.test.shared import client_with_identity

from test.fixtures import *

VALID_TARGETS = {
  'latest': {
    'hashes': {
      'sha256': 'mLmxwTyUrqIRDaz8uaBapfrp3GPERfsDg2kiMujlteo='
    },
    'length': 1500
  },
  'test_tag': {
    'hashes': {
      'sha256': '1234123'
    },
    'length': 50
  }
}

VALID_TARGETS_MAP = {
  "targets/ci": {
    "targets": {
      "latest": {
        "hashes": {
          "sha256": "2Q8GLEgX62VBWeL76axFuDj/Z1dd6Zhx0ZDM6kNwPkQ="
        },
        "length": 2111
      }
    },
    "expiration": "2020-05-22T10:26:46.618176424-04:00"
  },
  "targets": {
    "targets": {
      "latest": {
        "hashes": {
          "sha256": "2Q8GLEgX62VBWeL76axFuDj/Z1dd6Zhx0ZDM6kNwPkQ="
        },
        "length": 2111
      }
    },
    "expiration": "2020-05-22T10:26:01.953414888-04:00"}
}


def tags_equal(expected, actual):
  expected_tags = expected.get('tags')
  expected_tags = expected.get('delegations')
  actual_tags = actual.get('tags')
  actual_tags = actual.get('delegations')
  if expected_tags and actual_tags:
    return Counter(expected_tags) == Counter(actual_tags)
  return expected == actual

@pytest.mark.parametrize('targets,expected', [
@pytest.mark.parametrize('targets_map,expected', [
  (VALID_TARGETS, {'tags': VALID_TARGETS, 'expiration': 'expires'}),
  (VALID_TARGETS_MAP, {'delegations': VALID_TARGETS_MAP}),
  ({'bad': 'tags'}, {'tags': {'bad': 'tags'}, 'expiration': 'expires'}),
  ({'bad': 'tags'}, {'delegations': {'bad': 'tags'}}),
  ({}, {'tags': {}, 'expiration': 'expires'}),
  ({}, {'delegations': {}}),
  (None, {'tags': None, 'expiration': 'expires'}),  # API returns None on exceptions
  (None, {'delegations': None}),  # API returns None on exceptions
])
def test_get_signatures(targets, expected, client):
def test_get_signatures(targets_map, expected, client):
  with patch('endpoints.api.signing.tuf_metadata_api') as mock_tuf:
    mock_tuf.get_default_tags_with_expiration.return_value = (targets, 'expires')
    mock_tuf.get_all_tags_with_expiration.return_value = targets_map
    with client_with_identity('devtable', client) as cl:
      params = {'repository': 'devtable/trusted'}
      assert tags_equal(expected, conduct_api_call(cl, RepositorySignatures, 'GET', params, None, 200).json)
@@ -1,9 +1,15 @@
import json

import pytest

from mock import patch, Mock
from mock import patch, Mock, MagicMock, call

from endpoints.api.tag_models_interface import RepositoryTagHistory, Tag
from endpoints.api.test.shared import conduct_api_call
from endpoints.test.shared import client_with_identity
from endpoints.api.tag import RepositoryTag, RestoreTag, ListRepositoryTags

from endpoints.api.test.shared import client_with_identity, conduct_api_call
from endpoints.api.tag import RepositoryTag, RestoreTag
from features import FeatureNameValue

from test.fixtures import *
@@ -80,6 +86,28 @@ def authd_client(client):
  yield cl


@pytest.fixture()
def list_repository_tag_history():
  def list_repository_tag_history(namespace_name, repository_name, page, size, specific_tag):
    return RepositoryTagHistory(tags=[
      Tag(name='First Tag', image='image', reversion=False, lifetime_start_ts=0, lifetime_end_ts=0, manifest_list=[],
          docker_image_id='first docker image id'),
      Tag(name='Second Tag', image='second image', reversion=True, lifetime_start_ts=10, lifetime_end_ts=100,
          manifest_list=[], docker_image_id='second docker image id')], more=False)

  with patch('endpoints.api.tag.pre_oci_model.list_repository_tag_history', side_effect=list_repository_tag_history):
    yield


@pytest.fixture()
def find_no_repo_tag_history():
  def list_repository_tag_history(namespace_name, repository_name, page, size, specific_tag):
    return None

  with patch('endpoints.api.tag.pre_oci_model.list_repository_tag_history', side_effect=list_repository_tag_history):
    yield


@pytest.mark.parametrize('test_image,test_tag,expected_status', [
  ('image1', '-INVALID-TAG-NAME', 400),
  ('image1', '.INVALID-TAG-NAME', 400),
@@ -93,7 +121,7 @@ def authd_client(client):
])
def test_move_tag(test_image, test_tag, expected_status, get_repo_image, get_repo_tag_image,
                  create_or_update_tag, generate_manifest, authd_client):
  params = {'repository': 'devtable/repo', 'tag': test_tag}
  params = {'repository': 'devtable/simple', 'tag': test_tag}
  request_body = {'image': test_image}
  if expected_status is None:
    with pytest.raises(Exception):
@@ -102,6 +130,62 @@ def test_move_tag(test_image, test_tag, expected_status, get_repo_image, get_rep
    conduct_api_call(authd_client, RepositoryTag, 'put', params, request_body, expected_status)


@pytest.mark.parametrize('namespace, repository, specific_tag, page, limit, expected_response_code, expected', [
  ('devtable', 'simple', None, 1, 10, 200, {'has_additional': False}),
  ('devtable', 'simple', None, 1, 10, 200, {'page': 1}),
  ('devtable', 'simple', None, 1, 10, 200, {'tags': [{'docker_image_id': 'first docker image id',
                                                      'name': 'First Tag',
                                                      'reversion': False},
                                                     {'docker_image_id': 'second docker image id',
                                                      'end_ts': 100,
                                                      'name': 'Second Tag',
                                                      'reversion': True,
                                                      'start_ts': 10}]}),
])
def test_list_repository_tags_view_is_correct(namespace, repository, specific_tag, page, limit,
                                              list_repository_tag_history, expected_response_code, expected,
                                              authd_client):
  params = {'repository': namespace + '/' + repository, 'specificTag': specific_tag, 'page': page, 'limit': limit}
  response = conduct_api_call(authd_client, ListRepositoryTags, 'get', params, expected_code=expected_response_code)
  compare_list_history_tags_response(expected, response.json)


def compare_list_history_tags_response(expected, actual):
  if 'has_additional' in expected:
    assert expected['has_additional'] == actual['has_additional']

  if 'page' in expected:
    assert expected['page'] == actual['page']

  if 'tags' in expected:
    assert expected['tags'] == actual['tags']


def test_no_repo_tag_history(find_no_repo_tag_history, authd_client):
  params = {'repository': 'devtable/simple', 'specificTag': None, 'page': 1, 'limit': 10}
  conduct_api_call(authd_client, ListRepositoryTags, 'get', params, expected_code=404)


@pytest.mark.parametrize(
  'specific_tag, page, limit, expected_specific_tag, expected_page, expected_limit', [
    (None, None, None, None, 1, 50),
    ('specific_tag', 12, 13, 'specific_tag', 12, 13),
    ('specific_tag', -1, 101, 'specific_tag', 1, 100),
    ('specific_tag', 0, 0, 'specific_tag', 1, 1),
  ])
def test_repo_tag_history_param_parse(specific_tag, page, limit, expected_specific_tag, expected_page, expected_limit,
                                      authd_client):
  mock = MagicMock()
  mock.return_value = RepositoryTagHistory(tags=[], more=False)

  with patch('endpoints.api.tag.pre_oci_model.list_repository_tag_history', side_effect=mock):
    params = {'repository': 'devtable/simple', 'specificTag': specific_tag, 'page': page, 'limit': limit}
    conduct_api_call(authd_client, ListRepositoryTags, 'get', params)

  assert mock.call_args == call(namespace_name='devtable', repository_name='simple',
                                page=expected_page, size=expected_limit, specific_tag=expected_specific_tag)


@pytest.mark.parametrize('test_manifest,test_tag,manifest_generated,expected_status', [
  (None, 'newtag', True, 200),
  (None, 'generatemanifestfail', True, None),
@@ -110,7 +194,7 @@ def test_move_tag(test_image, test_tag, expected_status, get_repo_image, get_rep
def test_restore_tag(test_manifest, test_tag, manifest_generated, expected_status, get_repository,
                     restore_tag_to_manifest, restore_tag_to_image, generate_manifest,
                     authd_client):
  params = {'repository': 'devtable/repo', 'tag': test_tag}
  params = {'repository': 'devtable/simple', 'tag': test_tag}
  request_body = {'image': 'image1'}
  if test_manifest is not None:
    request_body['manifest_digest'] = test_manifest
@@ -121,4 +205,4 @@ def test_restore_tag(test_manifest, test_tag, manifest_generated, expected_statu
    conduct_api_call(authd_client, RestoreTag, 'post', params, request_body, expected_status)

  if manifest_generated:
    generate_manifest.assert_called_with('devtable', 'repo', test_tag)
    generate_manifest.assert_called_with('devtable', 'simple', test_tag)
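test_repo_tag_history_param_parse above pins down how ListRepositoryTags is expected to normalize its query parameters: page defaults to 1 and is never below 1, limit defaults to 50 and is clamped to 1..100. A minimal sketch of that normalization (the helper name is made up; only the expected values come from the test):

    # Sketch of the clamping asserted by the test; not the endpoint's literal code.
    def normalize_tag_history_args(page=None, limit=None, specific_tag=None):
      page = 1 if page is None else max(1, page)
      limit = 50 if limit is None else max(1, min(100, limit))
      return page, limit, specific_tag

    assert normalize_tag_history_args() == (1, 50, None)
    assert normalize_tag_history_args(-1, 101, 'specific_tag') == (1, 100, 'specific_tag')
    assert normalize_tag_history_args(0, 0, 'specific_tag') == (1, 1, 'specific_tag')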
@@ -4,9 +4,11 @@ from mock import patch
from data import model
from endpoints.api import api
from endpoints.api.test.shared import client_with_identity, conduct_api_call
from endpoints.api.test.shared import conduct_api_call
from endpoints.api.team import OrganizationTeamSyncing, TeamMemberList
from endpoints.api.organization import Organization
from endpoints.test.shared import client_with_identity

from test.test_ldap import mock_ldap

from test.fixtures import *
@@ -1,6 +1,6 @@
import pytest

from endpoints.api.trigger import is_parent
from endpoints.api.trigger_analyzer import is_parent


@pytest.mark.parametrize('context,dockerfile_path,expected', [

152
endpoints/api/test/test_trigger_analyzer.py
Normal file
@@ -0,0 +1,152 @@
import pytest
from mock import Mock

from auth import permissions
from data import model
from endpoints.api.trigger_analyzer import TriggerAnalyzer
from util import dockerfileparse

BAD_PATH = "\"server_hostname/\" is not a valid Quay repository path"

EMPTY_CONF = {}

GOOD_CONF = {'context': '/', 'dockerfile_path': '/file'}

BAD_CONF = {'context': 'context', 'dockerfile_path': 'dockerfile_path'}

ONE_ROBOT = {'can_read': False, 'is_robot': True, 'kind': 'user', 'name': 'name'}

DOCKERFILE_NOT_CHILD = 'Dockerfile, context, is not a child of the context, dockerfile_path.'

THE_DOCKERFILE_SPECIFIED = 'Could not parse the Dockerfile specified'

DOCKERFILE_PATH_NOT_FOUND = 'Specified Dockerfile path for the trigger was not found on the main branch. This trigger may fail.'

NO_FROM_LINE = 'No FROM line found in the Dockerfile'

REPO_NOT_FOUND = 'Repository "server_hostname/path/file" referenced by the Dockerfile was not found'


@pytest.fixture
def get_monkeypatch(monkeypatch):
  return monkeypatch


def patch_permissions(monkeypatch, can_read=False):
  def can_read_fn(base_namespace, base_repository):
    return can_read

  monkeypatch.setattr(permissions, 'ReadRepositoryPermission', can_read_fn)


def patch_list_namespace_robots(monkeypatch):
  my_mock = Mock()
  my_mock.configure_mock(**{'username': 'name'})
  return_value = [my_mock]

  def return_list_mocks(namesapce):
    return return_value

  monkeypatch.setattr(model.user, 'list_namespace_robots', return_list_mocks)
  return return_value


def patch_get_all_repo_users_transitive(monkeypatch):
  my_mock = Mock()
  my_mock.configure_mock(**{'username': 'name'})
  return_value = [my_mock]

  def return_get_mocks(namesapce, image_repostiory):
    return return_value

  monkeypatch.setattr(model.user, 'get_all_repo_users_transitive', return_get_mocks)
  return return_value


def patch_parse_dockerfile(monkeypatch, get_base_image):
  if get_base_image is not None:
    def return_return_value(content):
      parse_mock = Mock()
      parse_mock.configure_mock(**{'get_base_image': get_base_image})
      return parse_mock

    monkeypatch.setattr(dockerfileparse, "parse_dockerfile", return_return_value)
  else:
    def return_return_value(content):
      return get_base_image

    monkeypatch.setattr(dockerfileparse, "parse_dockerfile", return_return_value)


def patch_model_repository_get_repository(monkeypatch, get_repository):
  if get_repository is not None:

    def mock_get_repository(base_namespace, base_repository):
      vis_mock = Mock()
      vis_mock.name = get_repository
      get_repo_mock = Mock(visibility=vis_mock)

      return get_repo_mock

  else:
    def mock_get_repository(base_namespace, base_repository):
      return None

  monkeypatch.setattr(model.repository, "get_repository", mock_get_repository)


def return_none():
  return None


def return_content():
  return Mock()


def return_server_hostname():
  return "server_hostname/"


def return_non_server_hostname():
  return "slime"


def return_path():
  return "server_hostname/path/file"


@pytest.mark.parametrize(
  'handler_fn, config_dict, admin_org_permission, status, message, get_base_image, robots, server_hostname, get_repository, can_read, namespace, name', [
    (return_none, EMPTY_CONF, False, "warning", DOCKERFILE_PATH_NOT_FOUND, None, [], None, None, False, "namespace", None),
    (return_none, EMPTY_CONF, True, "warning", DOCKERFILE_PATH_NOT_FOUND, None, [ONE_ROBOT], None, None, False, "namespace", None),
    (return_content, BAD_CONF, False, "error", THE_DOCKERFILE_SPECIFIED, None, [], None, None, False, "namespace", None),
    (return_none, EMPTY_CONF, False, "warning", DOCKERFILE_PATH_NOT_FOUND, return_none, [], None, None, False, "namespace", None),
    (return_none, EMPTY_CONF, True, "warning", DOCKERFILE_PATH_NOT_FOUND, return_none, [ONE_ROBOT], None, None, False, "namespace", None),
    (return_content, BAD_CONF, False, "error", DOCKERFILE_NOT_CHILD, return_none, [], None, None, False, "namespace", None),
    (return_content, GOOD_CONF, False, "warning", NO_FROM_LINE, return_none, [], None, None, False, "namespace", None),
    (return_content, GOOD_CONF, False, "publicbase", None, return_non_server_hostname, [], "server_hostname", None, False, "namespace", None),
    (return_content, GOOD_CONF, False, "warning", BAD_PATH, return_server_hostname, [], "server_hostname", None, False, "namespace", None),
    (return_content, GOOD_CONF, False, "error", REPO_NOT_FOUND, return_path, [], "server_hostname", None, False, "namespace", None),
    (return_content, GOOD_CONF, False, "error", REPO_NOT_FOUND, return_path, [], "server_hostname", "nonpublic", False, "namespace", None),
    (return_content, GOOD_CONF, False, "requiresrobot", None, return_path, [], "server_hostname", "nonpublic", True, "path", "file"),
    (return_content, GOOD_CONF, False, "publicbase", None, return_path, [], "server_hostname", "public", True, "path", "file"),

  ])
def test_trigger_analyzer(handler_fn, config_dict, admin_org_permission, status, message, get_base_image, robots,
                          server_hostname, get_repository, can_read, namespace, name,
                          get_monkeypatch):
  patch_list_namespace_robots(get_monkeypatch)
  patch_get_all_repo_users_transitive(get_monkeypatch)
  patch_parse_dockerfile(get_monkeypatch, get_base_image)
  patch_model_repository_get_repository(get_monkeypatch, get_repository)
  patch_permissions(get_monkeypatch, can_read)
  handler_mock = Mock()
  handler_mock.configure_mock(**{'load_dockerfile_contents': handler_fn})
  trigger_analyzer = TriggerAnalyzer(handler_mock, 'namespace', server_hostname, config_dict, admin_org_permission)
  assert trigger_analyzer.analyze_trigger() == {'namespace': namespace,
                                                'name': name,
                                                'robots': robots,
                                                'status': status,
                                                'message': message,
                                                'is_admin': admin_org_permission}
@ -1,6 +1,5 @@
|
||||||
""" Create, list and manage build triggers. """
|
""" Create, list and manage build triggers. """
|
||||||
|
|
||||||
import json
|
|
||||||
import logging
|
import logging
|
||||||
from os import path
|
from os import path
|
||||||
from urllib import quote
|
from urllib import quote
|
||||||
|
@ -20,11 +19,11 @@ from data.model.build import update_build_trigger
|
||||||
from endpoints.api import (RepositoryParamResource, nickname, resource, require_repo_admin,
|
from endpoints.api import (RepositoryParamResource, nickname, resource, require_repo_admin,
|
||||||
log_action, request_error, query_param, parse_args, internal_only,
|
log_action, request_error, query_param, parse_args, internal_only,
|
||||||
validate_json_request, api, path_param, abort,
|
validate_json_request, api, path_param, abort,
|
||||||
disallow_for_app_repositories, disallow_under_trust)
|
disallow_for_app_repositories)
|
||||||
from endpoints.api.build import build_status_view, trigger_view, RepositoryBuildStatus
|
from endpoints.api.build import build_status_view, trigger_view, RepositoryBuildStatus
|
||||||
|
from endpoints.api.trigger_analyzer import TriggerAnalyzer
|
||||||
from endpoints.building import start_build, MaximumBuildsQueuedException
|
from endpoints.building import start_build, MaximumBuildsQueuedException
|
||||||
from endpoints.exception import NotFound, Unauthorized, InvalidRequest
|
from endpoints.exception import NotFound, Unauthorized, InvalidRequest
|
||||||
from util.dockerfileparse import parse_dockerfile
|
|
||||||
from util.names import parse_robot_username
|
from util.names import parse_robot_username
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
@ -35,6 +34,13 @@ def _prepare_webhook_url(scheme, username, password, hostname, path):
|
||||||
return urlunparse((scheme, auth_hostname, path, '', '', ''))
|
return urlunparse((scheme, auth_hostname, path, '', '', ''))
|
||||||
|
|
||||||
|
|
||||||
|
def get_trigger(trigger_uuid):
|
||||||
|
try:
|
||||||
|
trigger = model.build.get_build_trigger(trigger_uuid)
|
||||||
|
except model.InvalidBuildTriggerException:
|
||||||
|
raise NotFound()
|
||||||
|
return trigger
|
||||||
|
|
||||||
@resource('/v1/repository/<apirepopath:repository>/trigger/')
|
@resource('/v1/repository/<apirepopath:repository>/trigger/')
|
||||||
@path_param('repository', 'The full path of the repository. e.g. namespace/name')
|
@path_param('repository', 'The full path of the repository. e.g. namespace/name')
|
||||||
class BuildTriggerList(RepositoryParamResource):
|
class BuildTriggerList(RepositoryParamResource):
|
||||||
|
@ -62,23 +68,14 @@ class BuildTrigger(RepositoryParamResource):
|
||||||
@nickname('getBuildTrigger')
|
@nickname('getBuildTrigger')
|
||||||
def get(self, namespace_name, repo_name, trigger_uuid):
|
def get(self, namespace_name, repo_name, trigger_uuid):
|
||||||
""" Get information for the specified build trigger. """
|
""" Get information for the specified build trigger. """
|
||||||
try:
|
return trigger_view(get_trigger(trigger_uuid), can_admin=True)
|
||||||
trigger = model.build.get_build_trigger(trigger_uuid)
|
|
||||||
except model.InvalidBuildTriggerException:
|
|
||||||
raise NotFound()
|
|
||||||
|
|
||||||
return trigger_view(trigger, can_admin=True)
|
|
||||||
|
|
||||||
@require_repo_admin
|
@require_repo_admin
|
||||||
@disallow_for_app_repositories
|
@disallow_for_app_repositories
|
||||||
@disallow_under_trust
|
|
||||||
@nickname('deleteBuildTrigger')
|
@nickname('deleteBuildTrigger')
|
||||||
def delete(self, namespace_name, repo_name, trigger_uuid):
|
def delete(self, namespace_name, repo_name, trigger_uuid):
|
||||||
""" Delete the specified build trigger. """
|
""" Delete the specified build trigger. """
|
||||||
try:
|
trigger = get_trigger(trigger_uuid)
|
||||||
trigger = model.build.get_build_trigger(trigger_uuid)
|
|
||||||
except model.InvalidBuildTriggerException:
|
|
||||||
raise NotFound()
|
|
||||||
|
|
||||||
handler = BuildTriggerHandler.get_handler(trigger)
|
handler = BuildTriggerHandler.get_handler(trigger)
|
||||||
if handler.is_active():
|
if handler.is_active():
|
||||||
|
@ -116,15 +113,11 @@ class BuildTriggerSubdirs(RepositoryParamResource):
|
||||||
|
|
||||||
@require_repo_admin
|
@require_repo_admin
|
||||||
@disallow_for_app_repositories
|
@disallow_for_app_repositories
|
||||||
@disallow_under_trust
|
|
||||||
@nickname('listBuildTriggerSubdirs')
|
@nickname('listBuildTriggerSubdirs')
|
||||||
@validate_json_request('BuildTriggerSubdirRequest')
|
@validate_json_request('BuildTriggerSubdirRequest')
|
||||||
def post(self, namespace_name, repo_name, trigger_uuid):
|
def post(self, namespace_name, repo_name, trigger_uuid):
|
||||||
""" List the subdirectories available for the specified build trigger and source. """
|
""" List the subdirectories available for the specified build trigger and source. """
|
||||||
try:
|
trigger = get_trigger(trigger_uuid)
|
||||||
trigger = model.build.get_build_trigger(trigger_uuid)
|
|
||||||
except model.InvalidBuildTriggerException:
|
|
||||||
raise NotFound()
|
|
||||||
|
|
||||||
user_permission = UserAdminPermission(trigger.connected_user.username)
|
user_permission = UserAdminPermission(trigger.connected_user.username)
|
||||||
if user_permission.can():
|
if user_permission.can():
|
||||||
|
@ -184,16 +177,11 @@ class BuildTriggerActivate(RepositoryParamResource):
|
||||||
|
|
||||||
@require_repo_admin
|
@require_repo_admin
|
||||||
@disallow_for_app_repositories
|
@disallow_for_app_repositories
|
||||||
@disallow_under_trust
|
|
||||||
@nickname('activateBuildTrigger')
|
@nickname('activateBuildTrigger')
|
||||||
@validate_json_request('BuildTriggerActivateRequest')
|
@validate_json_request('BuildTriggerActivateRequest')
|
||||||
def post(self, namespace_name, repo_name, trigger_uuid):
|
def post(self, namespace_name, repo_name, trigger_uuid):
|
||||||
""" Activate the specified build trigger. """
|
""" Activate the specified build trigger. """
|
||||||
try:
|
trigger = get_trigger(trigger_uuid)
|
||||||
trigger = model.build.get_build_trigger(trigger_uuid)
|
|
||||||
except model.InvalidBuildTriggerException:
|
|
||||||
raise NotFound()
|
|
||||||
|
|
||||||
handler = BuildTriggerHandler.get_handler(trigger)
|
handler = BuildTriggerHandler.get_handler(trigger)
|
||||||
if handler.is_active():
|
if handler.is_active():
|
||||||
raise InvalidRequest('Trigger config is not sufficient for activation.')
|
raise InvalidRequest('Trigger config is not sufficient for activation.')
|
||||||
|
@ -285,15 +273,11 @@ class BuildTriggerAnalyze(RepositoryParamResource):
|
||||||
|
|
||||||
@require_repo_admin
|
@require_repo_admin
|
||||||
@disallow_for_app_repositories
|
@disallow_for_app_repositories
|
||||||
@disallow_under_trust
|
|
||||||
@nickname('analyzeBuildTrigger')
|
@nickname('analyzeBuildTrigger')
|
||||||
@validate_json_request('BuildTriggerAnalyzeRequest')
|
@validate_json_request('BuildTriggerAnalyzeRequest')
|
||||||
def post(self, namespace_name, repo_name, trigger_uuid):
|
def post(self, namespace_name, repo_name, trigger_uuid):
|
||||||
""" Analyze the specified build trigger configuration. """
|
""" Analyze the specified build trigger configuration. """
|
||||||
try:
|
trigger = get_trigger(trigger_uuid)
|
||||||
trigger = model.build.get_build_trigger(trigger_uuid)
|
|
||||||
except model.InvalidBuildTriggerException:
|
|
||||||
raise NotFound()
|
|
||||||
|
|
||||||
if trigger.repository.namespace_user.username != namespace_name:
|
if trigger.repository.namespace_user.username != namespace_name:
|
||||||
raise NotFound()
|
raise NotFound()
|
||||||
|
@ -303,106 +287,14 @@ class BuildTriggerAnalyze(RepositoryParamResource):
|
||||||
|
|
||||||
new_config_dict = request.get_json()['config']
|
new_config_dict = request.get_json()['config']
|
||||||
handler = BuildTriggerHandler.get_handler(trigger, new_config_dict)
|
handler = BuildTriggerHandler.get_handler(trigger, new_config_dict)
|
||||||
|
server_hostname = app.config['SERVER_HOSTNAME']
|
||||||
def analyze_view(image_namespace, image_repository, status, message=None):
|
|
||||||
# Retrieve the list of robots and mark whether they have read access already.
|
|
||||||
robots = []
|
|
||||||
if AdministerOrganizationPermission(image_namespace).can():
|
|
||||||
if image_repository is not None:
|
|
||||||
perm_query = model.user.get_all_repo_users_transitive(image_namespace, image_repository)
|
|
||||||
user_ids_with_permission = set([user.id for user in perm_query])
|
|
||||||
else:
|
|
||||||
user_ids_with_permission = set()
|
|
||||||
|
|
||||||
def robot_view(robot):
|
|
||||||
return {
|
|
||||||
'name': robot.username,
|
|
||||||
'kind': 'user',
|
|
||||||
'is_robot': True,
|
|
||||||
'can_read': robot.id in user_ids_with_permission,
|
|
||||||
}
|
|
||||||
|
|
||||||
robots = [robot_view(robot) for robot in model.user.list_namespace_robots(image_namespace)]
|
|
||||||
|
|
||||||
return {
|
|
||||||
'namespace': image_namespace,
|
|
||||||
'name': image_repository,
|
|
||||||
'robots': robots,
|
|
||||||
'status': status,
|
|
||||||
'message': message,
|
|
||||||
'is_admin': AdministerOrganizationPermission(image_namespace).can(),
|
|
||||||
}
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# Load the contents of the Dockerfile.
|
trigger_analyzer = TriggerAnalyzer(handler,
|
||||||
contents = handler.load_dockerfile_contents()
|
namespace_name,
|
||||||
if not contents:
|
server_hostname,
|
||||||
return {
|
new_config_dict,
|
||||||
'status': 'warning',
|
AdministerOrganizationPermission(namespace_name).can())
|
||||||
'message': 'Specified Dockerfile path for the trigger was not found on the main ' +
|
return trigger_analyzer.analyze_trigger()
|
||||||
'branch. This trigger may fail.',
|
|
||||||
}
|
|
||||||
|
|
||||||
# Parse the contents of the Dockerfile.
|
|
||||||
parsed = parse_dockerfile(contents)
|
|
||||||
if not parsed:
|
|
||||||
return {
|
|
||||||
'status': 'error',
|
|
||||||
'message': 'Could not parse the Dockerfile specified'
|
|
||||||
}
|
|
||||||
|
|
||||||
# Check whether the dockerfile_path is correct
|
|
||||||
if new_config_dict.get('context'):
|
|
||||||
if not is_parent(new_config_dict.get('context'), new_config_dict.get('dockerfile_path')):
|
|
||||||
return {
|
|
||||||
'status': 'error',
|
|
||||||
'message': 'Dockerfile, %s, is not child of the context, %s.' %
|
|
||||||
(new_config_dict.get('context'), new_config_dict.get('dockerfile_path'))
|
|
||||||
}
|
|
||||||
|
|
||||||
# Default to the current namespace.
|
|
||||||
base_namespace = namespace_name
|
|
||||||
base_repository = None
|
|
||||||
|
|
||||||
# Determine the base image (i.e. the FROM) for the Dockerfile.
|
|
||||||
base_image = parsed.get_base_image()
|
|
||||||
if not base_image:
|
|
||||||
return analyze_view(base_namespace, base_repository, 'warning',
|
|
||||||
message='No FROM line found in the Dockerfile')
|
|
||||||
|
|
||||||
# Check to see if the base image lives in Quay.
|
|
||||||
quay_registry_prefix = '%s/' % (app.config['SERVER_HOSTNAME'])
|
|
||||||
if not base_image.startswith(quay_registry_prefix):
|
|
||||||
return analyze_view(base_namespace, base_repository, 'publicbase')
|
|
||||||
|
|
||||||
# Lookup the repository in Quay.
|
|
||||||
result = str(base_image)[len(quay_registry_prefix):].split('/', 2)
|
|
||||||
if len(result) != 2:
|
|
||||||
msg = '"%s" is not a valid Quay repository path' % (base_image)
|
|
||||||
return analyze_view(base_namespace, base_repository, 'warning', message=msg)
|
|
||||||
|
|
||||||
(base_namespace, base_repository) = result
|
|
||||||
found_repository = model.repository.get_repository(base_namespace, base_repository)
|
|
||||||
if not found_repository:
|
|
||||||
return {
|
|
||||||
'status': 'error',
|
|
||||||
'message': 'Repository "%s" referenced by the Dockerfile was not found' % (base_image)
|
|
||||||
}
|
|
||||||
|
|
||||||
# If the repository is private and the user cannot see that repo, then
|
|
||||||
# mark it as not found.
|
|
||||||
can_read = ReadRepositoryPermission(base_namespace, base_repository)
|
|
||||||
if found_repository.visibility.name != 'public' and not can_read:
|
|
||||||
return {
|
|
||||||
'status': 'error',
|
|
||||||
'message': 'Repository "%s" referenced by the Dockerfile was not found' % (base_image)
|
|
||||||
}
|
|
||||||
|
|
||||||
if found_repository.visibility.name == 'public':
|
|
||||||
return analyze_view(base_namespace, base_repository, 'publicbase')
|
|
||||||
else:
|
|
||||||
return analyze_view(base_namespace, base_repository, 'requiresrobot')
|
|
||||||
|
|
||||||
except RepositoryReadException as rre:
|
except RepositoryReadException as rre:
|
||||||
return {
|
return {
|
||||||
'status': 'error',
|
'status': 'error',
|
||||||
|
@ -413,30 +305,6 @@ class BuildTriggerAnalyze(RepositoryParamResource):
|
||||||
'status': 'notimplemented',
|
'status': 'notimplemented',
|
||||||
}
|
}
|
||||||
|
|
||||||
raise NotFound()
|
|
||||||
|
|
||||||
|
|
||||||
def is_parent(context, dockerfile_path):
|
|
||||||
""" This checks whether the context is a parent of the dockerfile_path"""
|
|
||||||
if context == "" or dockerfile_path == "":
|
|
||||||
return False
|
|
||||||
|
|
||||||
normalized_context = path.normpath(context)
|
|
||||||
if normalized_context[len(normalized_context) - 1] != path.sep:
|
|
||||||
normalized_context += path.sep
|
|
||||||
|
|
||||||
if normalized_context[0] != path.sep:
|
|
||||||
normalized_context = path.sep + normalized_context
|
|
||||||
|
|
||||||
normalized_subdir = path.normpath(path.dirname(dockerfile_path))
|
|
||||||
if normalized_subdir[0] != path.sep:
|
|
||||||
normalized_subdir = path.sep + normalized_subdir
|
|
||||||
|
|
||||||
if normalized_subdir[len(normalized_subdir) - 1] != path.sep:
|
|
||||||
normalized_subdir += path.sep
|
|
||||||
|
|
||||||
return normalized_subdir.startswith(normalized_context)
|
|
||||||
|
|
||||||
|
|
||||||
@resource('/v1/repository/<apirepopath:repository>/trigger/<trigger_uuid>/start')
|
@resource('/v1/repository/<apirepopath:repository>/trigger/<trigger_uuid>/start')
|
||||||
@path_param('repository', 'The full path of the repository. e.g. namespace/name')
|
  @path_param('repository', 'The full path of the repository. e.g. namespace/name')

@@ -467,15 +335,11 @@ class ActivateBuildTrigger(RepositoryParamResource):
   @require_repo_admin
   @disallow_for_app_repositories
-  @disallow_under_trust
   @nickname('manuallyStartBuildTrigger')
   @validate_json_request('RunParameters')
   def post(self, namespace_name, repo_name, trigger_uuid):
     """ Manually start a build from the specified trigger. """
-    try:
-      trigger = model.build.get_build_trigger(trigger_uuid)
-    except model.InvalidBuildTriggerException:
-      raise NotFound()
+    trigger = get_trigger(trigger_uuid)

     handler = BuildTriggerHandler.get_handler(trigger)
     if not handler.is_active():

@@ -532,14 +396,10 @@ class BuildTriggerFieldValues(RepositoryParamResource):

   @require_repo_admin
   @disallow_for_app_repositories
-  @disallow_under_trust
   @nickname('listTriggerFieldValues')
   def post(self, namespace_name, repo_name, trigger_uuid, field_name):
     """ List the field values for a custom run field. """
-    try:
-      trigger = model.build.get_build_trigger(trigger_uuid)
-    except model.InvalidBuildTriggerException:
-      raise NotFound()
+    trigger = get_trigger(trigger_uuid)

     config = request.get_json() or None
     if AdministerRepositoryPermission(namespace_name, repo_name).can():

@@ -577,17 +437,13 @@ class BuildTriggerSources(RepositoryParamResource):

   @require_repo_admin
   @disallow_for_app_repositories
-  @disallow_under_trust
   @nickname('listTriggerBuildSources')
   @validate_json_request('BuildTriggerSourcesRequest')
   def post(self, namespace_name, repo_name, trigger_uuid):
     """ List the build sources for the trigger configuration thus far. """
     namespace = request.get_json()['namespace']

-    try:
-      trigger = model.build.get_build_trigger(trigger_uuid)
-    except model.InvalidBuildTriggerException:
-      raise NotFound()
+    trigger = get_trigger(trigger_uuid)

     user_permission = UserAdminPermission(trigger.connected_user.username)
     if user_permission.can():

@@ -612,14 +468,10 @@ class BuildTriggerSourceNamespaces(RepositoryParamResource):

   @require_repo_admin
   @disallow_for_app_repositories
-  @disallow_under_trust
   @nickname('listTriggerBuildSourceNamespaces')
   def get(self, namespace_name, repo_name, trigger_uuid):
     """ List the build sources for the trigger configuration thus far. """
-    try:
-      trigger = model.build.get_build_trigger(trigger_uuid)
-    except model.InvalidBuildTriggerException:
-      raise NotFound()
+    trigger = get_trigger(trigger_uuid)

     user_permission = UserAdminPermission(trigger.connected_user.username)
     if user_permission.can():
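Each of these hunks replaces the repeated try/except lookup with a call to a get_trigger helper. The helper's body is not part of the hunks shown here; the following is only a sketch of its likely shape, inferred from the lines being removed (model and NotFound are assumed to already be imported in this module, since the removed code uses both):

def get_trigger(trigger_uuid):
  # Resolve the build trigger by UUID, translating an invalid UUID into a 404
  # so the endpoints above no longer repeat the same try/except.
  try:
    trigger = model.build.get_build_trigger(trigger_uuid)
  except model.InvalidBuildTriggerException:
    raise NotFound()
  return trigger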
122  endpoints/api/trigger_analyzer.py  Normal file
@@ -0,0 +1,122 @@
from os import path

from auth import permissions
from data import model
from util import dockerfileparse


def is_parent(context, dockerfile_path):
  """ This checks whether the context is a parent of the dockerfile_path"""
  if context == "" or dockerfile_path == "":
    return False

  normalized_context = path.normpath(context)
  if normalized_context[len(normalized_context) - 1] != path.sep:
    normalized_context += path.sep

  if normalized_context[0] != path.sep:
    normalized_context = path.sep + normalized_context

  normalized_subdir = path.normpath(path.dirname(dockerfile_path))
  if normalized_subdir[0] != path.sep:
    normalized_subdir = path.sep + normalized_subdir

  if normalized_subdir[len(normalized_subdir) - 1] != path.sep:
    normalized_subdir += path.sep

  return normalized_subdir.startswith(normalized_context)


class TriggerAnalyzer:
  """ This analyzes triggers and returns the appropriate trigger and robot view to the frontend. """

  def __init__(self, handler, namespace_name, server_hostname, new_config_dict, admin_org_permission):
    self.handler = handler
    self.namespace_name = namespace_name
    self.server_hostname = server_hostname
    self.new_config_dict = new_config_dict
    self.admin_org_permission = admin_org_permission

  def analyze_trigger(self):
    # Load the contents of the Dockerfile.
    contents = self.handler.load_dockerfile_contents()
    if not contents:
      return self.analyze_view(self.namespace_name, None, 'warning',
                               message='Specified Dockerfile path for the trigger was not found on the main ' +
                                       'branch. This trigger may fail.')

    # Parse the contents of the Dockerfile.
    parsed = dockerfileparse.parse_dockerfile(contents)
    if not parsed:
      return self.analyze_view(self.namespace_name, None, 'error', message='Could not parse the Dockerfile specified')

    # Check whether the dockerfile_path is correct
    if self.new_config_dict.get('context') and not is_parent(self.new_config_dict.get('context'),
                                                             self.new_config_dict.get('dockerfile_path')):
      return self.analyze_view(self.namespace_name, None, 'error',
                               message='Dockerfile, %s, is not a child of the context, %s.' %
                                       (self.new_config_dict.get('context'),
                                        self.new_config_dict.get('dockerfile_path')))

    # Determine the base image (i.e. the FROM) for the Dockerfile.
    base_image = parsed.get_base_image()
    if not base_image:
      return self.analyze_view(self.namespace_name, None, 'warning', message='No FROM line found in the Dockerfile')

    # Check to see if the base image lives in Quay.
    quay_registry_prefix = '%s/' % self.server_hostname
    if not base_image.startswith(quay_registry_prefix):
      return self.analyze_view(self.namespace_name, None, 'publicbase')

    # Lookup the repository in Quay.
    result = str(base_image)[len(quay_registry_prefix):].split('/', 2)
    if len(result) != 2:
      msg = '"%s" is not a valid Quay repository path' % base_image
      return self.analyze_view(self.namespace_name, None, 'warning', message=msg)

    (base_namespace, base_repository) = result
    found_repository = model.repository.get_repository(base_namespace, base_repository)
    if not found_repository:
      return self.analyze_view(self.namespace_name, None, 'error',
                               message='Repository "%s" referenced by the Dockerfile was not found' % base_image)

    # If the repository is private and the user cannot see that repo, then
    # mark it as not found.
    can_read = permissions.ReadRepositoryPermission(base_namespace, base_repository)
    if found_repository.visibility.name != 'public' and not can_read:
      return self.analyze_view(self.namespace_name, None, 'error',
                               message='Repository "%s" referenced by the Dockerfile was not found' % base_image)

    if found_repository.visibility.name == 'public':
      return self.analyze_view(base_namespace, base_repository, 'publicbase')

    return self.analyze_view(base_namespace, base_repository, 'requiresrobot')

  def analyze_view(self, image_namespace, image_repository, status, message=None):
    # Retrieve the list of robots and mark whether they have read access already.
    robots = []
    if self.admin_org_permission:
      if image_repository is not None:
        perm_query = model.user.get_all_repo_users_transitive(image_namespace, image_repository)
        user_ids_with_permission = set([user.id for user in perm_query])
      else:
        user_ids_with_permission = set()

      def robot_view(robot):
        return {
          'name': robot.username,
          'kind': 'user',
          'is_robot': True,
          'can_read': robot.id in user_ids_with_permission,
        }

      robots = [robot_view(robot) for robot in model.user.list_namespace_robots(image_namespace)]

    return {
      'namespace': image_namespace,
      'name': image_repository,
      'robots': robots,
      'status': status,
      'message': message,
      'is_admin': self.admin_org_permission,
    }
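For reference, a small usage sketch of the new is_parent helper. This is illustrative only, not part of the commit: it assumes the repository root is on PYTHONPATH and a POSIX path separator, and the example paths are made up.

from endpoints.api.trigger_analyzer import is_parent

# The context must be a parent directory of the directory containing the Dockerfile.
# Separators are normalized, so a missing leading or trailing '/' does not matter.
assert is_parent('/build', '/build/sub/Dockerfile')
assert is_parent('build', 'build/sub/Dockerfile')
assert not is_parent('/build/other', '/build/sub/Dockerfile')
assert not is_parent('', '/build/Dockerfile')  # an empty context is rejected outright

analyze_trigger uses this check to flag a trigger configuration whose Dockerfile path falls outside its build context before it goes on to inspect the FROM line.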
Some files were not shown because too many files have changed in this diff.