Compare commits
No commits in common. "v2.8.0" and "master" have entirely different histories.
@@ -1,4 +0,0 @@
FROM nginx:alpine
MAINTAINER Antoine Legrand <2t.antoine@gmail.com>
COPY . /usr/share/nginx/html
EXPOSE 80
@@ -31,3 +31,5 @@ static/build/**
.gitlab-ci/*
.gitlab-ci.*
docker-compose.yaml
test/dockerclients/**
node_modules
@@ -22,5 +22,9 @@ htmlcov
.cache
.npm-debug.log
Dockerfile-e
build/
.vscode
*.iml
.DS_Store
.pytest_cache/*
test/dockerclients/Vagrantfile
test/dockerclients/.*
.gitlab-ci.jsonnet

@@ -1,205 +0,0 @@
local utils = import '.gitlab-ci/utils.libsonnet';
local vars = import '.gitlab-ci/vars.libsonnet';
local mergeJob = utils.ci.mergeJob;
local images = vars.images;
local baseJob = (import '.gitlab-ci/base_jobs.libsonnet')(vars);

local stages_list = [
  // gitlab-ci stages
  'docker_base',
  'docker_build',
  'deploy_preview',
  'tests',
  'integration',
  'docker_release',
  'deploy_staging',
  'teardown',
];

local stages = utils.set(stages_list);

// List CI jobs
local jobs = {
  // Helpers
  local onlyMaster = {
    only: ['master', 'tags'],
  },

  local onlyBranch = {
    only: ['branches'],
    except: ['master']
  },

  'container-base-build': baseJob.dockerBuild {
    // ! Only master/tags
    // Update the base container
    stage: stages.docker_base,
    script: [
      'docker build --no-cache' +
      ' -t %s -f quay-base.dockerfile .' % images.base.name,
      'docker push %s' % images.base.name,
    ],
    only: ["schedules"]
  },

  'container-build': baseJob.dockerBuild {
    // Build and push the quay container.
    // Docker Tag is the branch/tag name
    stage: stages.docker_build,
    script: [
      'docker build --no-cache -t %s -f Dockerfile .' % images.quayci.name,
      'docker run --rm %s cat ALEMBIC_HEAD > /tmp/ALEMBIC_HEAD' % images.quayci.name,
      'export MIGRATION_HEAD=`cat /tmp/ALEMBIC_HEAD | cut -d" " -f1`',
      'echo $MIGRATION_HEAD',
      'docker build --label db-schema-head=$MIGRATION_HEAD -t %s -f Dockerfile .' % images.quayci.name,
      'docker push %s' % images.quayci.name],
  },

  'container-release': baseJob.dockerBuild + onlyMaster {
    // ! Only master/tags
    // push the container to the 'prod' repository
    local repo_with_sha = images.release.name,
    stage: stages.docker_release,
    script: [
      'docker pull %s' % images.quayci.name,
      'docker tag %s %s' % [images.quayci.name, repo_with_sha],
      'docker push %s' % [repo_with_sha], # @TODO(ant31) add signing
    ],
  },

  // Unit-tests
  local unittest_stage = baseJob.QuayTest {
    stage: stages.tests },
  local pytest_cmd = 'py.test --cov="." --cov-report=html --cov-report=term-missing' +
                     ' --timeout=3600 --verbose -x --color=no --show-count ',
  'unit-tests': unittest_stage {
    coverage: @"/^TOTAL.*\s+(\d+\%)\s*$/",
    script: [
      pytest_cmd + ' ./',],
    after_script: [
      'cp -r $QUAYDIR/htmlcov/ $CI_PROJECT_DIR/coverage'
    ],
    artifacts: {
      paths: ['coverage',]
    }
  },

  'registry-tests': unittest_stage {
    script: [ pytest_cmd + ' ./test/registry_tests.py'],
    coverage: @"/^TOTAL.*\s+(\d+\%)\s*$/",
  },

  // UI tests
  'karma-tests': unittest_stage {
    script: [
      'curl -Ss https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add -',
      'echo "deb http://dl.google.com/linux/chrome/deb/ stable main" > /etc/apt/sources.list.d/google.list',
      'apt-get update -yqqq',
      'apt-get install -y google-chrome-stable',
      'yarn test'
    ] },

  // Integration API
  local integration_test = baseJob.EndToEndTest {
    stage: stages.integration
  },

  'protractor_e2e': integration_test {
    initDb:: true,
    variables+: {"APP_HOST": "http://localhost:80"},
    services+: [{name: "selenium/standalone-chrome:3.4.0"}],
    before_script+: [
      "curl -sL https://deb.nodesource.com/setup_8.x | bash -",
      "apt-get install -y nodejs",
      "./node_modules/.bin/webdriver-manager update",
    ],
    allow_failure: true,
    script+: [
      "yarn e2e",
    ]
  },

  'appr_e2e': integration_test {
    initDb:: true,
    image: {name: "quay.io/appr/appr:kubectl"},
    before_script: [],
    script+: [
      "appr version localhost:80",
      "appr list localhost:80",
    ],
    allow_failure: true,
  },

  'docker_e2e': integration_test {
    initDb:: true,
    image: {name: "docker"},
    services+: [{name: "docker:dind"}],
    variables+: {
      DOCKER_DRIVER: "overlay2",
      DOCKER_HOST: "tcp://localhost:2375"
    },
    before_script: [],
    script+: [
      "docker login localhost:80 -u devtable -p password",
      "docker pull nginx",
      "docker tag nginx localhost:80/devtable/nginx",
      "docker push localhost:80/devtable/nginx",
      "sleep 1",
      "docker pull localhost:80/devtable/nginx",
    ],
    allow_failure: true,
  },

  // Unit-tests with real databases
  local db_stage = { stage: stages.tests },
  local dbname = 'quay',

  postgres: db_stage + baseJob.dbTest('postgresql',
                                      image='postgres:9.6',
                                      env={ POSTGRES_PASSWORD: dbname, POSTGRES_USER: dbname }),

  mysql: db_stage + baseJob.dbTest('mysql+pymysql',
                                   image='mysql:latest',
                                   env={ [key]: dbname for key in ['MYSQL_ROOT_PASSWORD', 'MYSQL_DATABASE',
                                                                   'MYSQL_USER', 'MYSQL_PASSWORD'] }),

  "deploy-preview": baseJob.QuayDeploy {
    local _vars = self.localvars,
    stage: stages.deploy_preview,
    when: "manual",
    environment+: {
      on_stop: "stop-preview",
    },
  } + onlyBranch,

  "stop-preview": baseJob.QuayDeployStop {
    when: "manual",
    stage: stages.deploy_preview,
    script: [
      "kubectl delete ns $K8S_NAMESPACE",
      "kubectl get pods -o wide -n $K8S_NAMESPACE"
    ]
  } + onlyBranch,

  "deploy-staging": baseJob.QuayDeploy {
    local _vars = self.localvars,
    localvars+:: {
      image: images.release,
      domain: "quay-staging.k8s.devtable.com",
      namespace: "ci-staging",
      channels: ['master' , 'beta', 'latest'],
    },
    stage: stages.deploy_staging,
    script+: [],
    environment+: {
      name: "staging",
    },
    only: ['master']
  },

};

{
  stages: stages_list,
  variables: vars.global,
} + jobs
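
Note: the .gitlab-ci.yml below is generated from this Jsonnet (its first line reads
"Generated from .gitlab-ci.jsonnet"). The generation command itself is not part of
this diff, so the following is only a minimal sketch of the conversion, assuming the
jsonnet CLI and PyYAML are available; safe_dump's alphabetical key ordering matches
the ordering seen in the generated file:

    # evaluate the Jsonnet to JSON, then re-serialize as block-style YAML
    jsonnet .gitlab-ci.jsonnet > ci.json
    python -c 'import json, yaml; print(yaml.safe_dump(json.load(open("ci.json")), explicit_start=True, default_flow_style=False))' > .gitlab-ci.yml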

.gitlab-ci.yml

@@ -1,357 +0,0 @@
# Generated from .gitlab-ci.jsonnet
# DO NOT EDIT THIS FILE BY HAND -- YOUR CHANGES WILL BE OVERWRITTEN
---
appr_e2e:
  allow_failure: true
  before_script: []
  image:
    name: quay.io/appr/appr:kubectl
  script:
  - sleep 150
  - appr version localhost:80
  - appr list localhost:80
  services:
  - name: postgres:9.6
  - alias: quay
    name: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  - alias: db-init
    command:
    - /bin/sh
    - -c
    - sleep 30&& /quay-registry/venv/bin/python initdb.py&& sleep 3600
    name: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  stage: integration
  tags:
  - kubernetes
  variables:
    APP_HOST: localhost:80
    DB_URI: postgresql://quay:quay@localhost/quay
    GIT_STRATEGY: none
    POSTGRES_PASSWORD: quay
    POSTGRES_USER: quay
    PYTHONPATH: .
    QUAYDIR: /quay-registry
    QUAY_OVERRIDE_CONFIG: '{"SERVER_HOSTNAME": "localhost:80"}'
    SKIP_DB_SCHEMA: 'true'
    TEST: 'true'
    TEST_DATABASE_URI: postgresql://quay:quay@localhost/quay
container-base-build:
  before_script:
  - docker login -u $DOCKER_USER -p $DOCKER_PASS quay.io
  image: docker:git
  only:
  - schedules
  script:
  - docker build --no-cache -t quay.io/quay/quay-base:latest -f quay-base.dockerfile .
  - docker push quay.io/quay/quay-base:latest
  stage: docker_base
  tags:
  - kubernetes
  variables:
    DOCKER_DRIVER: overlay2
    DOCKER_HOST: tcp://docker-host.gitlab-runner.svc.cluster.local:2375
container-build:
  before_script:
  - docker login -u $DOCKER_USER -p $DOCKER_PASS quay.io
  image: docker:git
  script:
  - docker build --no-cache -t quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG} -f Dockerfile .
  - docker run --rm quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG} cat ALEMBIC_HEAD > /tmp/ALEMBIC_HEAD
  - export MIGRATION_HEAD=`cat /tmp/ALEMBIC_HEAD | cut -d" " -f1`
  - echo $MIGRATION_HEAD
  - docker build --label db-schema-head=$MIGRATION_HEAD -t quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG} -f Dockerfile .
  - docker push quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  stage: docker_build
  tags:
  - kubernetes
  variables:
    DOCKER_DRIVER: overlay2
    DOCKER_HOST: tcp://docker-host.gitlab-runner.svc.cluster.local:2375
container-release:
  before_script:
  - docker login -u $DOCKER_USER -p $DOCKER_PASS quay.io
  image: docker:git
  only:
  - master
  - tags
  script:
  - docker pull quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  - docker tag quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG} quay.io/quay/quay:${CI_COMMIT_REF_SLUG}-${SHA8}
  - docker push quay.io/quay/quay:${CI_COMMIT_REF_SLUG}-${SHA8}
  stage: docker_release
  tags:
  - kubernetes
  variables:
    DOCKER_DRIVER: overlay2
    DOCKER_HOST: tcp://docker-host.gitlab-runner.svc.cluster.local:2375
deploy-preview:
  before_script:
  - appr login -u $DOCKER_USER -p $DOCKER_PASS quay.io
  - cd deploy/quay-ci-app
  - echo -n 1.0.0-${CI_COMMIT_REF_SLUG} > VERSION
  - 'echo "{\"domain\": \"$QUAY_DOMAIN\", \"image\": \"quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}\", \"tag\": \"${CI_COMMIT_REF_SLUG}\"}" > params.json'
  - cat params.json
  environment:
    name: review/${CI_COMMIT_REF_SLUG}
    on_stop: stop-preview
    url: https://quay-${CI_COMMIT_REF_SLUG}.k8s.devtable.com
  except:
  - master
  image: quay.io/appr/appr:kubectl
  only:
  - branches
  script:
  - appr push quay.io/quay/quay-ci-app -f
  - appr deploy quay.io/quay/quay-ci-app@1.0.0-${CI_COMMIT_REF_SLUG} --namespace ci-quay-${CI_COMMIT_REF_SLUG} -x docker_user=$DOCKER_USER -x docker_pass=$DOCKER_PASS
  - kubectl get ingresses -n ci-quay-${CI_COMMIT_REF_SLUG} -o wide
  stage: deploy_preview
  tags:
  - kubernetes
  variables:
    K8S_NAMESPACE: ci-quay-${CI_COMMIT_REF_SLUG}
    QUAY_DOMAIN: quay-${CI_COMMIT_REF_SLUG}.k8s.devtable.com
  when: manual
deploy-staging:
  before_script:
  - appr login -u $DOCKER_USER -p $DOCKER_PASS quay.io
  - cd deploy/quay-ci-app
  - echo -n 1.0.0-${CI_COMMIT_REF_SLUG}-${SHA8} > VERSION
  - 'echo "{\"domain\": \"$QUAY_DOMAIN\", \"image\": \"quay.io/quay/quay:${CI_COMMIT_REF_SLUG}-${SHA8}\", \"tag\": \"${CI_COMMIT_REF_SLUG}-${SHA8}\"}" > params.json'
  - cat params.json
  environment:
    name: staging
    url: https://quay-staging.k8s.devtable.com
  image: quay.io/appr/appr:kubectl
  only:
  - master
  script:
  - appr push quay.io/quay/quay-ci-app -f
  - appr push quay.io/quay/quay-ci-app -c master
  - appr push quay.io/quay/quay-ci-app -c beta
  - appr push quay.io/quay/quay-ci-app -c latest
  - appr deploy quay.io/quay/quay-ci-app@1.0.0-${CI_COMMIT_REF_SLUG}-${SHA8} --namespace ci-staging -x docker_user=$DOCKER_USER -x docker_pass=$DOCKER_PASS
  - kubectl get ingresses -n ci-staging -o wide
  stage: deploy_staging
  tags:
  - kubernetes
  variables:
    K8S_NAMESPACE: ci-staging
    QUAY_DOMAIN: quay-staging.k8s.devtable.com
docker_e2e:
  allow_failure: true
  before_script: []
  image:
    name: docker
  script:
  - sleep 150
  - docker login localhost:80 -u devtable -p password
  - docker pull nginx
  - docker tag nginx localhost:80/devtable/nginx
  - docker push localhost:80/devtable/nginx
  - sleep 1
  - docker pull localhost:80/devtable/nginx
  services:
  - name: postgres:9.6
  - alias: quay
    name: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  - alias: db-init
    command:
    - /bin/sh
    - -c
    - sleep 30&& /quay-registry/venv/bin/python initdb.py&& sleep 3600
    name: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  - name: docker:dind
  stage: integration
  tags:
  - kubernetes
  variables:
    APP_HOST: localhost:80
    DB_URI: postgresql://quay:quay@localhost/quay
    DOCKER_DRIVER: overlay2
    DOCKER_HOST: tcp://localhost:2375
    GIT_STRATEGY: none
    POSTGRES_PASSWORD: quay
    POSTGRES_USER: quay
    PYTHONPATH: .
    QUAYDIR: /quay-registry
    QUAY_OVERRIDE_CONFIG: '{"SERVER_HOSTNAME": "localhost:80"}'
    SKIP_DB_SCHEMA: 'true'
    TEST: 'true'
    TEST_DATABASE_URI: postgresql://quay:quay@localhost/quay
karma-tests:
  before_script:
  - cd $QUAYDIR
  - source $QUAYDIR/venv/bin/activate
  image: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  script:
  - curl -Ss https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add -
  - echo "deb http://dl.google.com/linux/chrome/deb/ stable main" > /etc/apt/sources.list.d/google.list
  - apt-get update -yqqq
  - apt-get install -y google-chrome-stable
  - yarn test
  stage: tests
  tags:
  - kubernetes
  variables:
    GIT_STRATEGY: none
    PYTHONPATH: .
    QUAYDIR: /quay-registry
    TEST: 'true'
mysql:
  before_script:
  - cd $QUAYDIR
  - source $QUAYDIR/venv/bin/activate
  coverage: /^TOTAL.*\s+(\d+\%)\s*$/
  image: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  script:
  - sleep 30
  - alembic upgrade head
  - PYTHONPATH="." TEST="true" py.test --timeout=7200 --verbose --show-count ./ --color=no --ignore=endpoints/appr/test/ -x
  services:
  - mysql:latest
  stage: tests
  tags:
  - kubernetes
  variables:
    GIT_STRATEGY: none
    MYSQL_DATABASE: quay
    MYSQL_PASSWORD: quay
    MYSQL_ROOT_PASSWORD: quay
    MYSQL_USER: quay
    PYTHONPATH: .
    QUAYDIR: /quay-registry
    SKIP_DB_SCHEMA: 'true'
    TEST: 'true'
    TEST_DATABASE_URI: mysql+pymysql://quay:quay@localhost/quay
postgres:
  before_script:
  - cd $QUAYDIR
  - source $QUAYDIR/venv/bin/activate
  coverage: /^TOTAL.*\s+(\d+\%)\s*$/
  image: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  script:
  - sleep 30
  - alembic upgrade head
  - PYTHONPATH="." TEST="true" py.test --timeout=7200 --verbose --show-count ./ --color=no --ignore=endpoints/appr/test/ -x
  services:
  - postgres:9.6
  stage: tests
  tags:
  - kubernetes
  variables:
    GIT_STRATEGY: none
    POSTGRES_PASSWORD: quay
    POSTGRES_USER: quay
    PYTHONPATH: .
    QUAYDIR: /quay-registry
    SKIP_DB_SCHEMA: 'true'
    TEST: 'true'
    TEST_DATABASE_URI: postgresql://quay:quay@localhost/quay
protractor_e2e:
  allow_failure: true
  before_script:
  - cd $QUAYDIR
  - source $QUAYDIR/venv/bin/activate
  - curl -sL https://deb.nodesource.com/setup_8.x | bash -
  - apt-get install -y nodejs
  - ./node_modules/.bin/webdriver-manager update
  image: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  script:
  - sleep 150
  - yarn e2e
  services:
  - name: postgres:9.6
  - alias: quay
    name: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  - alias: db-init
    command:
    - /bin/sh
    - -c
    - sleep 30&& /quay-registry/venv/bin/python initdb.py&& sleep 3600
    name: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  - name: selenium/standalone-chrome:3.4.0
  stage: integration
  tags:
  - kubernetes
  variables:
    APP_HOST: http://localhost:80
    DB_URI: postgresql://quay:quay@localhost/quay
    GIT_STRATEGY: none
    POSTGRES_PASSWORD: quay
    POSTGRES_USER: quay
    PYTHONPATH: .
    QUAYDIR: /quay-registry
    QUAY_OVERRIDE_CONFIG: '{"SERVER_HOSTNAME": "localhost:80"}'
    SKIP_DB_SCHEMA: 'true'
    TEST: 'true'
    TEST_DATABASE_URI: postgresql://quay:quay@localhost/quay
registry-tests:
  before_script:
  - cd $QUAYDIR
  - source $QUAYDIR/venv/bin/activate
  coverage: /^TOTAL.*\s+(\d+\%)\s*$/
  image: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  script:
  - py.test --cov="." --cov-report=html --cov-report=term-missing --timeout=3600 --verbose -x --color=no --show-count ./test/registry_tests.py
  stage: tests
  tags:
  - kubernetes
  variables:
    GIT_STRATEGY: none
    PYTHONPATH: .
    QUAYDIR: /quay-registry
    TEST: 'true'
stages:
- docker_base
- docker_build
- deploy_preview
- tests
- integration
- docker_release
- deploy_staging
- teardown
stop-preview:
  before_script: []
  environment:
    action: stop
    name: review/${CI_COMMIT_REF_SLUG}
    url: https://quay-${CI_COMMIT_REF_SLUG}.k8s.devtable.com
  except:
  - master
  image: quay.io/appr/appr:kubectl
  only:
  - branches
  script:
  - kubectl delete ns $K8S_NAMESPACE
  - kubectl get pods -o wide -n $K8S_NAMESPACE
  stage: deploy_preview
  tags:
  - kubernetes
  variables:
    GIT_STRATEGY: none
    K8S_NAMESPACE: ci-quay-${CI_COMMIT_REF_SLUG}
    QUAY_DOMAIN: quay-${CI_COMMIT_REF_SLUG}.k8s.devtable.com
  when: manual
unit-tests:
  after_script:
  - cp -r $QUAYDIR/htmlcov/ $CI_PROJECT_DIR/coverage
  artifacts:
    paths:
    - coverage
  before_script:
  - cd $QUAYDIR
  - source $QUAYDIR/venv/bin/activate
  coverage: /^TOTAL.*\s+(\d+\%)\s*$/
  image: quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}
  script:
  - py.test --cov="." --cov-report=html --cov-report=term-missing --timeout=3600 --verbose -x --color=no --show-count ./
  stage: tests
  tags:
  - kubernetes
  variables:
    GIT_STRATEGY: none
    PYTHONPATH: .
    QUAYDIR: /quay-registry
    TEST: 'true'
variables:
  FAILFASTCI_NAMESPACE: quay
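
Note on the container-build job above: MIGRATION_HEAD is read from the ALEMBIC_HEAD
file baked into the image. The Dockerfile later in this diff produces that file with
"alembic heads | grep -E '^[0-9a-f]+ \(head\)$'", so it holds a single line of the
form "<revision id> (head)" and the cut call keeps only the revision id. A small
sketch of the extraction step, using a hypothetical revision id:

    echo 'c2abc99f8a0d (head)' > /tmp/ALEMBIC_HEAD   # hypothetical revision id
    export MIGRATION_HEAD=`cat /tmp/ALEMBIC_HEAD | cut -d" " -f1`
    echo $MIGRATION_HEAD                             # prints: c2abc99f8a0d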
.gitlab-ci/base_jobs.libsonnet

@@ -1,145 +0,0 @@
function(vars={})
{
  dockerBuild: {
    // base job to manage containers (build / push)
    variables: {
      DOCKER_DRIVER: "overlay2",
      DOCKER_HOST: "tcp://docker-host.gitlab-runner.svc.cluster.local:2375"
    },

    image: "docker:git",
    before_script: [
      "docker login -u $DOCKER_USER -p $DOCKER_PASS quay.io",
    ],

    tags: [
      "kubernetes",
    ],
  },

  QuayTest: {
    // base job to test the container
    image: vars.images.quayci.name,
    variables: {
      TEST: "true",
      PYTHONPATH: ".",
      QUAYDIR: "/quay-registry",
      GIT_STRATEGY: "none",
    },
    before_script: [
      "cd $QUAYDIR",
      "source $QUAYDIR/venv/bin/activate",
    ],
    tags: [
      "kubernetes",
    ],
  },

  QuayDeploy: {
    local this = self,
    local _vars = self.localvars,
    localvars:: {
      appversion: "1.0.0-%s" % self.image.tag,
      apprepo: "quay.io/quay/%s" % self.appname,
      appname: "quay-ci-app",
      domain: "quay-%s.k8s.devtable.com" % self.image.tag,
      namespace: "ci-quay-%s" % self.image.tag,
      image: vars.images.quayci,
      channels: [],
      pushargs: ["-f"],

      buildParams(extra_params={}):: (
        local params = {image: _vars.image.name,
                        tag: _vars.image.tag,
                        domain: "$QUAY_DOMAIN"} + extra_params;
        [
          "echo -n %s > VERSION" % _vars.appversion,
          'echo %s > params.json' % std.escapeStringJson(params),
          "cat params.json"
        ]),

      pushApp(repo="quay.io/quay", extra_args=['-f'], channels=[]):: (
        ["appr push %s %s" % [repo, std.join(" ", extra_args)]] +
        ["appr push %s -c %s" % [repo, channel] for channel in channels]
      ),
    },

    variables: {K8S_NAMESPACE: _vars.namespace,
                QUAY_DOMAIN: _vars.domain},
    image: "quay.io/appr/appr:kubectl",
    environment: {
      name: "review/%s" % _vars.image.tag,
      url: "https://%s" % _vars.domain,
    },
    tags: [
      "kubernetes",
    ],

    before_script: [
      "appr login -u $DOCKER_USER -p $DOCKER_PASS quay.io",
      "cd deploy/%s" % _vars.appname,
    ] + _vars.buildParams(),

    script:
      _vars.pushApp(_vars.apprepo, _vars.pushargs, _vars.channels) +
      [
        "appr deploy %s@%s --namespace %s -x docker_user=$DOCKER_USER -x docker_pass=$DOCKER_PASS" % [
          _vars.apprepo,
          _vars.appversion,
          _vars.namespace],
        "kubectl get ingresses -n %s -o wide" % _vars.namespace,
      ],
  },

  QuayDeployStop: self.QuayDeploy {
    variables+: {GIT_STRATEGY: "none"},
    environment+: {
      action: "stop"
    },
    before_script: [],
    script: [],
  },

  EndToEndTest: self.QuayTest {
    initDb:: true,
    services: [
      {name: "postgres:9.6"},
      {name: vars.images.quayci.name, alias: 'quay'}
    ] + if self.initDb == true then [
      {name: vars.images.quayci.name,
       alias: 'db-init',
       command: ["/bin/sh",
                 "-c",
                 "sleep 30" +
                 "&& /quay-registry/venv/bin/python initdb.py" +
                 "&& sleep 3600",]},
    ] else [],
    before_script: if self.image == vars.images.quayci.name
                   then super.before_script
                   else [],
    script: ['sleep 150'],
    variables+: {
      APP_HOST: "localhost:80",
      POSTGRES_PASSWORD: "quay",
      POSTGRES_USER: "quay",
      SKIP_DB_SCHEMA: 'true',
      TEST_DATABASE_URI: self['DB_URI'],
      QUAY_OVERRIDE_CONFIG: '' + {"SERVER_HOSTNAME": "localhost:80"},
      DB_URI: 'postgresql://quay:quay@localhost/quay'
    },
  },

  dbTest(scheme, image, env):: self.QuayTest {
    variables+: {
      SKIP_DB_SCHEMA: 'true',
      TEST_DATABASE_URI: '%s://quay:quay@localhost/quay' % scheme,
    } + env,
    coverage: @"/^TOTAL.*\s+(\d+\%)\s*$/",
    services: [image],
    script: [
      "sleep 30",
      "alembic upgrade head",
      'PYTHONPATH="." TEST="true" py.test --timeout=7200 --verbose --show-count ./ --color=no --ignore=endpoints/appr/test/ -x',
    ],
  },
}
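
The jobs in .gitlab-ci.jsonnet are assembled from these bases by Jsonnet object
composition: a concrete job inherits a base job and merges fields with the +:
operator. A self-contained sketch of that mechanism (the field values here are
illustrative only, not taken from the config above):

    # +: merges the nested object instead of replacing it
    jsonnet -e "local base = { variables: { TEST: 'true' }, tags: ['kubernetes'] };
                base + { variables+: { DB_URI: 'postgresql://quay:quay@localhost/quay' } }"
    # => { "tags": [ "kubernetes" ],
    #      "variables": { "DB_URI": "postgresql://quay:quay@localhost/quay", "TEST": "true" } }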
.gitlab-ci/utils.libsonnet

@@ -1,66 +0,0 @@
{
  local topSelf = self,
  # Generate a sequence array from 1 to i
  seq(i):: (
    [x for x in std.range(1, i)]
  ),

  objectFieldsHidden(obj):: (
    std.setDiff(std.objectFieldsAll(obj), std.objectFields(obj))
  ),

  objectFlatten(obj):: (
    // Merge 1 level dict depth into toplevel
    local visible = { [k]: obj[j][k]
                      for j in std.objectFieldsAll(obj)
                      for k in std.objectFieldsAll(obj[j]) };

    visible
  ),

  compact(array):: (
    [x for x in array if x != null]
  ),

  objectValues(obj):: (
    local fields = std.objectFields(obj);
    [obj[key] for key in fields]
  ),

  objectMap(func, obj):: (
    local fields = std.objectFields(obj);
    { [key]: func(obj[key]) for key in fields }
  ),

  capitalize(str):: (
    std.char(std.codepoint(str[0]) - 32) + str[1:]
  ),

  test: self.capitalize("test"),

  set(array)::
    { [key]: key for key in array },

  containerName(repo, tag):: "%s:%s" % [repo, tag],

  ci: {

    mergeJob(base_job, jobs, stage=null):: {
      [job_name]: base_job + jobs[job_name] +
                  if stage != null then { stage: stage } else {}
      for job_name in std.objectFields(jobs)
    },

    only(key):: (
      if key == "master"
      then { only: ['master', 'tags'] }
      else { only: ['branches'] }
    ),

    setManual(key, values):: (
      if std.objectHas(topSelf.set(values), key)
      then { when: 'manual' }
      else { only: ['branches'] }
    ),
  },
}
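
Two of these helpers do most of the work elsewhere in the CI config: set() turns an
array into an object keyed by its own values (which is how the stages lookup is
built), and containerName() joins a repository and a tag. Assuming the file lives at
.gitlab-ci/utils.libsonnet, as the import in .gitlab-ci.jsonnet suggests:

    jsonnet -e "local utils = import '.gitlab-ci/utils.libsonnet';
                [utils.set(['docker_base', 'tests']), utils.containerName('quay.io/quay/quay-ci', 'master')]"
    # => [ { "docker_base": "docker_base", "tests": "tests" }, "quay.io/quay/quay-ci:master" ]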
.gitlab-ci/vars.libsonnet

@@ -1,31 +0,0 @@
local utils = import "utils.libsonnet";

{
  global: {
    // .gitlab-ci.yaml top `variables` key
    FAILFASTCI_NAMESPACE: "quay",
  },

  // internal variables
  images: {
    // Quay initial image, used in the Dockerfile FROM clause
    base: { repo: "quay.io/quay/quay-base", tag: "latest",
            name: utils.containerName(self.repo, self.tag),
    },

    // release is a copy of the quayci image to the 'prod' repository
    release: { repo: "quay.io/quay/quay",
               tag: "${CI_COMMIT_REF_SLUG}-${SHA8}",
               name: utils.containerName(self.repo, self.tag),
    },

    quayci: { repo: "quay.io/quay/quay-ci",
              tag: "${CI_COMMIT_REF_SLUG}",
              name: utils.containerName(self.repo, self.tag),
    },

    coverage: { repo: "quay.io/quay/quay-coverage", tag: "${CI_COMMIT_REF_SLUG}",
                name: utils.containerName(self.repo, self.tag),
    },
  },
}
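
Each image's name field is assembled from repo and tag via utils.containerName,
which is how the job scripts end up with references like
quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}. Assuming the file lives at
.gitlab-ci/vars.libsonnet and is evaluated from the repository root:

    jsonnet -e "local vars = import '.gitlab-ci/vars.libsonnet'; vars.images.quayci.name"
    # => "quay.io/quay/quay-ci:${CI_COMMIT_REF_SLUG}"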
.travis.yml

@@ -0,0 +1,103 @@
---
language: python
python: 2.7

sudo: required

services:
- docker

install: true

branches:
  only:
  - master

# Stop default database instances here to avoid port conflicts.
before_script:
- sudo service mysql stop
- sudo service postgresql stop

# Clean the cache if any step fails.
before_cache:
- scripts/ci fail-clean

cache:
  timeout: 1000
  directories:
  - $HOME/docker

stages:
- build
- test
- clean

# We should label the steps if Travis ever supports it:
# https://github.com/travis-ci/travis-ci/issues/5898
jobs:
  include:
  - stage: build
    name: Build
    script: scripts/ci build

  # To further shard, change the script to shard_X_of_XS and add new steps
  - stage: test
    name: Unit tests (shard 1)
    script: scripts/ci unit shard_1_of_2
  - stage: test
    name: Unit tests (shard 2)
    script: scripts/ci unit shard_2_of_2

  - stage: test
    name: Registry tests (shard 1)
    script: scripts/ci registry shard_1_of_5
  - stage: test
    name: Registry tests (shard 2)
    script: scripts/ci registry shard_2_of_5
  - stage: test
    name: Registry tests (shard 3)
    script: scripts/ci registry shard_3_of_5
  - stage: test
    name: Registry tests (shard 4)
    script: scripts/ci registry shard_4_of_5
  - stage: test
    name: Registry tests (shard 5)
    script: scripts/ci registry shard_5_of_5

  - stage: test
    name: Legacy registry tests
    script: scripts/ci registry_old

  - stage: test
    name: Custom TLS certs test
    script: scripts/ci certs_test

  - stage: test
    name: Gunicorn worker test
    script: scripts/ci gunicorn_test

  - stage: test
    name: MySQL unit tests (shard 1)
    script: scripts/ci mysql shard_1_of_2
  - stage: test
    name: MySQL unit tests (shard 2)
    script: scripts/ci mysql shard_2_of_2

  - stage: test
    name: Postgres unit tests (shard 1)
    script: scripts/ci postgres shard_1_of_2
  - stage: test
    name: Postgres unit tests (shard 2)
    script: scripts/ci postgres shard_2_of_2

  - stage: clean
    name: Cleanup
    script: scripts/ci clean

notifications:
  slack:
    rooms:
    - secure: "fBR3YMXaOkoX2Iz7oSJVAw9zrcDoqwadiMEWTWhx7Ic0zoM8IieD2EWIcDHAoGpqf3ixHkc1v/iLBpbWHgvK7TkrSrGEbFyEmu/uomuHU8oGTiazWCbMWg9T2mhWYFyVaKtt8bzMbFo8k72kYK/NWV8bR4W/Qe/opkH2GGzfhZA="
    on_success: change
    on_failure: always
    on_pull_requests: false

CHANGELOG.md

@@ -1,3 +1,135 @@
### v3.1.2

- Fixed: Repository mirroring properly updates status
- Fixed: Application repositories in public namespaces shown in UI
- Fixed: Description of log operations in UI
- Fixed: Quay V3 upgrade fails with "id field missing from v1Compatibility JSON"
- Fixed: Security token for storage proxy properly URL encoded

### v3.1.1

- Fixed: Quoting of username/password for repository mirror
- Fixed: Changing next sync date in repository mirror UI
- Fixed: Enable cancel button in repository mirror UI

### v3.1.0

- Added: New Repository Mirror functionality to continuously synchronize repositories from external source registries into Quay
- Added: New Repository Mode setting (Normal, Mirrored, Read-Only) to indicate how a repository is updated
- Added: New Quay Setup Operator (Dev Preview) to automate configuring Quay on OpenShift
- Added: Support for using Red Hat OpenShift Container Storage 3 as a Quay storage backend
- Added: Support for using the Crunchy Data Operator to deploy Postgresql as Quay database
- Added: Ability to use build ARGS as first line in Dockerfiles in Quay builds
- Added: New Red Hat color scheme in Quay web UI
- Fixed: Display of repo_verb logs in logs panel
- Fixed: Ensure robot accounts being granted access actually belong in the same namespace
- Fixed: Numerous documentation improvements

### v3.0.5

- Fixed: LDAP config error when user search results exceed 1000 objects ([QUAY-1736](https://jira.coreos.com/browse/QUAY-1736))
- Fixed: Remove obsolete 01_copy_syslog_config.sh ([QUAY-1768](https://jira.coreos.com/browse/QUAY-1768))
- Fixed: Config tool fails to set up database when password string contains "$" ([QUAY-1510](https://jira.coreos.com/browse/QUAY-1510))
- Added: Config flag to disable TLSv1.0 support ([QUAY-1726](https://jira.coreos.com/browse/QUAY-1726))

### v3.0.4

- Fixed: Package vulnerability notifications now shown in UI
- Fixed: Error deleting manifest after pushing new tag
- Fixed: Manifest now shown in UI for all types
- Fixed: CSRF rotation corrected
- Fixed: nginx access and error logs now to stdout

### v3.0.3

- Fixed: Security scan notifications endpoint not working (part #2) (#3472)
- Fixed: Exception raised during parallel pushes of same manifest on Postgres (#3478)
- Fixed: Connection pooling was ignoring environment variable (#3480)
- Fixed: Exception when in OAuth approval flow (#3491)

### v3.0.2

- Fixed: Configuration tool now operates in disconnected environments (#3468)
- Fixed: Security scan notifications endpoint not working (#3472)

### v3.0.1

- Fixed: Instance health endpoint (`/health/instance`) (#3467)

### v3.0.0

**IMPORTANT NOTE:** This release is a **major** release and has special upgrade instructions. Please see the upgrade instructions documentation.

- Added: Full support for Docker Manifest Version 2, Schema 2, including support for manifest lists and Windows images
- Added: New, distinct configuration tool for Quay that can be run outside of Quay itself and perform in-place configuration changes
- Added: Disabling of V1 push support by default and support for whitelist-enabling specific namespaces for this legacy protocol (#3398)
- Added: Full support for blob mounting via the Docker protocol (#3057)
- Added: Have all registry operations be disabled if a namespace is disabled (#3091)
- Added: Allow syncing of team members from LDAP/Keystone groups, even if user creation is disabled (#3089)
- Added: Add a feature flag to allow username confirmation to be disabled (#3099)
- Added: New indexes which should result in significant database performance when accessing lists of tags
- Added: Add support for POST on OIDC endpoints, to support those providers that POST back (#3246)
- Added: Add support for configuration of the claims required for OIDC authentication (#3246)
- Added: Have the instance health check verify the disk space available to ensure it doesn’t run out and cause problems for nginx (#3241)
- Added: Support for basic auth on security scanner API endpoints (#3255)
- Added: Support for geo-blocking pulls in a namespace from a country (#3300)

- Fixed: Ensure that starred public repositories appear in the starred repositories list (#3098)
- Fixed: Add rate limiting to the catalog endpoint (#3106)
- Fixed: Have the catalog endpoint return empty for a namespace if it is disabled (#3106)
- Fixed: Have user logs start writing to a new LogEntry3 table, which has a BigInteger ID column, to ensure no overflow
- Fixed: Improve loading of action logs to be less jumpy (#3299)
- Fixed: Ensure that all upload segments are deleted in Swift storage engine once no longer necessary (#3260)
- Fixed: Handling of unicode in manifests (#3325)
- Fixed: Unauthorized request handling under podman for public repositories when anonymous access is disabled (#3365)

### v2.9.2

**IMPORTANT NOTE:** This release fixes a bug in which the deletion of namespaces did not result in the deletion of robot accounts under that namespace. While this is not a security issue (no permissions or credentials are leaked), it can appear unusual to users, so an upgrade is highly recommended. This change also includes a migration that cleans up the aforementioned robot accounts, so the migration step can take **several minutes**. Please plan accordingly.

- Added: Support for custom query parameters on OIDC endpoints (#3050)
- Added: Configurable options for search page length and maximum number of pages (#3060)
- Added: Better messaging for when the maximum search page is reached (#3060)
- Added: Support for browser notifications (#3068)

- Fixed: Robot accounts were not being immediately deleted under namespaces (#3071)
- Fixed: Setup under latest versions of Kubernetes (#3051)
- Fixed: Viewing of logs in repositories with many, many logs (#3082)
- Fixed: Filtering of deleting users and organizations in superuser panel (#3080)
- Fixed: Incorrect information displayed for builds triggered by deleted build triggers (#3078)
- Fixed: Robots could not be created with empty descriptions (#3073)
- Fixed: Inability to find Dockerfile in certain archives (#3072)
- Fixed: Display of empty tab in credentials dialog under certain circumstances (#3061)
- Fixed: Overflow of robot names when extremely long (#3062)
- Fixed: Respect CPU affinity when determining number of workers to run (#3064)
- Fixed: Breakage in RECAPTCHA support (#3065)

### v2.9.1

**IMPORTANT NOTE:** This release fixes the 2.9.0 migration. If you experienced an error during the 2.9.0 migration, manually roll back and then upgrade your Quay instance to 2.9.1.

- Fixed: Specify default server value for new integer fields added (#3052)
- Fixed: Overflow of repository grid UI (#3049)

### v2.9.0

- Added: Automatic cleanup of expired external application tokens (#3002)
- Added: Make deletions of namespaces occur in the background (#3014)
- Added: Ability to disable build triggers (#2892)
- Added: Have repeatedly failing build triggers be automatically disabled (#2892)
- Added: Automatic caching of registry Blob data for faster pull operations (#3022)
- Added: Creation date/time, last usage date/time and other metadata for robot accounts (#3024)
- Added: Collaborators view under organizations, for viewing non-members (#3025)

- Fixed: Make superusers APIs for users and organizations visible in the API browser (#3017)
- Fixed: Better messaging when attempting to create a team that already exists (#3006)
- Fixed: Prevent possible reflected text attacks by limiting API access (#2987)
- Fixed: Have checkable menus in UI respect filters (#3013)
- Fixed: Users being invited to a new organization must always be invited (#3029)
- Fixed: Removed all license requirements in Quay (#3031)
- Fixed: Squashed images with hard links pointing to deleted files no longer fail (#3032)
- Fixed: 500 error when trying to pull certain images via torrent (#3036)

### v2.8.0

- Added: Support for Azure Blob Storage (#2902)

@@ -181,9 +313,9 @@
### v2.1.0

**IMPORTANT NOTE FOR POSTGRES USERS:** This release contains a migration which adds full-text searching capabilities to Quay Enterprise. In order to support this feature, the migration will attempt to create the `pg_trgm` extension in the database. This operation requires **superuser access** to run and requires the extension to be installed. See https://coreos.com/quay-enterprise/docs/latest/postgres-additional-modules.html for more information on installing the extension.
**IMPORTANT NOTE FOR POSTGRES USERS:** This release contains a migration which adds full-text searching capabilities to Red Hat Quay. In order to support this feature, the migration will attempt to create the `pg_trgm` extension in the database. This operation requires **superuser access** to run and requires the extension to be installed. See https://coreos.com/quay-enterprise/docs/latest/postgres-additional-modules.html for more information on installing the extension.

If the user given to Quay Enterprise is not a superuser, please temporarily grant superuser access to the Quay Enterprise user in the database (or change the user in config) **before** upgrading.
If the user given to Red Hat Quay is not a superuser, please temporarily grant superuser access to the Red Hat Quay user in the database (or change the user in config) **before** upgrading.

- Added: Full text search support (#2272)
- Added: OIDC support (#2300, #2348)

@@ -591,7 +723,7 @@ In order to upgrade to this version, your cluster must contain a valid license,
### v1.13.0

- Added new Quay Enterprise rebranding (#723, #738, #735, #745, #746, #748, #747, #751)
- Added new Red Hat Quay rebranding (#723, #738, #735, #745, #746, #748, #747, #751)
- Added a styled 404 page (#683)
- Hid the run button from users that haven't created a trigger (#727)
- Added timeouts to calls to GitLab, Bitbucket, GitHub APIs (#636, #633, #631, #722)

Dockerfile

@@ -1,60 +1,128 @@
# vim:ft=dockerfile
FROM centos:7
LABEL maintainer "thomasmckay@redhat.com"

FROM quay.io/quay/quay-base:latest
ENV PYTHON_VERSION=2.7 \
    PATH=$HOME/.local/bin/:$PATH \
    PYTHONUNBUFFERED=1 \
    PYTHONIOENCODING=UTF-8 \
    LC_ALL=en_US.UTF-8 \
    LANG=en_US.UTF-8 \
    PIP_NO_CACHE_DIR=off

ENV QUAYDIR /quay-registry
ENV QUAYCONF /quay-registry/conf
ENV QUAYPATH "."

RUN mkdir $QUAYDIR
WORKDIR $QUAYDIR

COPY requirements.txt requirements-tests.txt ./

# Check python dependencies for the GPL
# Due to the following bug, pip results must be piped to a file before grepping:
# https://github.com/pypa/pip/pull/3304
RUN cat requirements.txt | grep -v "^-e" | awk -F'==' '{print $1}' | xargs venv/bin/pip --disable-pip-version-check show > pipinfo.txt && \
    test -z $(cat pipinfo.txt | grep GPL | grep -v LGPL) && \
    rm pipinfo.txt

RUN virtualenv --distribute venv \
    && venv/bin/pip install -r requirements.txt \
    && venv/bin/pip install -r requirements-tests.txt \
    && venv/bin/pip freeze

# Install front-end dependencies
# JS dependencies
COPY yarn.lock package.json tsconfig.json webpack.config.js tslint.json ./
RUN yarn install --ignore-engines

# JS compile
COPY static static
RUN yarn build \
    && jpegoptim static/img/**/*.jpg \
    && optipng -clobber -quiet static/img/**/*.png
RUN INSTALL_PKGS="\
    python27 \
    python27-python-pip \
    rh-nginx112 rh-nginx112-nginx \
    openldap \
    scl-utils \
    gcc-c++ git \
    openldap-devel \
    gpgme-devel \
    dnsmasq \
    memcached \
    openssl \
    skopeo \
    " && \
    yum install -y yum-utils && \
    yum install -y epel-release centos-release-scl && \
    yum -y --setopt=tsflags=nodocs --setopt=skip_missing_names_on_install=False install $INSTALL_PKGS && \
    yum -y update && \
    yum -y clean all

COPY . .

RUN scl enable python27 "\
    pip install --upgrade setuptools pip && \
    pip install -r requirements.txt --no-cache && \
    pip install -r requirements-tests.txt --no-cache && \
    pip freeze && \
    mkdir -p $QUAYDIR/static/webfonts && \
    mkdir -p $QUAYDIR/static/fonts && \
    mkdir -p $QUAYDIR/static/ldn && \
    PYTHONPATH=$QUAYPATH python -m external_libraries \
    "

RUN cp -r $QUAYDIR/static/ldn $QUAYDIR/config_app/static/ldn && \
    cp -r $QUAYDIR/static/fonts $QUAYDIR/config_app/static/fonts && \
    cp -r $QUAYDIR/static/webfonts $QUAYDIR/config_app/static/webfonts

# Check python dependencies for GPL
# Due to the following bug, pip results must be piped to a file before grepping:
# https://github.com/pypa/pip/pull/3304
# 'docutils' is a setup dependency of botocore required by s3transfer. It's under
# GPLv3, and so is manually removed.
RUN rm -Rf /opt/rh/python27/root/usr/lib/python2.7/site-packages/docutils && \
    scl enable python27 "pip freeze" | grep -v '^-e' | awk -F == '{print $1}' | grep -v docutils > piplist.txt && \
    scl enable python27 "xargs -a piplist.txt pip --disable-pip-version-check show" > pipinfo.txt && \
    test -z "$(cat pipinfo.txt | grep GPL | grep -v LGPL)" && \
    rm -f piplist.txt pipinfo.txt

# # Front-end
RUN curl --silent --location https://rpm.nodesource.com/setup_8.x | bash - && \
    yum install -y nodejs && \
    curl --silent --location https://dl.yarnpkg.com/rpm/yarn.repo | tee /etc/yum.repos.d/yarn.repo && \
    rpm --import https://dl.yarnpkg.com/rpm/pubkey.gpg && \
    yum install -y yarn && \
    yarn install --ignore-engines && \
    yarn build && \
    yarn build-config-app

# TODO: Build jwtproxy in dist-git
# https://jira.coreos.com/browse/QUAY-1315
RUN curl -fsSL -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.3/jwtproxy-linux-x64 && \
    chmod +x /usr/local/bin/jwtproxy

# TODO: Build prometheus-aggregator in dist-git
# https://jira.coreos.com/browse/QUAY-1324
RUN curl -fsSL -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator &&\
    chmod +x /usr/local/bin/prometheus-aggregator

# Update local copy of AWS IP Ranges.
RUN curl https://ip-ranges.amazonaws.com/ip-ranges.json -o util/ipresolver/aws-ip-ranges.json
RUN curl -fsSL https://ip-ranges.amazonaws.com/ip-ranges.json -o util/ipresolver/aws-ip-ranges.json

# Set up the init system
RUN mkdir -p /etc/my_init.d /etc/systlog-ng /usr/local/bin /etc/monit static/fonts static/ldn /usr/local/nginx/logs/ \
    && cp $QUAYCONF/init/*.sh /etc/my_init.d/ \
    && cp $QUAYCONF/init/syslog-ng.conf /etc/syslog-ng/ \
    && cp $QUAYCONF/kill-buildmanager.sh /usr/local/bin/kill-buildmanager.sh \
    && cp $QUAYCONF/monitrc /etc/monit/monitrc \
    && chmod 0600 /etc/monit/monitrc \
    && cp $QUAYCONF/init/logrotate.conf /etc/logrotate.conf \
    && cp .git/HEAD GIT_HEAD \
    && rm -rf /etc/service/syslog-forwarder
RUN ln -s $QUAYCONF /conf && \
    mkdir /var/log/nginx && \
    ln -sf /dev/stdout /var/log/nginx/access.log && \
    ln -sf /dev/stdout /var/log/nginx/error.log && \
    chmod -R a+rwx /var/log/nginx

RUN ln -s $QUAYCONF /conf
# Cleanup
RUN UNINSTALL_PKGS="\
    gcc-c++ \
    openldap-devel \
    gpgme-devel \
    optipng \
    kernel-headers \
    " && \
    yum remove -y $UNINSTALL_PKGS && \
    yum clean all && \
    rm -rf /var/cache/yum /tmp/* /var/tmp/* /root/.cache

# Cleanup any NPM-related stuff.
# RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev libgpgme11-dev nodejs jpegoptim optipng w3m \
#     && apt-get autoremove -y \
#     && apt-get clean
#     && rm -rf /root/.npm /.npm /usr/local/lib/node_modules /usr/share/yarn/node_modules \
#     /root/node_modules /node_modules /grunt
RUN PYTHONPATH=$QUAYPATH venv/bin/alembic heads | grep -E '^[0-9a-f]+ \(head\)$' > ALEMBIC_HEAD
EXPOSE 8080 8443 7443

RUN ./scripts/detect-config.sh
RUN chgrp -R 0 $QUAYDIR && \
    chmod -R g=u $QUAYDIR

CMD ./quay-entrypoint.sh
RUN mkdir /datastorage && chgrp 0 /datastorage && chmod g=u /datastorage && \
    mkdir -p /var/log/nginx && chgrp 0 /var/log/nginx && chmod g=u /var/log/nginx && \
    mkdir -p /conf/stack && chgrp 0 /conf/stack && chmod g=u /conf/stack && \
    mkdir -p /tmp && chgrp 0 /tmp && chmod g=u /tmp && \
    chmod g=u /etc/passwd

RUN chgrp 0 /var/opt/rh/rh-nginx112/log/nginx && chmod g=u /var/opt/rh/rh-nginx112/log/nginx

VOLUME ["/var/log", "/datastorage", "/tmp", "/conf/stack"]

ENTRYPOINT ["/quay-registry/quay-entrypoint.sh"]
CMD ["registry"]

# root required to create and install certs
# https://jira.coreos.com/browse/QUAY-1468
# USER 1001
@@ -0,0 +1,8 @@
FROM quay-ci-base
RUN mkdir -p conf/stack
RUN rm -rf test/data/test.db
ENV ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE remove-old-fields
ADD cirun.config.yaml conf/stack/config.yaml
RUN /usr/bin/scl enable python27 rh-nginx112 "LOGGING_LEVEL=INFO python initdb.py"
ENTRYPOINT ["/quay-registry/quay-entrypoint.sh"]
CMD ["registry"]

Dockerfile.old

@@ -1,189 +0,0 @@
# vim:ft=dockerfile

FROM phusion/baseimage:0.9.19

ENV DEBIAN_FRONTEND noninteractive
ENV HOME /root
ENV QUAYCONF /quay/conf
ENV QUAYDIR /quay
ENV QUAYPATH "."

RUN mkdir $QUAYDIR
WORKDIR $QUAYDIR

# This is so we don't break http golang/go#17066
# When Ubuntu has nginx >= 1.11.0 we can switch back.
RUN add-apt-repository ppa:nginx/development

# Add Yarn repository until it is officially added to Ubuntu
RUN curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add -
RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list

# Install system packages
RUN apt-get update && apt-get upgrade -y # 26MAY2017
RUN apt-get install -y \
    dnsmasq \
    g++ \
    gdb \
    gdebi-core \
    git \
    jpegoptim \
    libevent-2.0.5 \
    libevent-dev \
    libffi-dev \
    libfreetype6-dev \
    libgpgme11 \
    libgpgme11-dev \
    libjpeg62 \
    libjpeg62-dev \
    libjpeg8 \
    libldap-2.4-2 \
    libldap2-dev \
    libmagic1 \
    libpq-dev \
    libpq5 \
    libsasl2-dev \
    libsasl2-modules \
    monit \
    nginx \
    nodejs \
    optipng \
    openssl \
    python-dbg \
    python-dev \
    python-pip \
    python-virtualenv \
    yarn=0.22.0-1 \
    w3m

# Install python dependencies

ADD requirements.txt requirements.txt
RUN virtualenv --distribute venv
RUN venv/bin/pip install -r requirements.txt # 07SEP2016
RUN venv/bin/pip freeze

# Check python dependencies for the GPL
# Due to the following bug, pip results must be piped to a file before grepping:
# https://github.com/pypa/pip/pull/3304
RUN cat requirements.txt | grep -v "^-e" | awk -F'==' '{print $1}' | xargs venv/bin/pip --disable-pip-version-check show > pipinfo.txt && \
    test -z $(cat pipinfo.txt | grep GPL | grep -v LGPL) && \
    rm pipinfo.txt

# Install cfssl
RUN mkdir /gocode
ENV GOPATH /gocode
RUN curl -O https://storage.googleapis.com/golang/go1.6.linux-amd64.tar.gz && \
    tar -xvf go1.6.linux-amd64.tar.gz && \
    mv go /usr/local && \
    rm -rf go1.6.linux-amd64.tar.gz && \
    /usr/local/go/bin/go get -u github.com/cloudflare/cfssl/cmd/cfssl && \
    /usr/local/go/bin/go get -u github.com/cloudflare/cfssl/cmd/cfssljson && \
    cp /gocode/bin/cfssljson /bin/cfssljson && \
    cp /gocode/bin/cfssl /bin/cfssl && \
    rm -rf /gocode && rm -rf /usr/local/go

# Install jwtproxy
RUN curl -L -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.1/jwtproxy-linux-x64
RUN chmod +x /usr/local/bin/jwtproxy

# Install prometheus-aggregator
RUN curl -L -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator
RUN chmod +x /usr/local/bin/prometheus-aggregator

# Install front-end dependencies
RUN ln -s /usr/bin/nodejs /usr/bin/node
ADD package.json package.json
ADD tsconfig.json tsconfig.json
ADD webpack.config.js webpack.config.js
ADD yarn.lock yarn.lock
RUN yarn install --ignore-engines

# Add static files
ADD static static

# Run Webpack
RUN yarn build

# Optimize our images
ADD static/img static/img
RUN jpegoptim static/img/**/*.jpg
RUN optipng -clobber -quiet static/img/**/*.png

RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev libgpgme11-dev nodejs jpegoptim optipng w3m
RUN apt-get autoremove -y
RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# Set up the init system
ADD conf/init/copy_config_files.sh /etc/my_init.d/
ADD conf/init/doupdatelimits.sh /etc/my_init.d/
ADD conf/init/copy_syslog_config.sh /etc/my_init.d/
ADD conf/init/certs_create.sh /etc/my_init.d/
ADD conf/init/certs_install.sh /etc/my_init.d/
ADD conf/init/nginx_conf_create.sh /etc/my_init.d/
ADD conf/init/runmigration.sh /etc/my_init.d/
ADD conf/init/syslog-ng.conf /etc/syslog-ng/
ADD conf/init/zz_boot.sh /etc/my_init.d/
ADD conf/init/service/ /etc/service/
RUN rm -rf /etc/service/syslog-forwarder

ADD conf/kill-buildmanager.sh /usr/local/bin/kill-buildmanager.sh
ADD conf/monitrc /etc/monit/monitrc
RUN chmod 0600 /etc/monit/monitrc

# remove after phusion/baseimage-docker#338 is fixed
ADD conf/init/logrotate.conf /etc/logrotate.conf

# TODO(ssewell): only works on a detached head, make work with ref
ADD .git/HEAD GIT_HEAD

# Add all of the files!
ADD . .
RUN mkdir static/fonts static/ldn

# Download any external libs.
RUN venv/bin/python -m external_libraries
RUN mkdir -p /usr/local/nginx/logs/

RUN pyclean .

# Cleanup any NPM-related stuff.
RUN rm -rf /root/.npm
RUN rm -rf .npm
RUN rm -rf /usr/local/lib/node_modules
RUN rm -rf /usr/share/yarn/node_modules
RUN rm -rf /root/node_modules
RUN rm -rf node_modules
RUN rm -rf grunt
RUN rm package.json yarn.lock

# Run the tests
ARG RUN_TESTS=true
ENV RUN_TESTS ${RUN_TESTS}

ENV RUN_ACI_TESTS False
ADD requirements-tests.txt requirements-tests.txt

RUN if [ "$RUN_TESTS" = true ]; then \
    venv/bin/pip install -r requirements-tests.txt ;\
    fi

RUN if [ "$RUN_TESTS" = true ]; then \
    TEST=true PYTHONPATH="." venv/bin/py.test --timeout=7200 --verbose \
    --show-count -x --color=no ./ && rm -rf /var/tmp/; \
    fi

RUN if [ "$RUN_TESTS" = true ]; then \
    TEST=true PYTHONPATH="." venv/bin/py.test --timeout=7200 --verbose \
    --show-count -x --color=no test/registry_tests.py && rm -rf /var/tmp/;\
    fi

RUN rm -rf /root/.cache

RUN PYTHONPATH=. venv/bin/alembic heads | grep -E '^[0-9a-f]+ \(head\)$' > ALEMBIC_HEAD

VOLUME ["/conf/stack", "/var/log", "/datastorage", "/tmp", "/conf/etcd"]

EXPOSE 443 8443 80
@ -0,0 +1,142 @@
|
|||
FROM registry.redhat.io/rhel7:7.7
LABEL maintainer "thomasmckay@redhat.com"

ENV PYTHON_VERSION=2.7 \
    PATH=$HOME/.local/bin/:$PATH \
    PYTHONUNBUFFERED=1 \
    PYTHONIOENCODING=UTF-8 \
    LC_ALL=en_US.UTF-8 \
    LANG=en_US.UTF-8 \
    PIP_NO_CACHE_DIR=off

ENV QUAYDIR /quay-registry
ENV QUAYCONF /quay-registry/conf
ENV QUAYPATH "."

RUN mkdir $QUAYDIR
WORKDIR $QUAYDIR

RUN INSTALL_PKGS="\
        python27 \
        python27-python-pip \
        rh-nginx112 rh-nginx112-nginx \
        openldap \
        scl-utils \
        gcc-c++ git \
        openldap-devel \
        gpgme-devel \
        dnsmasq \
        memcached \
        openssl \
        skopeo \
        " && \
    yum install -y yum-utils && \
    yum-config-manager --quiet --disable "*" >/dev/null && \
    yum-config-manager --quiet --enable \
        rhel-7-server-rpms \
        rhel-server-rhscl-7-rpms \
        rhel-7-server-optional-rpms \
        rhel-7-server-extras-rpms \
        --save >/dev/null && \
    yum -y --setopt=tsflags=nodocs --setopt=skip_missing_names_on_install=False install $INSTALL_PKGS && \
    yum -y update && \
    yum -y clean all

COPY . .

RUN scl enable python27 "\
    pip install --upgrade setuptools pip && \
    pip install -r requirements.txt --no-cache && \
    pip freeze && \
    mkdir -p $QUAYDIR/static/webfonts && \
    mkdir -p $QUAYDIR/static/fonts && \
    mkdir -p $QUAYDIR/static/ldn && \
    PYTHONPATH=$QUAYPATH python -m external_libraries \
    "

RUN cp -r $QUAYDIR/static/ldn $QUAYDIR/config_app/static/ldn && \
    cp -r $QUAYDIR/static/fonts $QUAYDIR/config_app/static/fonts && \
    cp -r $QUAYDIR/static/webfonts $QUAYDIR/config_app/static/webfonts

# Check python dependencies for GPL
# Due to the following bug, pip results must be piped to a file before grepping:
# https://github.com/pypa/pip/pull/3304
# 'docutils' is a setup dependency of botocore required by s3transfer. It's under
# GPLv3, and so is manually removed.
RUN rm -Rf /opt/rh/python27/root/usr/lib/python2.7/site-packages/docutils && \
    scl enable python27 "pip freeze" | grep -v '^-e' | awk -F == '{print $1}' | grep -v docutils > piplist.txt && \
    scl enable python27 "xargs -a piplist.txt pip --disable-pip-version-check show" > pipinfo.txt && \
    test -z "$(cat pipinfo.txt | grep GPL | grep -v LGPL)" && \
    rm -f piplist.txt pipinfo.txt

# Front-end
RUN curl --silent --location https://rpm.nodesource.com/setup_8.x | bash - && \
    yum install -y nodejs && \
    curl --silent --location https://dl.yarnpkg.com/rpm/yarn.repo | tee /etc/yum.repos.d/yarn.repo && \
    rpm --import https://dl.yarnpkg.com/rpm/pubkey.gpg && \
    yum install -y yarn && \
    yarn install --ignore-engines && \
    yarn build && \
    yarn build-config-app

# TODO: Build jwtproxy in dist-git
# https://jira.coreos.com/browse/QUAY-1315
RUN curl -fsSL -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.3/jwtproxy-linux-x64 && \
    chmod +x /usr/local/bin/jwtproxy

# TODO: Build prometheus-aggregator in dist-git
# https://jira.coreos.com/browse/QUAY-1324
RUN curl -fsSL -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator &&\
    chmod +x /usr/local/bin/prometheus-aggregator

# Update local copy of AWS IP Ranges.
RUN curl -fsSL https://ip-ranges.amazonaws.com/ip-ranges.json -o util/ipresolver/aws-ip-ranges.json

RUN ln -s $QUAYCONF /conf && \
    mkdir /var/log/nginx && \
    ln -sf /dev/stdout /var/log/nginx/access.log && \
    ln -sf /dev/stdout /var/log/nginx/error.log && \
    chmod -R a+rwx /var/log/nginx

# Cleanup
RUN UNINSTALL_PKGS="\
        gcc-c++ git \
        openldap-devel \
        gpgme-devel \
        optipng \
        kernel-headers \
        " && \
    yum remove -y $UNINSTALL_PKGS && \
    yum clean all && \
    rm -rf /var/cache/yum /tmp/* /var/tmp/* /root/.cache

EXPOSE 8080 8443 7443

RUN chgrp -R 0 $QUAYDIR && \
    chmod -R g=u $QUAYDIR

RUN mkdir /datastorage && chgrp 0 /datastorage && chmod g=u /datastorage && \
    mkdir -p /var/log/nginx && chgrp 0 /var/log/nginx && chmod g=u /var/log/nginx && \
    mkdir -p /conf/stack && chgrp 0 /conf/stack && chmod g=u /conf/stack && \
    mkdir -p /tmp && chgrp 0 /tmp && chmod g=u /tmp && \
    chmod g=u /etc/passwd

RUN chgrp 0 /var/opt/rh/rh-nginx112/log/nginx && chmod g=u /var/opt/rh/rh-nginx112/log/nginx

# Allow TLS certs to be created and installed as non-root user
RUN chgrp -R 0 /etc/pki/ca-trust/extracted && \
    chmod -R g=u /etc/pki/ca-trust/extracted && \
    chgrp -R 0 /etc/pki/ca-trust/source/anchors && \
    chmod -R g=u /etc/pki/ca-trust/source/anchors && \
    chgrp -R 0 /opt/rh/python27/root/usr/lib/python2.7/site-packages/requests && \
    chmod -R g=u /opt/rh/python27/root/usr/lib/python2.7/site-packages/requests && \
    chgrp -R 0 /opt/rh/python27/root/usr/lib/python2.7/site-packages/certifi && \
    chmod -R g=u /opt/rh/python27/root/usr/lib/python2.7/site-packages/certifi

VOLUME ["/var/log", "/datastorage", "/tmp", "/conf/stack"]

USER 1001

ENTRYPOINT ["/quay-registry/quay-entrypoint.sh"]
CMD ["registry"]
@ -0,0 +1,133 @@
FROM registry.redhat.io/rhel7:7.7
LABEL maintainer "thomasmckay@redhat.com"

ENV PYTHON_VERSION=2.7 \
    PATH=$HOME/.local/bin/:$PATH \
    PYTHONUNBUFFERED=1 \
    PYTHONIOENCODING=UTF-8 \
    LC_ALL=en_US.UTF-8 \
    LANG=en_US.UTF-8 \
    PIP_NO_CACHE_DIR=off

ENV QUAYDIR /quay-registry
ENV QUAYCONF /quay-registry/conf
ENV QUAYPATH "."

RUN mkdir $QUAYDIR
WORKDIR $QUAYDIR

RUN INSTALL_PKGS="\
        python27 \
        python27-python-pip \
        rh-nginx112 rh-nginx112-nginx \
        openldap \
        scl-utils \
        gcc-c++ git \
        openldap-devel \
        gpgme-devel \
        dnsmasq \
        memcached \
        openssl \
        skopeo \
        " && \
    yum install -y yum-utils && \
    yum-config-manager --quiet --disable "*" >/dev/null && \
    yum-config-manager --quiet --enable \
        rhel-7-server-rpms \
        rhel-server-rhscl-7-rpms \
        rhel-7-server-optional-rpms \
        rhel-7-server-extras-rpms \
        --save >/dev/null && \
    yum -y --setopt=tsflags=nodocs --setopt=skip_missing_names_on_install=False install $INSTALL_PKGS && \
    yum -y update && \
    yum -y clean all

COPY . .

RUN scl enable python27 "\
    pip install --upgrade setuptools pip && \
    pip install -r requirements.txt --no-cache && \
    pip freeze && \
    mkdir -p $QUAYDIR/static/webfonts && \
    mkdir -p $QUAYDIR/static/fonts && \
    mkdir -p $QUAYDIR/static/ldn && \
    PYTHONPATH=$QUAYPATH python -m external_libraries \
    "

RUN cp -r $QUAYDIR/static/ldn $QUAYDIR/config_app/static/ldn && \
    cp -r $QUAYDIR/static/fonts $QUAYDIR/config_app/static/fonts && \
    cp -r $QUAYDIR/static/webfonts $QUAYDIR/config_app/static/webfonts

# Check python dependencies for GPL
# Due to the following bug, pip results must be piped to a file before grepping:
# https://github.com/pypa/pip/pull/3304
# 'docutils' is a setup dependency of botocore required by s3transfer. It's under
# GPLv3, and so is manually removed.
RUN rm -Rf /opt/rh/python27/root/usr/lib/python2.7/site-packages/docutils && \
    scl enable python27 "pip freeze" | grep -v '^-e' | awk -F == '{print $1}' | grep -v docutils > piplist.txt && \
    scl enable python27 "xargs -a piplist.txt pip --disable-pip-version-check show" > pipinfo.txt && \
    test -z "$(cat pipinfo.txt | grep GPL | grep -v LGPL)" && \
    rm -f piplist.txt pipinfo.txt

# Front-end
RUN curl --silent --location https://rpm.nodesource.com/setup_8.x | bash - && \
    yum install -y nodejs && \
    curl --silent --location https://dl.yarnpkg.com/rpm/yarn.repo | tee /etc/yum.repos.d/yarn.repo && \
    rpm --import https://dl.yarnpkg.com/rpm/pubkey.gpg && \
    yum install -y yarn && \
    yarn install --ignore-engines && \
    yarn build && \
    yarn build-config-app

# TODO: Build jwtproxy in dist-git
# https://jira.coreos.com/browse/QUAY-1315
RUN curl -fsSL -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.3/jwtproxy-linux-x64 && \
    chmod +x /usr/local/bin/jwtproxy

# TODO: Build prometheus-aggregator in dist-git
# https://jira.coreos.com/browse/QUAY-1324
RUN curl -fsSL -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator &&\
    chmod +x /usr/local/bin/prometheus-aggregator

# Update local copy of AWS IP Ranges.
RUN curl -fsSL https://ip-ranges.amazonaws.com/ip-ranges.json -o util/ipresolver/aws-ip-ranges.json

RUN ln -s $QUAYCONF /conf && \
    mkdir /var/log/nginx && \
    ln -sf /dev/stdout /var/log/nginx/access.log && \
    ln -sf /dev/stdout /var/log/nginx/error.log && \
    chmod -R a+rwx /var/log/nginx

# Cleanup
RUN UNINSTALL_PKGS="\
        gcc-c++ git \
        openldap-devel \
        gpgme-devel \
        optipng \
        kernel-headers \
        " && \
    yum remove -y $UNINSTALL_PKGS && \
    yum clean all && \
    rm -rf /var/cache/yum /tmp/* /var/tmp/* /root/.cache

EXPOSE 8080 8443 7443

RUN chgrp -R 0 $QUAYDIR && \
    chmod -R g=u $QUAYDIR

RUN mkdir /datastorage && chgrp 0 /datastorage && chmod g=u /datastorage && \
    mkdir -p /var/log/nginx && chgrp 0 /var/log/nginx && chmod g=u /var/log/nginx && \
    mkdir -p /conf/stack && chgrp 0 /conf/stack && chmod g=u /conf/stack && \
    mkdir -p /tmp && chgrp 0 /tmp && chmod g=u /tmp && \
    chmod g=u /etc/passwd

RUN chgrp 0 /var/opt/rh/rh-nginx112/log/nginx && chmod g=u /var/opt/rh/rh-nginx112/log/nginx

VOLUME ["/var/log", "/datastorage", "/tmp", "/conf/stack"]

ENTRYPOINT ["/quay-registry/quay-entrypoint.sh"]
CMD ["registry"]

# root required to create and install certs
# https://jira.coreos.com/browse/QUAY-1468
# USER 1001
@ -0,0 +1,66 @@
# Project Quay Governance

Project Quay is run according to the guidelines specified below. This is a living document and is expected to evolve along with Project Quay itself.

## Principles

Project Quay strives to follow these principles at all times:
* Openness - Quay evolves and improves out in the open, with transparent work and decision making that is clear and well understood.
* Respectfulness - Quay is a project for a diverse community where different points of view are welcomed. Healthy and respectful discussions help us meet our goals and deliver a better end product.
* Meritocracy - In the Quay community all ideas are heard but only the best ideas help drive the project forward. As an open, respectful community we will judge all ideas on their technical merit and alignment with Quay's design principles.
* Accountability - The Quay community is accountable
  * to our users to deliver the best software possible
  * to the project to ensure each Contributor and Maintainer carries out their duties to the best of their abilities
  * to itself to ensure that Quay remains a project where individuals can be passionate about contributing their time and energy

## Maintainers

Maintainers play a special role to ensure that contributions align with the expected quality, consistency and long term vision for Project Quay. Each Maintainer is vital to the success of Project Quay and has decided to make the commitment to that cause. Being a Maintainer is difficult work and not for everyone. Therefore Project Quay will have a small group of Maintainers, as many as deemed necessary to handle the pipeline of contributions being made to the project.

### Becoming a Maintainer

Each Maintainer must also be a Contributor. Candidates for the Maintainer role are individuals who have made recent, substantial and recurring contributions to the project. The existing Maintainers will periodically identify Contributors and make recommendations to the community that those individuals become Maintainers. The Maintainers will then vote on the candidate, and if so agreed, the candidate will be invited to raise a PR to add their name into the MAINTAINERS.md file. Approval of that PR signals the Contributor is now a Maintainer.

### Responsibilities of a Maintainer

Project Quay's success depends on how well Maintainers perform their duties. Maintainers are responsible for monitoring Slack and e-mail lists, helping triage issues on the Project Quay JIRA board, reviewing PRs, ensuring responses are being provided to Contributors, and assisting with regular Project Quay releases. If Contributors are the lifeblood of an open source community, the Maintainers act as the heart, hands, eyes and ears, helping to keep the project moving and viable.

### Stepping Down as a Maintainer

A Maintainer may decide they are no longer interested in or able to carry out the role. In such a situation the Maintainer should notify the other Maintainers of their intention to step down and help identify a replacement from existing Contributors. Ideally the outgoing Maintainer will ensure that any outstanding work has been transitioned to another Maintainer. To carry out the actual removal, the outgoing Maintainer raises a PR against the MAINTAINERS.md file to remove their name.

## Contributors

Anyone can be a Contributor to Project Quay. No special approval is required; simply go through our Getting Started guide, fork one of our repositories and submit a PR. All types of contributions will be welcome, whether they come through bug reports via JIRA, code, or documentation.

## Sub-Projects

Project Quay will be primarily focused on the delivery of Quay itself but also contains various sub-projects such as Clair and Quay-Builders. Each sub-project must have its own dedicated repositories containing a MAINTAINERS.md file. Each sub-project will abide by this Governance model.

Requests for new sub-projects under Project Quay should be raised to the Maintainers.

## Code of Conduct

Project Quay abides by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).

## How Decisions Are Made

Most of the decision making for Project Quay will happen through the regular PR approval process. We stand by the notion that what exists in the Project Quay repositories is the end result of countless community-driven decisions.

When a more complex decision is required, for example a technical issue related to a PR, it is expected that involved parties will resolve the dispute in a respectful and efficient manner. If the dispute cannot be resolved between the involved parties then the Maintainers will review the dispute and come to an agreement via majority vote amongst themselves. All decision making should be tracked via a JIRA issue and performed transparently via the Project Quay communications channels.

## Project Quay Releases

On a regular basis, Project Quay will issue a release. The release cadence will not be strictly defined but should happen approximately every 3 months. Maintainers will be part of a rotating "Release Nanny" role whereby each Maintainer shares the responsibility of creating a Quay release.

Release duties include:
* Creating the Release Notes
* Verifying the automated tests have passed
* Building the necessary Quay, Clair-JWT, and Quay-Builder container images
* Publishing the container images to quay.io
* Updating the GitHub release pages
* Notifying the community of the new release

## DCO and Licenses

Project Quay uses the [Apache 2.0](https://opensource.org/licenses/Apache-2.0) license.
@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@ -1,3 +0,0 @@
Jake Moshenko <jake.moshenko@coreos.com> (@jakedt)
Jimmy Zelinskie <jimmy.zelinskie@coreos.com> (@jzelinskie)
Joseph Schorr <joseph.schorr@coreos.com> (@josephschorr)
59 Makefile
@ -1,5 +1,7 @@
SHELL := /bin/bash

export PATH := ./venv/bin:$(PATH)

SHA := $(shell git rev-parse --short HEAD )
REPO := quay.io/quay/quay
TAG := $(REPO):$(SHA)

@ -11,7 +13,7 @@ MODIFIED_FILES = $(shell git diff --name-only $(GIT_MERGE_BASED) | grep -E .+\.p
show-modified:
	echo $(MODIFIED_FILES)

.PHONY: all unit test pkgs build run clean
.PHONY: all unit-test registry-test registry-test-old buildman-test test pkgs build run clean

all: clean pkgs test build

@ -41,19 +43,50 @@ conf/stack/license: $(QUAY_CONFIG)/local/license
	ln -s $(QUAY_CONFIG)/local/license conf/stack/license

unit-test:
	TEST=true PYTHONPATH="." py.test \
	ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields TEST=true PYTHONPATH="." py.test \
		--cov="." --cov-report=html --cov-report=term-missing \
		--timeout=3600 --verbose -x \
		./

registry-test:
	TEST=true PYTHONPATH="." py.test \
	TEST=true ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields PYTHONPATH="." py.test \
		--cov="." --cov-report=html --cov-report=term-missing \
		--timeout=3600 --verbose --show-count -x \
		test/registry_tests.py
		test/registry/registry_tests.py

test: unit-test registry-test
registry-test-old:
	TEST=true PYTHONPATH="." ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields py.test \
		--cov="." --cov-report=html --cov-report=term-missing \
		--timeout=3600 --verbose --show-count -x \
		./test/registry_tests.py

buildman-test:
	TEST=true PYTHONPATH="." ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields py.test \
		--cov="." --cov-report=html --cov-report=term-missing \
		--timeout=3600 --verbose --show-count -x \
		./buildman/

certs-test:
	./test/test_certs_install.sh

full-db-test: ensure-test-db
	TEST=true PYTHONPATH=. QUAY_OVERRIDE_CONFIG='{"DATABASE_SECRET_KEY": "anothercrazykey!"}' \
		ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields alembic upgrade head
	TEST=true PYTHONPATH=. ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields \
		SKIP_DB_SCHEMA=true py.test --timeout=7200 \
		--verbose --show-count -x --ignore=endpoints/appr/test/ \
		./

clients-test:
	cd test/clients; python clients_test.py

test: unit-test registry-test registry-test-old certs-test

ensure-test-db:
	@if [ -z $(TEST_DATABASE_URI) ]; then \
		echo "TEST_DATABASE_URI is undefined"; \
		exit 1; \
	fi

PG_PASSWORD := quay
PG_USER := quay

@ -107,6 +140,15 @@ docker-build: pkgs build
	git checkout $(NAME)
	echo $(TAG)

app-sre-docker-build:
	# get named head (ex: branch, tag, etc..)
	export NAME=$(shell git rev-parse --abbrev-ref HEAD)
	# checkout commit so .git/HEAD points to full sha (used in Dockerfile)
	echo "$(SHA)"
	git checkout $(SHA)
	$(BUILD_CMD) -t ${IMG} .
	git checkout $(NAME)

run: license
	goreman start

@ -133,13 +175,6 @@ yapf-all:
yapf-diff:
	if [ $(MODIFIED_FILES_COUNT) -ne 0 ]; then yapf -d -p $(MODIFIED_FILES) ; fi

yapf:
ifneq (0,$(shell git diff-index HEAD | wc -l))
	echo "Failed, git dirty" && false
else ifneq (0,$(shell yapf -d -p $(MODIFIED_FILES) | wc -l))
	yapf -i -p $(MODIFIED_FILES)
	git commit -a -m "code-stye Yapf: $(MODIFIED_FILES_COUNT) files updated" -m "$(MODIFIED_FILES)"
endif

yapf-test:
	if [ `yapf -d -p $(MODIFIED_FILES) | wc -l` -gt 0 ] ; then false ; else true ;fi

@ -0,0 +1,69 @@
SHELL := /bin/bash
PYTEST_MARK ?= shard_1_of_1

export PATH := ./venv/bin:$(PATH)

.PHONY: all unit-test registry-test registry-test-old test

all: test

unit-test:
	TEST=true PYTHONPATH="." ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields py.test \
		-m $(PYTEST_MARK) \
		--cov="." --cov-report=html --cov-report=term-missing \
		--timeout=3600 --verbose -x \
		./

registry-test:
	TEST=true PYTHONPATH="." ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields py.test \
		-m $(PYTEST_MARK) \
		--cov="." --cov-report=html --cov-report=term-missing \
		--timeout=3600 --verbose --show-count -x \
		test/registry/registry_tests.py

registry-test-old:
	TEST=true PYTHONPATH="." ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields py.test \
		--cov="." --cov-report=html --cov-report=term-missing \
		--timeout=3600 --verbose --show-count -x \
		./test/registry_tests.py

certs-test:
	./test/test_certs_install.sh

gunicorn-tests:
	./test/test_gunicorn_running.sh

full-db-test: ensure-test-db
	TEST=true PYTHONPATH=. QUAY_OVERRIDE_CONFIG='{"DATABASE_SECRET_KEY": "anothercrazykey!"}' \
		ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields alembic upgrade head
	TEST=true PYTHONPATH=. ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields \
		SKIP_DB_SCHEMA=true py.test --timeout=7200 \
		-m $(PYTEST_MARK) \
		--verbose --show-count -x --ignore=endpoints/appr/test/ \
		./

test: unit-test registry-test

ensure-test-db:
	@if [ -z $(TEST_DATABASE_URI) ]; then \
		echo "TEST_DATABASE_URI is undefined"; \
		exit 1; \
	fi

PG_PASSWORD := quay
PG_USER := quay
PG_HOST := postgresql://$(PG_USER):$(PG_PASSWORD)@localhost/quay

test_postgres : TEST_ENV := SKIP_DB_SCHEMA=true TEST=true \
	TEST_DATABASE_URI=$(PG_HOST) PYTHONPATH=.

test_postgres:
	docker rm -f postgres-testrunner-postgres || true
	docker run --name postgres-testrunner-postgres \
		-e POSTGRES_PASSWORD=$(PG_PASSWORD) -e POSTGRES_USER=${PG_USER} \
		-p 5432:5432 -d postgres:9.2
	until pg_isready -d $(PG_HOST); do sleep 1; echo "Waiting for postgres"; done
	$(TEST_ENV) alembic upgrade head
	$(TEST_ENV) py.test --timeout=7200 --verbose --show-count ./ --color=no \
		--ignore=endpoints/appr/test/ -x
	docker rm -f postgres-testrunner-postgres || true
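
Because `PYTEST_MARK` is set with `?=` above, the shard mark can be overridden per invocation; a sketch (only `shard_1_of_1` appears in this Makefile, so treat any other mark name as hypothetical):

```sh
# Run the unit tests with the default shard mark:
make unit-test
# Or select the shard mark explicitly on the command line:
make unit-test PYTEST_MARK=shard_1_of_1
```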
1 Procfile
@ -1,3 +1,4 @@
app: gunicorn -c conf/gunicorn_local.py application:application
webpack: npm run watch
builder: python -m buildman.builder
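
As the Makefile's `run` target and the README's Local Scripts section show, the processes in this Procfile are launched together with goreman:

```sh
go get github.com/mattn/goreman   # install goreman (from the README)
goreman start                     # start app, webpack, and builder as defined above
```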
404 README.md
@ -1,35 +1,36 @@
# quay
# Project Quay

![Docker Repository on Quay](https://quay.io/repository/quay/quay/status?token=7bffbc13-8bb0-4fb4-8a70-684a0cf485d3 "Docker Repository on Quay")
[![Build Status](https://travis-ci.com/quay/quay.svg?token=pWvEz2TeyDsVn69Hkiwq&branch=master)](https://travis-ci.com/quay/quay)

:warning: The `master` branch may be in an *unstable or even broken state* during development.
Please use [releases] instead of the `master` branch in order to get stable binaries.
Please use [releases] instead of the `master` branch in order to get stable software.

![Quay Logo](static/img/quay_preview.png)
[releases]: https://github.com/quay/quay/releases

Quay is a project to build, store, and distribute container images.
![Project Quay Logo](project_quay_logo.png)

Project Quay builds, stores, and distributes your container images.

High-level features include:

- Docker Registry Protocol [v1], [v2]
- Docker Manifest Schema [v2.1]
- Docker Registry Protocol [v2]
- Docker Manifest Schema [v2.1], [v2.2]
- [AppC Image Discovery] via on-demand transcoding
- Image Squashing via on-demand transcoding
- Authentication provided by [LDAP], [Keystone], [Dex], [Google], [GitHub]
- Authentication provided by [LDAP], [Keystone], [OIDC], [Google], and [GitHub]
- ACLs, team management, and auditability logs
- Geo-replicated storage provided by local filesystems, [S3], [GCS], [Swift], [Ceph]
- Geo-replicated storage provided by local filesystems, [S3], [GCS], [Swift], and [Ceph]
- Continuous Integration integrated with [GitHub], [Bitbucket], [GitLab], and [git]
- Security Vulnerability Analysis via [Clair]
- [Swagger]-compliant HTTP API

[releases]: https://github.com/coreos-inc/quay/releases
[v1]: https://docs.docker.com/v1.6/reference/api/registry_api/
[v2]: https://docs.docker.com/v1.6/registry/
[v2]: https://docs.docker.com/registry/spec/api/
[v2.1]: https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md
[v2.2]: https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md
[AppC Image Discovery]: https://github.com/appc/spec/blob/master/spec/discovery.md
[LDAP]: https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol
[Keystone]: http://docs.openstack.org/developer/keystone
[Dex]: https://github.com/coreos/dex
[OIDC]: https://en.wikipedia.org/wiki/OpenID_Connect
[Google]: https://developers.google.com/identity/sign-in/web/sign-in
[GitHub]: https://developer.github.com/v3/oauth
[S3]: https://aws.amazon.com/s3

@ -40,369 +41,34 @@ High-level features include:
[Bitbucket]: https://bitbucket.com
[GitLab]: https://gitlab.com
[git]: https://git-scm.com
[Clair]: https://github.com/coreos/clair
[Clair]: https://github.com/quay/clair
[Swagger]: http://swagger.io

## Table of Contents

1. **[Getting Started](#getting-started)**
   1. [macOS](#macos)
   3. [Linux](#linux)
2. **[Development](#development)**
   1. [PivotalTracker Integration](#pivotaltracker-integration)
3. **[Running and Testing](#running-and-testing)**
   1. [Test Data](#test-data)
   2. [Local Scripts](#local-scripts)
   3. [Development inside Docker](#development-inside-docker)
   4. [Adding a Python Dependency](#adding-a-python-dependency)
   5. [Adding a Yarn Dependency](#adding-a-yarn-dependency)
   6. [Running the Build System](#running-the-build-system)
   7. [To run individual tests](#to-run-individual-tests)
      1. [Pytest](#pytest)
      2. [Tox](#tox)
   8. [Running Migrations](#running-migrations)
   9. [How to run a build with tests for a push or merge](#how-to-run-a-build-with-tests-for-a-push-or-merge)
4. **[Documentation](#documentation)**
   1. [Architecture at a Glance](#architecture-at-a-glance)
   2. [Terminology](#terminology)
      1. [Organizations](#organizations)
      2. [Concepts](#concepts)
      3. [Software](#software)

## Getting Started

### macOS
* Explore a live instance of Project Quay hosted at [Quay.io]
* Watch [talks] given about Project Quay
* Review the [documentation] for Red Hat Quay
* Get up and running with a containerized [development environment]

macOS developers will need:
[Quay.io]: https://quay.io
[talks]: /docs/talks.md
[documentation]: https://access.redhat.com/documentation/en-us/red_hat_quay
[development environment]: /docs/development-container.md

* [command line tools] or [xcode]
* [brew]
## Community

[command line tools]: https://developer.apple.com/downloads
[xcode]: https://developer.apple.com/downloads
[brew]: https://github.com/Homebrew/brew
[docker]: https://www.docker.com/docker-mac
* Mailing List: [quay-dev@googlegroups.com]
* IRC: #quay on [freenode.net]
* Bug tracking: [JBoss JIRA]
* Security Issues: [security@redhat.com]

```sh
# Download the code
git clone git@github.com:coreos-inc/quay.git && cd quay
[quay-dev@googlegroups.com]: https://groups.google.com/forum/#!forum/quay-dev
[freenode.net]: https://webchat.freenode.net
[JBoss JIRA]: https://issues.jboss.org/projects/PROJQUAY
[security@redhat.com]: mailto:security@redhat.com

# Install the system dependencies
brew install libevent libmagic postgresql gpgme pyenv pyenv-virtualenv node yarn
## License

# Create a default virtual machine for docker
docker-machine create -d virtualbox default

# Add these to ~/.bashrc or ~/.zshrc
eval "$(pyenv virtualenv-init -)"
eval "$(pyenv init -)"
eval $(/usr/local/bin/docker-machine env default)
export PYTHONPATH="."

# Some installs don't have /usr/include, required for finding SASL header files
# This command might fail because the rootfs is read-only. Refer to the following:
# http://apple.stackexchange.com/questions/196224/unix-ln-s-command-not-permitted-in-osx-el-capitan-beta3
if [ ! -e /usr/include ]; then sudo ln -s `xcrun --show-sdk-path`/usr/include /usr/include; fi

# Install the Python dependencies
pyenv install 2.7.12
pyenv virtualenv 2.7.12 quay
pyenv activate quay
pyenv local quay

# Some packages may fail to build with clang (which now defaults to C11).
# If you're getting errors, try running again with CFLAGS='-std=c99'.
pip install -r requirements.txt
pip install -r requirements-dev.txt
pip install -r requirements-test.txt

# Setup a local config
git clone git@github.com:coreos-inc/quay-config.git ../quay-config
ln -s ../../quay-config/local conf/stack

# Install Node Dependencies
yarn install

# Link Typescript
yarn link typescript
```

#### Third Party Docs

* [docker](https://beta.docker.com/docs/mac/getting-started)
* [docker-machine](https://docs.docker.com/machine/install-machine)
* [pyenv](https://github.com/yyuu/pyenv)
* [pyenv-virtualenv](https://github.com/yyuu/pyenv-virtualenv)

### Docker Compose

You'll need Docker and [Docker Compose](https://docs.docker.com/compose) installed.
If you're on macOS, [Docker for Mac](https://www.docker.com/docker-mac) should include
both tools. Otherwise, follow the docs for your platform.

You'll also need Node.js and NPM if you want to interact with the
frontend code outside a container.

Finally, you'll need a recent [Go](https://golang.org) version for the
builder.

To start Quay locally:
```sh
# Clone the various repos you'll need:
git clone https://github.com/coreos-inc/quay.git
git clone https://github.com/coreos-inc/quay-config-local.git
git clone https://github.com/coreos-inc/quay-builder.git

# Build the builder:
cd quay-builder
make build GOOS=linux

# Install NPM modules:
cd ../quay
npm install

# Build or pull images and start all Quay components:
docker-compose up
```

#### Third Party Docs

* [Docker Compose](https://docs.docker.com/compose)
* [Docker for Mac](https://www.docker.com/docker-mac)

### Linux

Do you use Linux? Send us a PR! Or use docker-compose!

## Development

### JIRA

The Quay backlog can be found in JIRA: https://jira.coreos.com/projects/QUAY

## Running and Testing

### Test Data

A SQLite database full of test data is committed to this git repository at [test/data/test.db](quay/test/data/test.db).
This database is generated by executing `python initdb.py`.
The username and password of the admin test account are `devtable` and `password`, respectively.
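
To rebuild the committed test database from scratch (the same two commands appear under Running Migrations below):

```sh
rm test/data/test.db
python initdb.py
```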
### Local Scripts

Running the web server locally requires [goreman](https://github.com/mattn/goreman):

```sh
go get github.com/mattn/goreman
```

* `local-run` runs the web server for testing
* `local-test` runs the unit test suite
* `yarn run build` builds front-end dependencies
* `yarn run watch` builds and watches front-end dependencies
* `yarn test` runs front-end test suite
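
A typical local loop using the scripts listed above, assuming they sit at the repository root with a `.sh` suffix (as `./local-test.sh` does in the Python-dependency section below; the exact script names are assumptions):

```sh
./local-run.sh   # serve the web app for manual testing (script name assumed)
./local-test.sh  # run the unit test suite
```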
### Development inside Docker

To build and run a development container, pass one argument to [local-docker.sh](quay/local-docker.sh) (an example follows the list):

- `buildman`: run the buildmanager
- `dev`: run web server on port 5000
- `initdb`: clear and initialize the test database
- `notifications`: run the notification worker
- `test`: run the unit test suite
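
For example, to bring up the development web server from the list above (assuming the script is invoked from the repository root):

```sh
# Run the web server on port 5000 inside a development container:
./local-docker.sh dev
```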
### Adding a Python Dependency

```sh
# Create a new virtualenv and activate it
pyenv virtualenv 2.7.12 quay-deps
pyenv activate quay-deps

# Install unversioned dependencies with your changes
pip install -r requirements-nover.txt

# Run the unit test suite
./local-test.sh

# Freeze the versions of all of the dependencies
pip freeze > requirements.txt

# Delete the virtualenv
pyenv uninstall quay-deps
```

### Adding a Yarn Dependency

We use [Yarn](https://yarnpkg.com/) for frontend dependency management. The `yarn.lock` file ensures
that we get consistent version installs using the `yarn install` command. However, new dependencies
should be added using `yarn add <npm package>`. This will add an entry to `package.json` and `yarn.lock`.

Occasionally there will be merge conflicts with `yarn.lock`. To resolve them, use the following (taken
from [here](https://github.com/yarnpkg/yarn/issues/1776#issuecomment-269539948)).

```sh
git rebase origin/master
git checkout origin/master -- yarn.lock
yarn install
git add yarn.lock
git rebase --continue
```

### Running the Build System

TODO

```sh
# Run an instance of redis
docker run -d -p 6379:6379 quay.io/quay/redis
```

### To run individual tests

```sh
# To run a specific suite
TEST=true python -m test.test_api_usage -f

# To run a specific test in a suite
TEST=true python -m test.test_api_usage -f SuiteName
```
#### Pytest

```sh
# To run all tests
TEST=true PYTHONPATH="." py.test --verbose

# To run a specific test module
TEST=true PYTHONPATH="." py.test --verbose test/registry_tests.py

# To run a single specific test
TEST=true PYTHONPATH="." py.test --verbose test/test_api_usage.py::TestDeleteNamespace

# To retry only last failed (--lf):
TEST=true PYTHONPATH="." py.test --verbose --lf

# To start pdb on failure:
TEST=true PYTHONPATH="." py.test --verbose --pdb

# To run a coverage report (html pages in ./htmlcov):
TEST=true PYTHONPATH="." py.test --cov="." --cov-report=html --cov-report=term-missing --cov-config=.coverage.ini --verbose

# Don't capture stdout (-s)
TEST=true PYTHONPATH="." py.test --verbose -s
```

#### Tox

Tox creates a virtualenv in which to run the tests.
It allows testing the code against multiple environments, such as python2.x and python3.x, or against different library versions.

```sh
# Test all tox env:
tox

# Add extra parameters to the pytest command:
# tox -- [pytest ARGS]
tox -- -x

# build a single env with -e:
tox -e py27-api
```

### Running migrations

```sh
# To create a new migration with this description.
# Note there might be some errors about a unique id being too long.
# That's okay as long as the migration file is created.
./data/migrations/migration.sh "Description goes here"

# To test the up and down of the migration
./data/migrations/migration.sh # without params

# Migrations get run when you create a docker image, or you can run them
# manually with the following command.
PYTHONPATH=. alembic upgrade head

# You can also rebuild your local sqlite db from initdb.py using the commands below.
# Once you have a migration, you should do this and check in the
# changes to share your migration with others.
rm test/data/test.db
python initdb.py
```


### Running tests for migrations

Use AWS/RDS to create a test image.
To create a new database from a snapshot to test against, see
[this](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_RestoreFromSnapshot.html).

Then point the migrations to the new instance using
quay-config/local/config.yaml.

Remember to run this from the root of the quay directory and to set your
python environment first.

```sh
PYTHONPATH=. alembic upgrade head
```

### How to run a build with tests for a push or merge

```sh
# Inside the quay directory.
export QUAY_TAG=quay.io/quay/quay:localtest
docker build -t $QUAY_TAG --build-arg RUN_TESTS=true .
```

## Documentation

* [Quay Enterprise Documentation](https://tectonic.com/quay-enterprise/docs/latest)
* [Quay.io Documentation](https://docs.quay.io)

### Architecture at a Glance

<img src="https://docs.google.com/a/coreos.com/drawings/d/1J-YZs7aun1lLy-1wFwIZcBma5IJmZQ8WfgtEftHCKJ0/pub?w=640&h=480">

Edit chart on Google Docs at [Architecture at a Glance](https://docs.google.com/a/coreos.com/drawings/d/1J-YZs7aun1lLy-1wFwIZcBma5IJmZQ8WfgtEftHCKJ0/edit?usp=sharing).

### Terminology

#### Organizations

- **AppC**: a standards body responsible for a _Runtime_ and _Image Format_ superseded by the _Open Container Initiative_
- **Open Container Initiative**: a standards body responsible for a _Runtime_ specification and an _Image Format_
- **Docker**: a company that builds a platform that has its own _Image Formats_, _Build System_, _Container Runtime_, and _Container Orchestration_

#### Concepts

- **Image**: an archive containing all of the contents necessary to execute a container
- **Image Format**: a specification for the structure of an _Image_
- **Image Layer**: an _Image_ that may depend on being applied to other _Images_ to generate a final _Image_
- **Image Squashing**: the process of compressing an _Image_ into a single _Layer_
- **Manifest**: a text file containing metadata for a particular _Image_
- **Tag**: a human-friendly, named, mutable pointer to a particular set of _Images_
- **Build System**: a program used to generate _Images_
- **Registry**: a program that speaks one or more standard protocols to store and receive _Images_
- **Repository**: a collection of related _Tags_ organized by a _Registry_
- **Push**: the act of uploading an _Image_ to a _Registry_
- **Pull**: the act of downloading an _Image_ from a _Registry_
- **Container**: an _Image_ and its execution environment
- **Container Runtime**: a program that can transform an _Image_ into a _Container_ by executing it
- **Container Orchestration**: a program or set of programs that provides a framework for deploying _Containers_

#### Software

- **Quay.io**: CoreOS's hosted _Registry_
- **Quay**: CoreOS's enterprise-grade _Registry_ product
- **quayctl**: an open source program that implements alternative methods for _pulling_ _Images_ from _Quay_
- **Clair**: an open source static analysis tool used to detect vulnerabilities in _Images_
- **Quay Security Scanning**: the integration between _Clair_ and _Quay_
- **Kubernetes**: an open source program implementing _Container Orchestration_
- **Docker Hub**: Docker's hosted _Registry_
- **Docker Trusted Registry**: Docker's enterprise-grade _Registry_ product
- **Notary**: an open source implementation of the TUF protocol used in _Docker Content Trust_
- **Docker Content Trust**: the integration between _Notary_ and _Docker Trusted Registry_
- **Docker Engine**: a program used to interact with all aspects of the Docker platform
- **Swarm**: a program implementing _Container Orchestration_ for the Docker platform

Project Quay is under the Apache 2.0 license.
See the LICENSE file for details.
101 ROADMAP.md
@ -1,101 +0,0 @@
# Quay Roadmap


| Abbreviation | Feature |
|---|---|
| **(H)** | Hosted Quay.io |
| **(ER)**| Enterprise Registry Only |
| **(B)** | Builders |

### Sprint 3/2 - 3/16
- **(H)** Launch Clair 1.0
  - Tasks
    - Backfill DB
    - Provide timely logo feedback
    - Quay blog post
    - Clair blog post
    - Screencast
- **(H)** Test and launch torrent GA
  - Have a use case which shows improved performance
  - Tasks
    - Docs detailing reference use cases
    - Publish quayctl
    - Quayctl man page README
    - Notify marketing when the above is done
- **(ER)** Figure out how to handle client cert generation
  - Auto approval rules
  - Auto generation
  - UI for approving
  - Tasks
    - See if there is anything from Ed's tool that we can re-use
    - Test assumptions around nginx client cert auth
    - Figure out if we can verify certs in python if nginx approves
    - Have a hangout with gtank w.r.t. client certs vs hmac vs jwt

- **(ER)** Clair in ER
  - Tasks
    - Integrate Clair with cert generation tool
    - Blog post for Clair in ER
    - Add Clair config to the setup tool
  - Bugs
    - Fix Quay permission loading performance for Clair
    - OR: Make the Clair API on Quay batch
    - Fix Clair readme
    - Address Huawei PR for new Clair endpoint

### Unallocated
- **(ER)** Torrent support in ER
  - Setup tool support
  - Docs on how to get Chihaya running
- **(ER)** Online upgrade tool
  - Migrations while site is live
  - Nag people to upgrade
- **(B)** Dockerfile flag support
  - Requires quay.yaml
- **(B)** Move build traffic to Packet
  - Preliminary tests reduce build start latency from 2 minutes to 20 seconds
- **(B)** Multi-step builds
  - build artifact
  - bundle artifact
  - test bundle
- **(H)** Docker Notary
  - Support signed images with a known key
- **(H/ER)** Labels
  - Support for Midas Package Manager-like distribution
  - Integrated with Docker labels
  - Mutable and immutable
  - Searchable and fleshed out API
- **(H)** Integrate with tectonic.com sales pipeline
  - Mirror Quay customers in tectonic (SVOC)?
  - Callbacks to inform tectonic about quay events
  - Accept and apply QE licenses to the stack
- **(ER)** Tectonic care and feeding
  - Build tools to give us a concrete/declarative cluster deploy story
  - Build a tool to migrate an app between tectonic clusters
  - Assess the feasibility of upgrading a running cluster
- **(H)** Geo distribution through tectonic
  - Spin up a tectonic cluster in another region
  - Modify registry to run standalone on a tectonic cluster
- **(H)** Read available Quay.io
  - Ability to choose uptime of data-plane auditability
- **(H)** Launch our API GA
  - Versioned and backward compatible
  - Adequate documentation
- **(B)** Builds as top level concept
  - Multiple Quay.io repos from a single git push
- **(H)** Become the Tectonic app store
  - Pods/apps as top level concept
- **(H)** Distribution tool
  - Help people to get their apps from quay to Tectonic
  - Requires App manifest or adequate flexibility
- **(H)** AppC support
  - rkt push
  - discovery
- **(H/ER)** Mirroring from another registry (pull)

### Speculative
- **(H)** Immediately consistent multi-region data availability
  - Cockroach?
- **(H)** 2 factor auth
  - How to integrate with Docker CLI?
- **(H)** Mirroring to a dependent registry (push)
@ -0,0 +1,49 @@
# Testing quay
|
||||
|
||||
## Unit tests (run in CI automatically)
|
||||
|
||||
Basic unit tests for testing all the functionality of Quay:
|
||||
|
||||
```sh
|
||||
make unit-test
|
||||
```
|
||||
|
||||
## Registry tests (run in CI automatically)
|
||||
|
||||
Quay has two sets of registry tests (current and legacy), which simulate Docker clients by executing
|
||||
REST operations against a spanwed Quay.
|
||||
|
||||
```sh
|
||||
make registry-test
|
||||
make registry-test-old
|
||||
```
|
||||
|
||||
## Certs tests (run in CI automatically)
|
||||
|
||||
Ensures that custom TLS certificates are correctly loaded into the Quay container on startup.
|
||||
|
||||
```sh
|
||||
make certs-test
|
||||
```
|
||||
|
||||
## Full database tests (run in CI automatically)
|
||||
|
||||
The full database tests runs the entire suite of Quay unit tests against a real running database
|
||||
instance.
|
||||
|
||||
NOTE: The database *must be running* on the local machine before this test can be run.
|
||||
|
||||
```sh
|
||||
TEST_DATABASE_URI=database-connection-string make full-db-test
|
||||
```

## Clients tests (must be run manually)

The clients test spawns CoreOS virtual machines via Vagrant and VirtualBox and runs real Docker/podman
commands against a *running Quay*.

NOTE: A Quay *must be running* on the local machine before this test can be run. The address passed
to the test is the IP and port at which the VMs can reach that Quay; under VirtualBox NAT, 10.0.2.2
is the default address of the host machine.

```sh
make clients-test 10.0.2.2:5000 # IP+Port of the Quay on the host machine.
```
14 _init.py

@ -2,14 +2,26 @@ import os
import re
import subprocess

from util.config.provider import get_config_provider


ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
CONF_DIR = os.getenv("QUAYCONF", os.path.join(ROOT_DIR, "conf/"))
STATIC_DIR = os.path.join(ROOT_DIR, 'static/')
STATIC_LDN_DIR = os.path.join(STATIC_DIR, 'ldn/')
STATIC_FONTS_DIR = os.path.join(STATIC_DIR, 'fonts/')
STATIC_WEBFONTS_DIR = os.path.join(STATIC_DIR, 'webfonts/')
TEMPLATE_DIR = os.path.join(ROOT_DIR, 'templates/')

IS_TESTING = 'TEST' in os.environ
IS_BUILDING = 'BUILDING' in os.environ
IS_KUBERNETES = 'KUBERNETES_SERVICE_HOST' in os.environ
OVERRIDE_CONFIG_DIRECTORY = os.path.join(CONF_DIR, 'stack/')


config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py',
                                      testing=IS_TESTING, kubernetes=IS_KUBERNETES)


def _get_version_number_changelog():
  try:

@ -26,7 +38,7 @@ def _get_git_sha():
  else:
    try:
      return subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()[0:8]
    except (OSError, subprocess.CalledProcessError):
    except (OSError, subprocess.CalledProcessError, Exception):
      pass
  return "unknown"
@ -0,0 +1,22 @@
from enum import Enum, unique

from data.migrationutil import DefinedDataMigration, MigrationPhase


@unique
class ERTMigrationFlags(Enum):
  """ Flags for the encrypted robot token migration. """
  READ_OLD_FIELDS = 'read-old'
  WRITE_OLD_FIELDS = 'write-old'


ActiveDataMigration = DefinedDataMigration(
  'encrypted_robot_tokens',
  'ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE',
  [
    MigrationPhase('add-new-fields', 'c13c8052f7a6', [ERTMigrationFlags.READ_OLD_FIELDS,
                                                      ERTMigrationFlags.WRITE_OLD_FIELDS]),
    MigrationPhase('backfill-then-read-only-new',
                   '703298a825c2', [ERTMigrationFlags.WRITE_OLD_FIELDS]),
    MigrationPhase('stop-writing-both', '703298a825c2', []),
    MigrationPhase('remove-old-fields', 'c059b952ed76', []),
  ]
)
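For context: the four phases above stage the encrypted-robot-token schema change so a fleet can be
stepped forward one phase at a time, and the active phase is selected through the
`ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE` environment variable named in the definition. A minimal
sketch of reading that selection (the helper itself is hypothetical; only the variable name and
phase names come from the code above):

```python
import os

# Hypothetical helper: report which migration phase this deployment is in.
# Valid phase names, per the definition above: 'add-new-fields',
# 'backfill-then-read-only-new', 'stop-writing-both', 'remove-old-fields'.
def active_ert_phase(default='add-new-fields'):
    return os.environ.get('ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE', default)
```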
97 app.py

@ -6,71 +6,77 @@ import os
from functools import partial

from Crypto.PublicKey import RSA
from flask import Flask, request, Request, _request_ctx_stack
from flask_login import LoginManager, UserMixin
from flask import Flask, request, Request
from flask_login import LoginManager
from flask_mail import Mail
from flask_principal import Principal
from jwkest.jwk import RSAKey
from werkzeug.routing import BaseConverter
from werkzeug.contrib.fixers import ProxyFix
from werkzeug.exceptions import HTTPException

import features
from _init import CONF_DIR

from _init import (config_provider, CONF_DIR, IS_KUBERNETES, IS_TESTING, OVERRIDE_CONFIG_DIRECTORY,
                   IS_BUILDING)

from auth.auth_context import get_authenticated_user
from avatars.avatars import Avatar
from buildman.manager.buildcanceller import BuildCanceller
from data import database
from data import model
from data import logs_model
from data.archivedlogs import LogArchive
from data.billing import Billing
from data.buildlogs import BuildLogs
from data.cache import InMemoryDataModelCache
from data.cache import get_model_cache
from data.model.user import LoginWrappedDBUser
from data.queue import WorkQueue, BuildMetricQueueReporter
from data.userevent import UserEventsBuilderModule
from data.userfiles import Userfiles
from data.users import UserAuthentication
from data.registry_model import registry_model
from path_converters import RegexConverter, RepositoryPathConverter, APIRepositoryPathConverter
from oauth.services.github import GithubOAuthService
from oauth.services.gitlab import GitLabOAuthService
from oauth.loginmanager import OAuthLoginManager
from storage import Storage
from util.config import URLSchemeAndHostname
from util.log import filter_logs
from util import get_app_url
from util.secscan.secscan_util import get_blob_download_uri_getter
from util.ipresolver import IPResolver
from util.saas.analytics import Analytics
from util.saas.useranalytics import UserAnalytics
from util.saas.exceptionlog import Sentry
from util.names import urn_generator
from util.config.configutil import generate_secret_key
from util.config.provider import get_config_provider
from util.config.superusermanager import SuperUserManager
from util.label_validator import LabelValidator
from util.license import LicenseValidator
from util.metrics.metricqueue import MetricQueue
from util.metrics.prometheus import PrometheusPlugin
from util.saas.cloudwatch import start_cloudwatch_sender
from util.secscan.api import SecurityScannerAPI
from util.repomirror.api import RepoMirrorAPI
from util.tufmetadata.api import TUFMetadataAPI
from util.security.instancekeys import InstanceKeys
from util.security.signing import Signer


OVERRIDE_CONFIG_DIRECTORY = os.path.join(CONF_DIR, 'stack/')
OVERRIDE_CONFIG_YAML_FILENAME = os.path.join(CONF_DIR, 'stack/config.yaml')
OVERRIDE_CONFIG_PY_FILENAME = os.path.join(CONF_DIR, 'stack/config.py')

OVERRIDE_CONFIG_KEY = 'QUAY_OVERRIDE_CONFIG'

DOCKER_V2_SIGNINGKEY_FILENAME = 'docker_v2.pem'
INIT_SCRIPTS_LOCATION = '/conf/init/'

app = Flask(__name__)
logger = logging.getLogger(__name__)

# Instantiate the configuration.
is_testing = 'TEST' in os.environ
is_kubernetes = 'KUBERNETES_SERVICE_HOST' in os.environ
config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py',
                                      testing=is_testing, kubernetes=is_kubernetes)
is_testing = IS_TESTING
is_kubernetes = IS_KUBERNETES
is_building = IS_BUILDING

if is_testing:
  from test.testconfig import TestConfig

@ -89,6 +95,31 @@ config_provider.update_app_config(app.config)
environ_config = json.loads(os.environ.get(OVERRIDE_CONFIG_KEY, '{}'))
app.config.update(environ_config)

# Fix remote address handling for Flask.
if app.config.get('PROXY_COUNT', 1):
  app.wsgi_app = ProxyFix(app.wsgi_app, num_proxies=app.config.get('PROXY_COUNT', 1))

# Ensure the V3 upgrade key is specified correctly. If not, simply fail.
# TODO: Remove for V3.1.
if not is_testing and not is_building and app.config.get('SETUP_COMPLETE', False):
  v3_upgrade_mode = app.config.get('V3_UPGRADE_MODE')
  if v3_upgrade_mode is None:
    raise Exception('Configuration flag `V3_UPGRADE_MODE` must be set. Please check the upgrade docs')

  if (v3_upgrade_mode != 'background'
      and v3_upgrade_mode != 'complete'
      and v3_upgrade_mode != 'production-transition'
      and v3_upgrade_mode != 'post-oci-rollout'
      and v3_upgrade_mode != 'post-oci-roll-back-compat'):
    raise Exception('Invalid value for config `V3_UPGRADE_MODE`. Please check the upgrade docs')
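The check above refuses to start a configured (`SETUP_COMPLETE`) instance unless `V3_UPGRADE_MODE`
is one of the five recognized stages. A hypothetical `conf/stack/config.py` override illustrating
one valid setting (the value list comes directly from the check above):

```python
# Hypothetical config.py override for a deployment mid-upgrade; any of the
# accepted values works: 'background', 'complete', 'production-transition',
# 'post-oci-rollout', 'post-oci-roll-back-compat'.
V3_UPGRADE_MODE = 'background'
```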

# Split the registry model based on config.
# TODO: Remove once we are fully on the OCI data model.
registry_model.setup_split(app.config.get('OCI_NAMESPACE_PROPORTION') or 0,
                           app.config.get('OCI_NAMESPACE_WHITELIST') or set(),
                           app.config.get('V22_NAMESPACE_WHITELIST') or set(),
                           app.config.get('V3_UPGRADE_MODE'))

# Allow user to define a custom storage preference for the local instance.
_distributed_storage_preference = os.environ.get('QUAY_DISTRIBUTED_STORAGE_PREFERENCE', '').split()
if _distributed_storage_preference:

@ -123,6 +154,11 @@ class RequestWithId(Request):

@app.before_request
def _request_start():
  if os.getenv('PYDEV_DEBUG', None):
    import pydevd
    host, port = os.getenv('PYDEV_DEBUG').split(':')
    pydevd.settrace(host, port=int(port), stdoutToServer=True, stderrToServer=True, suspend=False)

  logger.debug('Starting request: %s (%s)', request.request_id, request.path,
               extra={"request_id": request.request_id})

@ -137,7 +173,11 @@ FILTERED_VALUES = [

@app.after_request
def _request_end(resp):
  jsonbody = request.get_json(force=True, silent=True)
  try:
    jsonbody = request.get_json(force=True, silent=True)
  except HTTPException:
    jsonbody = None

  values = request.values.to_dict()

  if jsonbody and not isinstance(jsonbody, dict):

@ -179,9 +219,7 @@ Principal(app, use_sessions=False)

tf = app.config['DB_TRANSACTION_FACTORY']

# TODO(jschorr): make this configurable
model_cache = InMemoryDataModelCache()

model_cache = get_model_cache(app.config)
avatar = Avatar(app)
login_manager = LoginManager(app)
mail = Mail(app)

@ -206,9 +244,6 @@ instance_keys = InstanceKeys(app)
label_validator = LabelValidator(app)
build_canceller = BuildCanceller(app)

license_validator = LicenseValidator(config_provider)
license_validator.start()

start_cloudwatch_sender(metric_queue, app)

github_trigger = GithubOAuthService(app.config, 'GITHUB_TRIGGER_CONFIG')

@ -228,11 +263,26 @@ notification_queue = WorkQueue(app.config['NOTIFICATION_QUEUE_NAME'], tf, has_na
secscan_notification_queue = WorkQueue(app.config['SECSCAN_NOTIFICATION_QUEUE_NAME'], tf,
                                       has_namespace=False,
                                       metric_queue=metric_queue)
export_action_logs_queue = WorkQueue(app.config['EXPORT_ACTION_LOGS_QUEUE_NAME'], tf,
                                     has_namespace=True,
                                     metric_queue=metric_queue)

# Note: We set `has_namespace` to `False` here, as we explicitly want this queue to not be emptied
# when a namespace is marked for deletion.
namespace_gc_queue = WorkQueue(app.config['NAMESPACE_GC_QUEUE_NAME'], tf, has_namespace=False,
                               metric_queue=metric_queue)

all_queues = [image_replication_queue, dockerfile_build_queue, notification_queue,
              secscan_notification_queue, chunk_cleanup_queue]
              secscan_notification_queue, chunk_cleanup_queue, namespace_gc_queue]

url_scheme_and_hostname = URLSchemeAndHostname(app.config['PREFERRED_URL_SCHEME'], app.config['SERVER_HOSTNAME'])
secscan_api = SecurityScannerAPI(app.config, storage, app.config['SERVER_HOSTNAME'], app.config['HTTPCLIENT'],
                                 uri_creator=get_blob_download_uri_getter(app.test_request_context('/'), url_scheme_and_hostname),
                                 instance_keys=instance_keys)

repo_mirror_api = RepoMirrorAPI(app.config, app.config['SERVER_HOSTNAME'], app.config['HTTPCLIENT'],
                                instance_keys=instance_keys)

secscan_api = SecurityScannerAPI(app, app.config, storage)
tuf_metadata_api = TUFMetadataAPI(app, app.config)

# Check for a key in config. If none found, generate a new signing key for Docker V2 manifests.

@ -242,8 +292,12 @@ if os.path.exists(_v2_key_path):
else:
  docker_v2_signing_key = RSAKey(key=RSA.generate(2048))

# Configure the database.
if app.config.get('DATABASE_SECRET_KEY') is None and app.config.get('SETUP_COMPLETE', False):
  raise Exception('Missing DATABASE_SECRET_KEY in config; did you perhaps forget to add it?')

database.configure(app.config)

model.config.app_config = app.config
model.config.store = storage
model.config.register_image_cleanup_callback(secscan_api.cleanup_layers)

@ -255,5 +309,6 @@ def load_user(user_uuid):
  logger.debug('User loader loading deferred user with uuid: %s', user_uuid)
  return LoginWrappedDBUser(user_uuid)

logs_model.configure(app.config)

get_app_url = partial(get_app_url, app.config)
@ -1,3 +1,7 @@
# NOTE: Must be before we import or call anything that may be synchronous.
from gevent import monkey
monkey.patch_all()

import os
import logging
import logging.config

@ -1 +0,0 @@
Jake Moshenko <jake.moshenko@coreos.com> (@jakedt)
@ -1,78 +1,21 @@
import logging

from flask import _request_ctx_stack
from data import model


logger = logging.getLogger(__name__)

def get_authenticated_context():
  """ Returns the auth context for the current request context, if any. """
  return getattr(_request_ctx_stack.top, 'authenticated_context', None)

def get_authenticated_user():
  user = getattr(_request_ctx_stack.top, 'authenticated_user', None)
  if not user:
    user_uuid = getattr(_request_ctx_stack.top, 'authenticated_user_uuid', None)
    if not user_uuid:
      logger.debug('No authenticated user or deferred user uuid.')
      return None

    logger.debug('Loading deferred authenticated user.')
    loaded = model.user.get_user_by_uuid(user_uuid)
    if not loaded.enabled:
      return None

    set_authenticated_user(loaded)
    user = loaded

  if user:
    logger.debug('Returning authenticated user: %s', user.username)
  return user


def set_authenticated_user(user_or_robot):
  if not user_or_robot.enabled:
    raise Exception('Attempt to authenticate a disabled user/robot: %s' % user_or_robot.username)

  ctx = _request_ctx_stack.top
  ctx.authenticated_user = user_or_robot


def get_grant_context():
  return getattr(_request_ctx_stack.top, 'grant_context', None)


def set_grant_context(grant_context):
  ctx = _request_ctx_stack.top
  ctx.grant_context = grant_context


def set_authenticated_user_deferred(user_or_robot_db_uuid):
  logger.debug('Deferring loading of authenticated user object with uuid: %s', user_or_robot_db_uuid)
  ctx = _request_ctx_stack.top
  ctx.authenticated_user_uuid = user_or_robot_db_uuid

  """ Returns the authenticated user, if any, or None if none. """
  context = get_authenticated_context()
  return context.authed_user if context else None

def get_validated_oauth_token():
  return getattr(_request_ctx_stack.top, 'validated_oauth_token', None)
  """ Returns the authenticated and validated OAuth access token, if any, or None if none. """
  context = get_authenticated_context()
  return context.authed_oauth_token if context else None


def set_validated_oauth_token(token):
def set_authenticated_context(auth_context):
  """ Sets the auth context for the current request context to that given. """
  ctx = _request_ctx_stack.top
  ctx.validated_oauth_token = token


def get_validated_app_specific_token():
  return getattr(_request_ctx_stack.top, 'validated_app_specific_token', None)


def set_validated_app_specific_token(token):
  ctx = _request_ctx_stack.top
  ctx.validated_app_specific_token = token


def get_validated_token():
  return getattr(_request_ctx_stack.top, 'validated_token', None)


def set_validated_token(token):
  ctx = _request_ctx_stack.top
  ctx.validated_token = token
  ctx.authenticated_context = auth_context
  return auth_context
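The rewrite above collapses the many per-credential getters and setters into a single authenticated
context stored on the request: callers now read everything through `get_authenticated_context()`
and its thin wrappers. A minimal usage sketch (the helper function is hypothetical; the accessor
names come from the diff):

```python
from auth.auth_context import get_authenticated_user

# Hypothetical view helper built on the new accessors.
def current_username_or_anonymous():
    user = get_authenticated_user()   # context.authed_user, or None
    return user.username if user else 'anonymous'
```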
@ -0,0 +1,437 @@
import logging

from abc import ABCMeta, abstractmethod
from cachetools.func import lru_cache
from six import add_metaclass

from app import app
from data import model

from flask_principal import Identity, identity_changed

from auth.auth_context import set_authenticated_context
from auth.context_entity import ContextEntityKind, CONTEXT_ENTITY_HANDLERS
from auth.permissions import QuayDeferredPermissionUser
from auth.scopes import scopes_from_scope_string

logger = logging.getLogger(__name__)

@add_metaclass(ABCMeta)
class AuthContext(object):
  """
  Interface that represents the current context of authentication.
  """

  @property
  @abstractmethod
  def entity_kind(self):
    """ Returns the kind of the entity in this auth context. """
    pass

  @property
  @abstractmethod
  def is_anonymous(self):
    """ Returns true if this is an anonymous context. """
    pass

  @property
  @abstractmethod
  def authed_oauth_token(self):
    """ Returns the authenticated OAuth token, if any. """
    pass

  @property
  @abstractmethod
  def authed_user(self):
    """ Returns the authenticated user, whether directly, or via an OAuth or access token. Note that
        this property will also return robot accounts.
    """
    pass

  @property
  @abstractmethod
  def has_nonrobot_user(self):
    """ Returns whether a user (not a robot) was authenticated successfully. """
    pass

  @property
  @abstractmethod
  def identity(self):
    """ Returns the identity for the auth context. """
    pass

  @property
  @abstractmethod
  def description(self):
    """ Returns a human-readable and *public* description of the current auth context. """
    pass

  @property
  @abstractmethod
  def credential_username(self):
    """ Returns the username to create credentials for this context's entity, if any. """
    pass

  @abstractmethod
  def analytics_id_and_public_metadata(self):
    """ Returns the analytics ID and public log metadata for this auth context. """
    pass

  @abstractmethod
  def apply_to_request_context(self):
    """ Applies this auth result to the auth context and Flask-Principal. """
    pass

  @abstractmethod
  def to_signed_dict(self):
    """ Serializes the auth context into a dictionary suitable for inclusion in a JWT or other
        form of signed serialization.
    """
    pass

  @property
  @abstractmethod
  def unique_key(self):
    """ Returns a key that is unique to this auth context type and its data. For example, an
        instance of the auth context type for the user might be a string of the form
        `user-{user-uuid}`. Callers should treat this key as opaque and not rely on the contents
        for anything besides uniqueness. This is typically used by callers when they'd like to
        check cache but not hit the database to get a fully validated auth context.
    """
    pass


class ValidatedAuthContext(AuthContext):
  """ ValidatedAuthContext represents the loaded, authenticated and validated auth information
      for the current request context.
  """
  def __init__(self, user=None, token=None, oauthtoken=None, robot=None, appspecifictoken=None,
               signed_data=None):
    # Note: These field names *MUST* match the string values of the kinds defined in
    # ContextEntityKind.
    self.user = user
    self.robot = robot
    self.token = token
    self.oauthtoken = oauthtoken
    self.appspecifictoken = appspecifictoken
    self.signed_data = signed_data

  def tuple(self):
    return vars(self).values()

  def __eq__(self, other):
    return self.tuple() == other.tuple()

  @property
  def entity_kind(self):
    """ Returns the kind of the entity in this auth context. """
    for kind in ContextEntityKind:
      if hasattr(self, kind.value) and getattr(self, kind.value):
        return kind

    return ContextEntityKind.anonymous

  @property
  def authed_user(self):
    """ Returns the authenticated user, whether directly, or via an OAuth token. Note that this
        will also return robot accounts.
    """
    authed_user = self._authed_user()
    if authed_user is not None and not authed_user.enabled:
      logger.warning('Attempt to reference a disabled user/robot: %s', authed_user.username)
      return None

    return authed_user

  @property
  def authed_oauth_token(self):
    return self.oauthtoken

  def _authed_user(self):
    if self.oauthtoken:
      return self.oauthtoken.authorized_user

    if self.appspecifictoken:
      return self.appspecifictoken.user

    if self.signed_data:
      return model.user.get_user(self.signed_data['user_context'])

    return self.user if self.user else self.robot

  @property
  def is_anonymous(self):
    """ Returns true if this is an anonymous context. """
    return not self.authed_user and not self.token and not self.signed_data

  @property
  def has_nonrobot_user(self):
    """ Returns whether a user (not a robot) was authenticated successfully. """
    return bool(self.authed_user and not self.robot)

  @property
  def identity(self):
    """ Returns the identity for the auth context. """
    if self.oauthtoken:
      scope_set = scopes_from_scope_string(self.oauthtoken.scope)
      return QuayDeferredPermissionUser.for_user(self.oauthtoken.authorized_user, scope_set)

    if self.authed_user:
      return QuayDeferredPermissionUser.for_user(self.authed_user)

    if self.token:
      return Identity(self.token.get_code(), 'token')

    if self.signed_data:
      identity = Identity(None, 'signed_grant')
      identity.provides.update(self.signed_data['grants'])
      return identity

    return None

  @property
  def entity_reference(self):
    """ Returns the DB object reference for this context's entity. """
    if self.entity_kind == ContextEntityKind.anonymous:
      return None

    return getattr(self, self.entity_kind.value)

  @property
  def description(self):
    """ Returns a human-readable and *public* description of the current auth context. """
    handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]()
    return handler.description(self.entity_reference)

  @property
  def credential_username(self):
    """ Returns the username to create credentials for this context's entity, if any. """
    handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]()
    return handler.credential_username(self.entity_reference)

  def analytics_id_and_public_metadata(self):
    """ Returns the analytics ID and public log metadata for this auth context. """
    handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]()
    return handler.analytics_id_and_public_metadata(self.entity_reference)

  def apply_to_request_context(self):
    """ Applies this auth result to the auth context and Flask-Principal. """
    # Save to the request context.
    set_authenticated_context(self)

    # Set the identity for Flask-Principal.
    if self.identity:
      identity_changed.send(app, identity=self.identity)

  @property
  def unique_key(self):
    signed_dict = self.to_signed_dict()
    return '%s-%s' % (signed_dict['entity_kind'], signed_dict.get('entity_reference', '(anon)'))

  def to_signed_dict(self):
    """ Serializes the auth context into a dictionary suitable for inclusion in a JWT or other
        form of signed serialization.
    """
    dict_data = {
      'version': 2,
      'entity_kind': self.entity_kind.value,
    }

    if self.entity_kind != ContextEntityKind.anonymous:
      handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]()
      dict_data.update({
        'entity_reference': handler.get_serialized_entity_reference(self.entity_reference),
      })

    # Add legacy information.
    # TODO: Remove this all once the new code is fully deployed.
    if self.token:
      dict_data.update({
        'kind': 'token',
        'token': self.token.code,
      })

    if self.oauthtoken:
      dict_data.update({
        'kind': 'oauth',
        'oauth': self.oauthtoken.uuid,
        'user': self.authed_user.username,
      })

    if self.user or self.robot:
      dict_data.update({
        'kind': 'user',
        'user': self.authed_user.username,
      })

    if self.appspecifictoken:
      dict_data.update({
        'kind': 'user',
        'user': self.authed_user.username,
      })

    if self.is_anonymous:
      dict_data.update({
        'kind': 'anonymous',
      })

    # End of legacy information.
    return dict_data

class SignedAuthContext(AuthContext):
  """ SignedAuthContext represents an auth context loaded from a signed token of some kind,
      such as a JWT. Unlike ValidatedAuthContext, SignedAuthContext operates lazily, only loading
      the actual {user, robot, token, etc} when requested. This allows registry operations that
      only need to check if *some* entity is present to do so, without hitting the database.
  """
  def __init__(self, kind, signed_data, v1_dict_format):
    self.kind = kind
    self.signed_data = signed_data
    self.v1_dict_format = v1_dict_format

  @property
  def unique_key(self):
    if self.v1_dict_format:
      # Since V1 data format is verbose, just use the validated version to get the key.
      return self._get_validated().unique_key

    signed_dict = self.signed_data
    return '%s-%s' % (signed_dict['entity_kind'], signed_dict.get('entity_reference', '(anon)'))

  @classmethod
  def build_from_signed_dict(cls, dict_data, v1_dict_format=False):
    if not v1_dict_format:
      entity_kind = ContextEntityKind(dict_data.get('entity_kind', 'anonymous'))
      return SignedAuthContext(entity_kind, dict_data, v1_dict_format)

    # Legacy handling.
    # TODO: Remove this all once the new code is fully deployed.
    kind_string = dict_data.get('kind', 'anonymous')
    if kind_string == 'oauth':
      kind_string = 'oauthtoken'

    kind = ContextEntityKind(kind_string)
    return SignedAuthContext(kind, dict_data, v1_dict_format)

  @lru_cache(maxsize=1)
  def _get_validated(self):
    """ Returns a ValidatedAuthContext for this signed context, resolving all the necessary
        references.
    """
    if not self.v1_dict_format:
      if self.kind == ContextEntityKind.anonymous:
        return ValidatedAuthContext()

      serialized_entity_reference = self.signed_data['entity_reference']
      handler = CONTEXT_ENTITY_HANDLERS[self.kind]()
      entity_reference = handler.deserialize_entity_reference(serialized_entity_reference)
      if entity_reference is None:
        logger.debug('Could not deserialize entity reference `%s` under kind `%s`',
                     serialized_entity_reference, self.kind)
        return ValidatedAuthContext()

      return ValidatedAuthContext(**{self.kind.value: entity_reference})

    # Legacy handling.
    # TODO: Remove this all once the new code is fully deployed.
    kind_string = self.signed_data.get('kind', 'anonymous')
    if kind_string == 'oauth':
      kind_string = 'oauthtoken'

    kind = ContextEntityKind(kind_string)
    if kind == ContextEntityKind.anonymous:
      return ValidatedAuthContext()

    if kind == ContextEntityKind.user or kind == ContextEntityKind.robot:
      user = model.user.get_user(self.signed_data.get('user', ''))
      if not user:
        return None

      return ValidatedAuthContext(robot=user) if user.robot else ValidatedAuthContext(user=user)

    if kind == ContextEntityKind.token:
      token = model.token.load_token_data(self.signed_data.get('token'))
      if not token:
        return None

      return ValidatedAuthContext(token=token)

    if kind == ContextEntityKind.oauthtoken:
      user = model.user.get_user(self.signed_data.get('user', ''))
      if not user:
        return None

      token_uuid = self.signed_data.get('oauth', '')
      oauthtoken = model.oauth.lookup_access_token_for_user(user, token_uuid)
      if not oauthtoken:
        return None

      return ValidatedAuthContext(oauthtoken=oauthtoken)

    raise Exception('Unknown auth context kind `%s` when deserializing %s' % (kind,
                                                                              self.signed_data))
    # End of legacy handling.

  @property
  def entity_kind(self):
    """ Returns the kind of the entity in this auth context. """
    return self.kind

  @property
  def is_anonymous(self):
    """ Returns true if this is an anonymous context. """
    return self.kind == ContextEntityKind.anonymous

  @property
  def authed_user(self):
    """ Returns the authenticated user, whether directly, or via an OAuth or access token. Note that
        this property will also return robot accounts.
    """
    if self.kind == ContextEntityKind.anonymous:
      return None

    return self._get_validated().authed_user

  @property
  def authed_oauth_token(self):
    if self.kind == ContextEntityKind.anonymous:
      return None

    return self._get_validated().authed_oauth_token

  @property
  def has_nonrobot_user(self):
    """ Returns whether a user (not a robot) was authenticated successfully. """
    if self.kind == ContextEntityKind.anonymous:
      return False

    return self._get_validated().has_nonrobot_user

  @property
  def identity(self):
    """ Returns the identity for the auth context. """
    return self._get_validated().identity

  @property
  def description(self):
    """ Returns a human-readable and *public* description of the current auth context. """
    return self._get_validated().description

  @property
  def credential_username(self):
    """ Returns the username to create credentials for this context's entity, if any. """
    return self._get_validated().credential_username

  def analytics_id_and_public_metadata(self):
    """ Returns the analytics ID and public log metadata for this auth context. """
    return self._get_validated().analytics_id_and_public_metadata()

  def apply_to_request_context(self):
    """ Applies this auth result to the auth context and Flask-Principal. """
    return self._get_validated().apply_to_request_context()

  def to_signed_dict(self):
    """ Serializes the auth context into a dictionary suitable for inclusion in a JWT or other
        form of signed serialization.
    """
    return self.signed_data
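The laziness described in the `SignedAuthContext` docstring is implemented through
`_get_validated()` under an `lru_cache`, so cheap checks never touch the database. A small sketch
of that trade-off (here `payload` stands for a hypothetical decoded JWT body previously produced
by `to_signed_dict()`):

```python
# Sketch: rebuild an auth context from a signed payload without a DB hit.
ctx = SignedAuthContext.build_from_signed_dict(payload)

if not ctx.is_anonymous:      # answered from the payload alone
    user = ctx.authed_user    # first real access resolves entities via the DB
```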
@ -15,7 +15,7 @@ def has_basic_auth(username):
  """
  auth_header = request.headers.get('authorization', '')
  result = validate_basic_auth(auth_header)
  return result.has_user and result.user.username == username
  return result.has_nonrobot_user and result.context.user.username == username


def validate_basic_auth(auth_header):

@ -28,6 +28,7 @@ def validate_basic_auth(auth_header):
  logger.debug('Attempt to process basic auth header')

  # Parse the basic auth header.
  assert isinstance(auth_header, basestring)
  credentials, err = _parse_basic_auth_header(auth_header)
  if err is not None:
    logger.debug('Got invalid basic auth header: %s', auth_header)

@ -47,7 +48,7 @@ def _parse_basic_auth_header(auth):

  try:
    credentials = [part.decode('utf-8') for part in b64decode(normalized[1]).split(':', 1)]
  except TypeError:
  except (TypeError, UnicodeDecodeError, ValueError):
    logger.exception('Exception when parsing basic auth header: %s', auth)
    return None, 'Could not parse basic auth header'
@ -0,0 +1,203 @@
from abc import ABCMeta, abstractmethod
from six import add_metaclass
from enum import Enum

from data import model

from auth.credential_consts import (ACCESS_TOKEN_USERNAME, OAUTH_TOKEN_USERNAME,
                                    APP_SPECIFIC_TOKEN_USERNAME)

class ContextEntityKind(Enum):
  """ Defines the various kinds of entities in an auth context. Note that the string values of
      these fields *must* match the names of the fields in the ValidatedAuthContext class, as
      we fill them in directly based on the string names here.
  """
  anonymous = 'anonymous'
  user = 'user'
  robot = 'robot'
  token = 'token'
  oauthtoken = 'oauthtoken'
  appspecifictoken = 'appspecifictoken'
  signed_data = 'signed_data'


@add_metaclass(ABCMeta)
class ContextEntityHandler(object):
  """
  Interface that represents handling specific kinds of entities under an auth context.
  """

  @abstractmethod
  def credential_username(self, entity_reference):
    """ Returns the username to create credentials for this entity, if any. """
    pass

  @abstractmethod
  def get_serialized_entity_reference(self, entity_reference):
    """ Returns the entity reference for this kind of auth context, serialized into a form that can
        be placed into a JSON object and put into a JWT. This is typically a DB UUID or another
        unique identifier for the object in the DB.
    """
    pass

  @abstractmethod
  def deserialize_entity_reference(self, serialized_entity_reference):
    """ Returns the deserialized reference to the entity in the database, or None if none. """
    pass

  @abstractmethod
  def description(self, entity_reference):
    """ Returns a human-readable and *public* description of the current entity. """
    pass

  @abstractmethod
  def analytics_id_and_public_metadata(self, entity_reference):
    """ Returns the analytics ID and a dict of public metadata for the current entity. """
    pass


class AnonymousEntityHandler(ContextEntityHandler):
  def credential_username(self, entity_reference):
    return None

  def get_serialized_entity_reference(self, entity_reference):
    return None

  def deserialize_entity_reference(self, serialized_entity_reference):
    return None

  def description(self, entity_reference):
    return "anonymous"

  def analytics_id_and_public_metadata(self, entity_reference):
    return "anonymous", {}


class UserEntityHandler(ContextEntityHandler):
  def credential_username(self, entity_reference):
    return entity_reference.username

  def get_serialized_entity_reference(self, entity_reference):
    return entity_reference.uuid

  def deserialize_entity_reference(self, serialized_entity_reference):
    return model.user.get_user_by_uuid(serialized_entity_reference)

  def description(self, entity_reference):
    return "user %s" % entity_reference.username

  def analytics_id_and_public_metadata(self, entity_reference):
    return entity_reference.username, {
      'username': entity_reference.username,
    }


class RobotEntityHandler(ContextEntityHandler):
  def credential_username(self, entity_reference):
    return entity_reference.username

  def get_serialized_entity_reference(self, entity_reference):
    return entity_reference.username

  def deserialize_entity_reference(self, serialized_entity_reference):
    return model.user.lookup_robot(serialized_entity_reference)

  def description(self, entity_reference):
    return "robot %s" % entity_reference.username

  def analytics_id_and_public_metadata(self, entity_reference):
    return entity_reference.username, {
      'username': entity_reference.username,
      'is_robot': True,
    }


class TokenEntityHandler(ContextEntityHandler):
  def credential_username(self, entity_reference):
    return ACCESS_TOKEN_USERNAME

  def get_serialized_entity_reference(self, entity_reference):
    return entity_reference.get_code()

  def deserialize_entity_reference(self, serialized_entity_reference):
    return model.token.load_token_data(serialized_entity_reference)

  def description(self, entity_reference):
    return "token %s" % entity_reference.friendly_name

  def analytics_id_and_public_metadata(self, entity_reference):
    return 'token:%s' % entity_reference.id, {
      'token': entity_reference.friendly_name,
    }


class OAuthTokenEntityHandler(ContextEntityHandler):
  def credential_username(self, entity_reference):
    return OAUTH_TOKEN_USERNAME

  def get_serialized_entity_reference(self, entity_reference):
    return entity_reference.uuid

  def deserialize_entity_reference(self, serialized_entity_reference):
    return model.oauth.lookup_access_token_by_uuid(serialized_entity_reference)

  def description(self, entity_reference):
    return "oauthtoken for user %s" % entity_reference.authorized_user.username

  def analytics_id_and_public_metadata(self, entity_reference):
    return 'oauthtoken:%s' % entity_reference.id, {
      'oauth_token_id': entity_reference.id,
      'oauth_token_application_id': entity_reference.application.client_id,
      'oauth_token_application': entity_reference.application.name,
      'username': entity_reference.authorized_user.username,
    }


class AppSpecificTokenEntityHandler(ContextEntityHandler):
  def credential_username(self, entity_reference):
    return APP_SPECIFIC_TOKEN_USERNAME

  def get_serialized_entity_reference(self, entity_reference):
    return entity_reference.uuid

  def deserialize_entity_reference(self, serialized_entity_reference):
    return model.appspecifictoken.get_token_by_uuid(serialized_entity_reference)

  def description(self, entity_reference):
    tpl = (entity_reference.title, entity_reference.user.username)
    return "app specific token %s for user %s" % tpl

  def analytics_id_and_public_metadata(self, entity_reference):
    return 'appspecifictoken:%s' % entity_reference.id, {
      'app_specific_token': entity_reference.uuid,
      'app_specific_token_title': entity_reference.title,
      'username': entity_reference.user.username,
    }


class SignedDataEntityHandler(ContextEntityHandler):
  def credential_username(self, entity_reference):
    return None

  def get_serialized_entity_reference(self, entity_reference):
    raise NotImplementedError

  def deserialize_entity_reference(self, serialized_entity_reference):
    raise NotImplementedError

  def description(self, entity_reference):
    return "signed"

  def analytics_id_and_public_metadata(self, entity_reference):
    return 'signed', {'signed': entity_reference}


CONTEXT_ENTITY_HANDLERS = {
  ContextEntityKind.anonymous: AnonymousEntityHandler,
  ContextEntityKind.user: UserEntityHandler,
  ContextEntityKind.robot: RobotEntityHandler,
  ContextEntityKind.token: TokenEntityHandler,
  ContextEntityKind.oauthtoken: OAuthTokenEntityHandler,
  ContextEntityKind.appspecifictoken: AppSpecificTokenEntityHandler,
  ContextEntityKind.signed_data: SignedDataEntityHandler,
}
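Every `ContextEntityKind` maps to exactly one handler class in the dict above, which is how
`ValidatedAuthContext` looks up descriptions, credential usernames, and analytics metadata. A small
usage sketch (`robot_ref` is a hypothetical robot row loaded elsewhere; the dispatch pattern itself
mirrors the code above):

```python
# Dispatch on entity kind, as the auth context classes do internally.
handler = CONTEXT_ENTITY_HANDLERS[ContextEntityKind.robot]()

print(handler.description(robot_ref))  # e.g. "robot devtable+dtrobot"
analytics_id, metadata = handler.analytics_id_and_public_metadata(robot_ref)
```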
@ -0,0 +1,3 @@
ACCESS_TOKEN_USERNAME = '$token'
OAUTH_TOKEN_USERNAME = '$oauthtoken'
APP_SPECIFIC_TOKEN_USERNAME = '$app'
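These sentinel usernames are what clients supply in place of a real username when logging in with a
token rather than a password. A sketch of how such a header is assembled (mirroring the `_token`
helper in the tests further below; the token value here is hypothetical):

```python
from base64 import b64encode

# Build a basic-auth header using the $oauthtoken sentinel username.
header = 'basic ' + b64encode('%s:%s' % ('$oauthtoken', 'some-access-token'))
```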
@ -7,15 +7,13 @@ import features
from app import authentication
from auth.oauth import validate_oauth_token
from auth.validateresult import ValidateResult, AuthKind
from auth.credential_consts import (ACCESS_TOKEN_USERNAME, OAUTH_TOKEN_USERNAME,
                                    APP_SPECIFIC_TOKEN_USERNAME)
from data import model
from util.names import parse_robot_username

logger = logging.getLogger(__name__)

ACCESS_TOKEN_USERNAME = '$token'
OAUTH_TOKEN_USERNAME = '$oauthtoken'
APP_SPECIFIC_TOKEN_USERNAME = '$app'


class CredentialKind(Enum):
  user = 'user'

@ -73,8 +71,8 @@ def validate_credentials(auth_username, auth_password_or_token):
      logger.debug('Successfully validated credentials for robot %s', auth_username)
      return ValidateResult(AuthKind.credentials, robot=robot), CredentialKind.robot
    except model.InvalidRobotException as ire:
      logger.warning('Failed to validate credentials for robot %s: %s', auth_username, ire.message)
      return ValidateResult(AuthKind.credentials, error_message=ire.message), CredentialKind.robot
      logger.warning('Failed to validate credentials for robot %s: %s', auth_username, ire)
      return ValidateResult(AuthKind.credentials, error_message=str(ire)), CredentialKind.robot

  # Otherwise, treat as a standard user.
  (authenticated, err) = authentication.verify_and_link_user(auth_username, auth_password_or_token,
@ -60,6 +60,7 @@ process_oauth = _auth_decorator(handlers=[validate_bearer_auth, validate_session
process_auth = _auth_decorator(handlers=[validate_signed_grant, validate_basic_auth])
process_auth_or_cookie = _auth_decorator(handlers=[validate_basic_auth, validate_session_cookie])
process_basic_auth = _auth_decorator(handlers=[validate_basic_auth], pass_result=True)
process_basic_auth_no_pass = _auth_decorator(handlers=[validate_basic_auth])


def require_session_login(func):

@ -69,7 +70,7 @@ def require_session_login(func):
  @wraps(func)
  def wrapper(*args, **kwargs):
    result = validate_session_cookie()
    if result.has_user:
    if result.has_nonrobot_user:
      result.apply_to_context()
      metric_queue.authentication_count.Inc(labelvalues=[result.kind, True])
      return func(*args, **kwargs)

@ -44,6 +44,5 @@ def validate_oauth_token(token):

  # We have a valid token
  scope_set = scopes_from_scope_string(validated.scope)
  logger.debug('Successfully validated oauth access token: %s with scope: %s', token,
               scope_set)
  logger.debug('Successfully validated oauth access token with scope: %s', scope_set)
  return ValidateResult(AuthKind.oauth, oauthtoken=validated)
@ -7,21 +7,18 @@ from flask import request, url_for
from flask_principal import identity_changed, Identity

from app import app, get_app_url, instance_keys, metric_queue
from auth.auth_context import (set_grant_context, get_grant_context)
from auth.auth_context import set_authenticated_context
from auth.auth_context_type import SignedAuthContext
from auth.permissions import repository_read_grant, repository_write_grant, repository_admin_grant
from util.http import abort
from util.names import parse_namespace_repository
from util.security.registry_jwt import (ANONYMOUS_SUB, decode_bearer_header,
                                        InvalidBearerTokenException)
from data import model


logger = logging.getLogger(__name__)


CONTEXT_KINDS = ['user', 'token', 'oauth', 'app_specific_token']


ACCESS_SCHEMA = {
  'type': 'array',
  'description': 'List of access granted to the subject',

@ -65,71 +62,6 @@ class InvalidJWTException(Exception):
  pass


class GrantedEntity(object):
  def __init__(self, user=None, token=None, oauth=None, app_specific_token=None):
    self.user = user
    self.token = token
    self.oauth = oauth
    self.app_specific_token = app_specific_token


def get_granted_entity():
  """ Returns the entity granted in the current context, if any. Returns the GrantedEntity or None
      if none.
  """
  context = get_grant_context()
  if not context:
    return None

  kind = context.get('kind', 'anonymous')

  if not kind in CONTEXT_KINDS:
    return None

  if kind == 'app_specific_token':
    app_specific_token = model.appspecifictoken.get_token_by_uuid(context.get('ast', ''))
    if app_specific_token is None:
      return None

    return GrantedEntity(app_specific_token=app_specific_token, user=app_specific_token.user)

  if kind == 'user':
    user = model.user.get_user(context.get('user', ''))
    if not user:
      return None

    return GrantedEntity(user=user)

  if kind == 'token':
    token = model.token.load_token_data(context.get('token'))
    if not token:
      return None

    return GrantedEntity(token=token)

  if kind == 'oauth':
    user = model.user.get_user(context.get('user', ''))
    if not user:
      return None

    oauthtoken = model.oauth.lookup_access_token_for_user(user, context.get('oauth', ''))
    if not oauthtoken:
      return None

    return GrantedEntity(oauth=oauthtoken, user=user)

  return None


def get_granted_username():
  """ Returns the username inside the grant, if any. """
  granted = get_granted_entity()
  if not granted or not granted.user:
    return None

  return granted.user.username


def get_auth_headers(repository=None, scopes=None):
  """ Returns a dictionary of headers for auth responses. """
  headers = {}

@ -198,6 +130,9 @@ def identity_from_bearer_token(bearer_header):


def process_registry_jwt_auth(scopes=None):
  """ Processes the registry JWT auth token found in the authorization header. If none found,
      no error is returned. If an invalid token is found, raises a 401.
  """
  def inner(func):
    @wraps(func)
    def wrapper(*args, **kwargs):

@ -205,10 +140,15 @@ def process_registry_jwt_auth(scopes=None):
      auth = request.headers.get('authorization', '').strip()
      if auth:
        try:
          extracted_identity, context = identity_from_bearer_token(auth)
          extracted_identity, context_dict = identity_from_bearer_token(auth)
          identity_changed.send(app, identity=extracted_identity)
          set_grant_context(context)
          logger.debug('Identity changed to %s', extracted_identity.id)

          auth_context = SignedAuthContext.build_from_signed_dict(context_dict)
          if auth_context is not None:
            logger.debug('Auth context set to %s', auth_context.signed_data)
            set_authenticated_context(auth_context)

        except InvalidJWTException as ije:
          repository = None
          if 'namespace_name' in kwargs and 'repo_name' in kwargs:

@ -96,11 +96,10 @@ IMPLIED_SCOPES = {


def app_scopes(app_config):
  scopes_from_config = dict(ALL_SCOPES)
  if not app_config.get('FEATURE_SUPER_USERS', False):
    scopes_from_config = dict(ALL_SCOPES)
    del scopes_from_config[SUPERUSER.scope]
    return scopes_from_config
  return ALL_SCOPES
  return scopes_from_config


def scopes_from_scope_string(scopes):
@ -0,0 +1,51 @@
import pytest

from auth.auth_context_type import SignedAuthContext, ValidatedAuthContext, ContextEntityKind
from data import model, database

from test.fixtures import *

def get_oauth_token(_):
  return database.OAuthAccessToken.get()


@pytest.mark.parametrize('kind, entity_reference, loader', [
  (ContextEntityKind.anonymous, None, None),
  (ContextEntityKind.appspecifictoken, '%s%s' % ('a' * 60, 'b' * 60),
   model.appspecifictoken.access_valid_token),
  (ContextEntityKind.oauthtoken, None, get_oauth_token),
  (ContextEntityKind.robot, 'devtable+dtrobot', model.user.lookup_robot),
  (ContextEntityKind.user, 'devtable', model.user.get_user),
])
@pytest.mark.parametrize('v1_dict_format', [
  (True),
  (False),
])
def test_signed_auth_context(kind, entity_reference, loader, v1_dict_format, initialized_db):
  if kind == ContextEntityKind.anonymous:
    validated = ValidatedAuthContext()
    assert validated.is_anonymous
  else:
    ref = loader(entity_reference)
    validated = ValidatedAuthContext(**{kind.value: ref})
    assert not validated.is_anonymous

  assert validated.entity_kind == kind
  assert validated.unique_key

  signed = SignedAuthContext.build_from_signed_dict(validated.to_signed_dict(),
                                                    v1_dict_format=v1_dict_format)

  if not v1_dict_format:
    # Under legacy V1 format, we don't track the app specific token, merely its associated user.
    assert signed.entity_kind == kind
    assert signed.description == validated.description
    assert signed.credential_username == validated.credential_username
    assert signed.analytics_id_and_public_metadata() == validated.analytics_id_and_public_metadata()
    assert signed.unique_key == validated.unique_key

  assert signed.is_anonymous == validated.is_anonymous
  assert signed.authed_user == validated.authed_user
  assert signed.has_nonrobot_user == validated.has_nonrobot_user

  assert signed.to_signed_dict() == validated.to_signed_dict()
@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-

import pytest

from base64 import b64encode

@ -12,6 +14,8 @@ from test.fixtures import *


def _token(username, password):
  assert isinstance(username, basestring)
  assert isinstance(password, basestring)
  return 'basic ' + b64encode('%s:%s' % (username, password))


@ -31,8 +35,7 @@ def _token(username, password):
  (_token('devtable', 'invalid'), ValidateResult(AuthKind.basic,
                                                 error_message='Invalid Username or Password')),
  (_token('devtable+somebot', 'invalid'), ValidateResult(
    AuthKind.basic, error_message='Could not find robot with username: devtable+somebot ' +
                                  'and supplied password.')),
    AuthKind.basic, error_message='Could not find robot with username: devtable+somebot')),
  (_token('disabled', 'password'), ValidateResult(
    AuthKind.basic,
    error_message='This user has been disabled. Please contact your administrator.')),])

@ -56,15 +59,16 @@ def test_valid_robot(app):

def test_valid_token(app):
  access_token = model.token.create_delegate_token('devtable', 'simple', 'sometoken')
  token = _token(ACCESS_TOKEN_USERNAME, access_token.code)
  token = _token(ACCESS_TOKEN_USERNAME, access_token.get_code())
  result = validate_basic_auth(token)
  assert result == ValidateResult(AuthKind.basic, token=access_token)


def test_valid_oauth(app):
  user = model.user.get_user('devtable')
  oauth_token = list(model.oauth.list_access_tokens_for_user(user))[0]
  token = _token(OAUTH_TOKEN_USERNAME, oauth_token.access_token)
  app = model.oauth.list_applications_for_org(model.user.get_user_or_org('buynlarge'))[0]
  oauth_token, code = model.oauth.create_access_token_for_testing(user, app.client_id, 'repo:read')
  token = _token(OAUTH_TOKEN_USERNAME, code)
  result = validate_basic_auth(token)
  assert result == ValidateResult(AuthKind.basic, oauthtoken=oauth_token)

@ -72,6 +76,23 @@ def test_valid_oauth(app):
def test_valid_app_specific_token(app):
  user = model.user.get_user('devtable')
  app_specific_token = model.appspecifictoken.create_token(user, 'some token')
  token = _token(APP_SPECIFIC_TOKEN_USERNAME, app_specific_token.token_code)
  full_token = model.appspecifictoken.get_full_token_string(app_specific_token)
  token = _token(APP_SPECIFIC_TOKEN_USERNAME, full_token)
  result = validate_basic_auth(token)
  assert result == ValidateResult(AuthKind.basic, appspecifictoken=app_specific_token)


def test_invalid_unicode(app):
  token = '\xebOH'
  header = 'basic ' + b64encode(token)
  result = validate_basic_auth(header)
  assert result == ValidateResult(AuthKind.basic, missing=True)


def test_invalid_unicode_2(app):
  token = '“4JPCOLIVMAY32Q3XGVPHC4CBF8SKII5FWNYMASOFDIVSXTC5I5NBU”'
  header = 'basic ' + b64encode('devtable+somerobot:%s' % token)
  result = validate_basic_auth(header)
  assert result == ValidateResult(
    AuthKind.basic,
    error_message='Could not find robot with username: devtable+somerobot and supplied password.')
@ -20,8 +20,8 @@ def test_invalidformatted_cookie(app):
  # Ensure we get an invalid session cookie format error.
  result = validate_session_cookie()
  assert result.authed_user is None
  assert result.identity is None
  assert not result.has_user
  assert result.context.identity is None
  assert not result.has_nonrobot_user
  assert result.error_message == 'Invalid session cookie format'


@ -33,8 +33,8 @@ def test_disabled_user(app):
  # Ensure we get an invalid session cookie format error.
  result = validate_session_cookie()
  assert result.authed_user is None
  assert result.identity is None
  assert not result.has_user
  assert result.context.identity is None
  assert not result.has_nonrobot_user
  assert result.error_message == 'User account is disabled'


@ -45,8 +45,8 @@ def test_valid_user(app):

  result = validate_session_cookie()
  assert result.authed_user == someuser
  assert result.identity is not None
  assert result.has_user
  assert result.context.identity is not None
  assert result.has_nonrobot_user
  assert result.error_message is None


@ -61,6 +61,6 @@ def test_valid_organization(app):

  result = validate_session_cookie()
  assert result.authed_user is None
  assert result.identity is None
  assert not result.has_user
  assert result.context.identity is None
  assert not result.has_nonrobot_user
  assert result.error_message == 'Cannot login to organization'
@@ -1,5 +1,8 @@
-from auth.credentials import (ACCESS_TOKEN_USERNAME, OAUTH_TOKEN_USERNAME, validate_credentials,
-                              CredentialKind, APP_SPECIFIC_TOKEN_USERNAME)
+# -*- coding: utf-8 -*-
+
+from auth.credentials import validate_credentials, CredentialKind
+from auth.credential_consts import (ACCESS_TOKEN_USERNAME, OAUTH_TOKEN_USERNAME,
+                                    APP_SPECIFIC_TOKEN_USERNAME)
 from auth.validateresult import AuthKind, ValidateResult
 from data import model

@@ -30,14 +33,15 @@ def test_valid_robot_for_disabled_user(app):

 def test_valid_token(app):
   access_token = model.token.create_delegate_token('devtable', 'simple', 'sometoken')
-  result, kind = validate_credentials(ACCESS_TOKEN_USERNAME, access_token.code)
+  result, kind = validate_credentials(ACCESS_TOKEN_USERNAME, access_token.get_code())
   assert kind == CredentialKind.token
   assert result == ValidateResult(AuthKind.credentials, token=access_token)

 def test_valid_oauth(app):
   user = model.user.get_user('devtable')
-  oauth_token = list(model.oauth.list_access_tokens_for_user(user))[0]
-  result, kind = validate_credentials(OAUTH_TOKEN_USERNAME, oauth_token.access_token)
+  app = model.oauth.list_applications_for_org(model.user.get_user_or_org('buynlarge'))[0]
+  oauth_token, code = model.oauth.create_access_token_for_testing(user, app.client_id, 'repo:read')
+  result, kind = validate_credentials(OAUTH_TOKEN_USERNAME, code)
   assert kind == CredentialKind.oauth_token
   assert result == ValidateResult(AuthKind.oauth, oauthtoken=oauth_token)

@@ -50,8 +54,8 @@ def test_invalid_user(app):
 def test_valid_app_specific_token(app):
   user = model.user.get_user('devtable')
   app_specific_token = model.appspecifictoken.create_token(user, 'some token')

-  result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, app_specific_token.token_code)
+  full_token = model.appspecifictoken.get_full_token_string(app_specific_token)
+  result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, full_token)
   assert kind == CredentialKind.app_specific_token
   assert result == ValidateResult(AuthKind.credentials, appspecifictoken=app_specific_token)

@@ -61,8 +65,8 @@ def test_valid_app_specific_token_for_disabled_user(app):
   user.save()

   app_specific_token = model.appspecifictoken.create_token(user, 'some token')

-  result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, app_specific_token.token_code)
+  full_token = model.appspecifictoken.get_full_token_string(app_specific_token)
+  result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, full_token)
   assert kind == CredentialKind.app_specific_token

   err = 'This user has been disabled. Please contact your administrator.'

@@ -72,3 +76,72 @@ def test_invalid_app_specific_token(app):
   result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, 'somecode')
   assert kind == CredentialKind.app_specific_token
   assert result == ValidateResult(AuthKind.credentials, error_message='Invalid token')
+
+
+def test_invalid_app_specific_token_code(app):
+  user = model.user.get_user('devtable')
+  app_specific_token = model.appspecifictoken.create_token(user, 'some token')
+  full_token = app_specific_token.token_name + 'something'
+  result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, full_token)
+  assert kind == CredentialKind.app_specific_token
+  assert result == ValidateResult(AuthKind.credentials, error_message='Invalid token')
+
+
+def test_unicode(app):
+  result, kind = validate_credentials('someusername', 'some₪code')
+  assert kind == CredentialKind.user
+  assert not result.auth_valid
+  assert result == ValidateResult(AuthKind.credentials,
+                                  error_message='Invalid Username or Password')
+
+
+def test_unicode_robot(app):
+  robot, _ = model.user.create_robot('somerobot', model.user.get_user('devtable'))
+  result, kind = validate_credentials(robot.username, 'some₪code')
+
+  assert kind == CredentialKind.robot
+  assert not result.auth_valid
+
+  msg = 'Could not find robot with username: devtable+somerobot and supplied password.'
+  assert result == ValidateResult(AuthKind.credentials, error_message=msg)
+
+
+def test_invalid_user(app):
+  result, kind = validate_credentials('someinvaliduser', 'password')
+  assert kind == CredentialKind.user
+  assert not result.authed_user
+  assert not result.auth_valid
+
+
+def test_invalid_user_password(app):
+  result, kind = validate_credentials('devtable', 'somepassword')
+  assert kind == CredentialKind.user
+  assert not result.authed_user
+  assert not result.auth_valid
+
+
+def test_invalid_robot(app):
+  result, kind = validate_credentials('devtable+doesnotexist', 'password')
+  assert kind == CredentialKind.robot
+  assert not result.authed_user
+  assert not result.auth_valid
+
+
+def test_invalid_robot_token(app):
+  robot, _ = model.user.create_robot('somerobot', model.user.get_user('devtable'))
+  result, kind = validate_credentials(robot.username, 'invalidpassword')
+  assert kind == CredentialKind.robot
+  assert not result.authed_user
+  assert not result.auth_valid
+
+
+def test_invalid_unicode_robot(app):
+  token = '“4JPCOLIVMAY32Q3XGVPHC4CBF8SKII5FWNYMASOFDIVSXTC5I5NBU”'
+  result, kind = validate_credentials('devtable+somerobot', token)
+  assert kind == CredentialKind.robot
+  assert not result.auth_valid
+  msg = 'Could not find robot with username: devtable+somerobot'
+  assert result == ValidateResult(AuthKind.credentials, error_message=msg)
+
+
+def test_invalid_unicode_robot_2(app):
+  user = model.user.get_user('devtable')
+  robot, password = model.user.create_robot('somerobot', user)
+
+  token = '“4JPCOLIVMAY32Q3XGVPHC4CBF8SKII5FWNYMASOFDIVSXTC5I5NBU”'
+  result, kind = validate_credentials('devtable+somerobot', token)
+  assert kind == CredentialKind.robot
+  assert not result.auth_valid
+  msg = 'Could not find robot with username: devtable+somerobot and supplied password.'
+  assert result == ValidateResult(AuthKind.credentials, error_message=msg)
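Aside: the tests above lean on validate_credentials dispatching on sentinel usernames before any password check runs. A self-contained sketch of that dispatch; the sentinel values and the helper name below are assumptions for illustration, not copied from auth/credentials.py:

# Sketch of credential-kind dispatch: special usernames select the kind.
from enum import Enum

class CredentialKind(Enum):
  user = 'user'
  robot = 'robot'
  token = 'token'
  oauth_token = 'oauth_token'
  app_specific_token = 'app_specific_token'

ACCESS_TOKEN_USERNAME = '$token'            # assumed sentinel value
OAUTH_TOKEN_USERNAME = '$oauthtoken'        # assumed sentinel value
APP_SPECIFIC_TOKEN_USERNAME = '$app'        # assumed sentinel value

def classify_credentials(username):
  """ Returns the CredentialKind implied by the username alone. """
  if username == ACCESS_TOKEN_USERNAME:
    return CredentialKind.token
  if username == OAUTH_TOKEN_USERNAME:
    return CredentialKind.oauth_token
  if username == APP_SPECIFIC_TOKEN_USERNAME:
    return CredentialKind.app_specific_token
  if '+' in username:  # robot accounts are namespaced as user+robot
    return CredentialKind.robot
  return CredentialKind.user

assert classify_credentials('$oauthtoken') == CredentialKind.oauth_token
assert classify_credentials('devtable+somerobot') == CredentialKind.robot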
@@ -18,21 +18,24 @@ def test_bearer(header, expected_result, app):

 def test_valid_oauth(app):
   user = model.user.get_user('devtable')
-  token = list(model.oauth.list_access_tokens_for_user(user))[0]
-
-  result = validate_bearer_auth('bearer ' + token.access_token)
-  assert result.oauthtoken == token
+  app = model.oauth.list_applications_for_org(model.user.get_user_or_org('buynlarge'))[0]
+  token_string = '%s%s' % ('a' * 20, 'b' * 20)
+  oauth_token, _ = model.oauth.create_access_token_for_testing(user, app.client_id, 'repo:read',
+                                                               access_token=token_string)
+  result = validate_bearer_auth('bearer ' + token_string)
+  assert result.context.oauthtoken == oauth_token
   assert result.authed_user == user
   assert result.auth_valid


 def test_disabled_user_oauth(app):
   user = model.user.get_user('disabled')
-  token = model.oauth.create_access_token_for_testing(user, 'deadbeef', 'repo:admin',
-                                                      access_token='foo')
+  token_string = '%s%s' % ('a' * 20, 'b' * 20)
+  oauth_token, _ = model.oauth.create_access_token_for_testing(user, 'deadbeef', 'repo:admin',
+                                                               access_token=token_string)

-  result = validate_bearer_auth('bearer ' + token.access_token)
-  assert result.oauthtoken is None
+  result = validate_bearer_auth('bearer ' + token_string)
+  assert result.context.oauthtoken is None
   assert result.authed_user is None
   assert not result.auth_valid
   assert result.error_message == 'Granter of the oauth access token is disabled'

@@ -40,11 +43,13 @@ def test_disabled_user_oauth(app):

 def test_expired_token(app):
   user = model.user.get_user('devtable')
-  token = model.oauth.create_access_token_for_testing(user, 'deadbeef', 'repo:admin',
-                                                      access_token='bar', expires_in=-1000)
+  token_string = '%s%s' % ('a' * 20, 'b' * 20)
+  oauth_token, _ = model.oauth.create_access_token_for_testing(user, 'deadbeef', 'repo:admin',
+                                                               access_token=token_string,
+                                                               expires_in=-1000)

-  result = validate_bearer_auth('bearer ' + token.access_token)
-  assert result.oauthtoken is None
+  result = validate_bearer_auth('bearer ' + token_string)
+  assert result.context.oauthtoken is None
   assert result.authed_user is None
   assert not result.auth_valid
   assert result.error_message == 'OAuth access token has expired'
@@ -0,0 +1,37 @@
+import pytest
+
+from auth import scopes
+from auth.permissions import SuperUserPermission, QuayDeferredPermissionUser
+from data import model
+
+from test.fixtures import *
+
+SUPER_USERNAME = 'devtable'
+UNSUPER_USERNAME = 'freshuser'
+
+@pytest.fixture()
+def superuser(initialized_db):
+  return model.user.get_user(SUPER_USERNAME)
+
+
+@pytest.fixture()
+def normie(initialized_db):
+  return model.user.get_user(UNSUPER_USERNAME)
+
+
+def test_superuser_matrix(superuser, normie):
+  test_cases = [
+    (superuser, {scopes.SUPERUSER}, True),
+    (superuser, {scopes.DIRECT_LOGIN}, True),
+    (superuser, {scopes.READ_USER, scopes.SUPERUSER}, True),
+    (superuser, {scopes.READ_USER}, False),
+    (normie, {scopes.SUPERUSER}, False),
+    (normie, {scopes.DIRECT_LOGIN}, False),
+    (normie, {scopes.READ_USER, scopes.SUPERUSER}, False),
+    (normie, {scopes.READ_USER}, False),
+  ]
+
+  for user_obj, scope_set, expected in test_cases:
+    perm_user = QuayDeferredPermissionUser.for_user(user_obj, scope_set)
+    has_su = perm_user.can(SuperUserPermission())
+    assert has_su == expected
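Aside: the matrix in this new test encodes a two-factor rule: the user must be configured as a superuser and the session must carry a superuser-capable scope. A runnable toy version of that rule, with invented scope names standing in for the real scopes module:

SUPERUSER_SCOPES = {'super:user', 'direct:login'}  # assumed scope names

def can_superuser(username, granted_scopes, configured_superusers):
  # Both conditions must hold: configured superuser AND a qualifying scope.
  return username in configured_superusers and bool(granted_scopes & SUPERUSER_SCOPES)

assert can_superuser('devtable', {'super:user'}, {'devtable'})
assert not can_superuser('devtable', {'read:user'}, {'devtable'})
assert not can_superuser('freshuser', {'super:user'}, {'devtable'})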
@@ -1,18 +1,21 @@
+# -*- coding: utf-8 -*-
+
 import time

 import jwt
 import pytest

 from app import app, instance_keys
+from auth.auth_context_type import ValidatedAuthContext
 from auth.registry_jwt_auth import identity_from_bearer_token, InvalidJWTException
-from data import model  # TODO(jzelinskie): remove this after service keys are decoupled
+from data import model  # TODO: remove this after service keys are decoupled
 from data.database import ServiceKeyApprovalType
 from initdb import setup_database_for_testing, finished_database_for_testing
 from util.morecollections import AttrDict
 from util.security.registry_jwt import ANONYMOUS_SUB, build_context_and_subject

 TEST_AUDIENCE = app.config['SERVER_HOSTNAME']
-TEST_USER = AttrDict({'username': 'joeuser'})
+TEST_USER = AttrDict({'username': 'joeuser', 'uuid': 'foobar', 'enabled': True})
 MAX_SIGNED_S = 3660
 TOKEN_VALIDITY_LIFETIME_S = 60 * 60  # 1 hour
 ANONYMOUS_SUB = '(anonymous)'

@@ -27,7 +30,8 @@ def _access(typ='repository', name='somens/somerepo', actions=None):
   return [{
     'type': typ,
     'name': name,
-    'actions': actions,}]
+    'actions': actions,
+  }]


 def _delete_field(token_data, field_name):

@@ -38,7 +42,7 @@ def _delete_field(token_data, field_name):
 def _token_data(access=[], context=None, audience=TEST_AUDIENCE, user=TEST_USER, iat=None,
                 exp=None, nbf=None, iss=None, subject=None):
   if subject is None:
-    _, subject = build_context_and_subject(user=user)
+    _, subject = build_context_and_subject(ValidatedAuthContext(user=user))
   return {
     'iss': iss or instance_keys.service_name,
     'aud': audience,

@@ -47,7 +51,8 @@ def _token_data(access=[], context=None, audience=TEST_AUDIENCE, user=TEST_USER,
     'exp': exp if exp is not None else int(time.time() + TOKEN_VALIDITY_LIFETIME_S),
     'sub': subject,
     'access': access,
-    'context': context,}
+    'context': context,
+  }


 def _token(token_data, key_id=None, private_key=None, skip_header=False, alg=None):

@@ -107,30 +112,31 @@ def test_token_with_access(access, initialized_db):

 @pytest.mark.parametrize('token', [
-  (_token(
-    _token_data(access=[{
-      'toipe': 'repository',
-      'namesies': 'somens/somerepo',
-      'akshuns': ['pull', 'push', '*']}]))),
-  (_token(_token_data(audience='someotherapp'))),
-  (_token(_delete_field(_token_data(), 'aud'))),
-  (_token(_token_data(nbf=int(time.time()) + 600))),
-  (_token(_delete_field(_token_data(), 'nbf'))),
-  (_token(_token_data(iat=int(time.time()) + 600))),
-  (_token(_delete_field(_token_data(), 'iat'))),
-  (_token(_token_data(exp=int(time.time()) + MAX_SIGNED_S * 2))),
-  (_token(_token_data(exp=int(time.time()) - 60))),
-  (_token(_delete_field(_token_data(), 'exp'))),
-  (_token(_delete_field(_token_data(), 'sub'))),
-  (_token(_token_data(iss='badissuer'))),
-  (_token(_delete_field(_token_data(), 'iss'))),
-  (_token(_token_data(), skip_header=True)),
-  (_token(_token_data(), key_id='someunknownkey')),
-  (_token(_token_data(), key_id='kid7')),
-  (_token(_token_data(), alg='none', private_key=None)),
-  ('some random token'),
-  ('Bearer: sometokenhere'),
-  ('\nBearer: dGVzdA'),])
+  pytest.param(_token(
+    _token_data(access=[{
+      'toipe': 'repository',
+      'namesies': 'somens/somerepo',
+      'akshuns': ['pull', 'push', '*']}])), id='bad access'),
+  pytest.param(_token(_token_data(audience='someotherapp')), id='bad aud'),
+  pytest.param(_token(_delete_field(_token_data(), 'aud')), id='no aud'),
+  pytest.param(_token(_token_data(nbf=int(time.time()) + 600)), id='future nbf'),
+  pytest.param(_token(_delete_field(_token_data(), 'nbf')), id='no nbf'),
+  pytest.param(_token(_token_data(iat=int(time.time()) + 600)), id='future iat'),
+  pytest.param(_token(_delete_field(_token_data(), 'iat')), id='no iat'),
+  pytest.param(_token(_token_data(exp=int(time.time()) + MAX_SIGNED_S * 2)), id='exp too long'),
+  pytest.param(_token(_token_data(exp=int(time.time()) - 60)), id='expired'),
+  pytest.param(_token(_delete_field(_token_data(), 'exp')), id='no exp'),
+  pytest.param(_token(_delete_field(_token_data(), 'sub')), id='no sub'),
+  pytest.param(_token(_token_data(iss='badissuer')), id='bad iss'),
+  pytest.param(_token(_delete_field(_token_data(), 'iss')), id='no iss'),
+  pytest.param(_token(_token_data(), skip_header=True), id='no header'),
+  pytest.param(_token(_token_data(), key_id='someunknownkey'), id='bad key'),
+  pytest.param(_token(_token_data(), key_id='kid7'), id='bad key :: kid7'),
+  pytest.param(_token(_token_data(), alg='none', private_key=None), id='none alg'),
+  pytest.param('some random token', id='random token'),
+  pytest.param('Bearer: sometokenhere', id='extra bearer'),
+  pytest.param('\nBearer: dGVzdA', id='leading newline'),
+])
 def test_invalid_jwt(token, initialized_db):
   with pytest.raises(InvalidJWTException):
     _parse_token(token)

@@ -151,7 +157,7 @@ def test_mixing_keys_e2e(initialized_db):

   # Approve the key and try again.
   admin_user = model.user.get_user('devtable')
-  model.service_keys.approve_service_key(key.kid, admin_user, ServiceKeyApprovalType.SUPERUSER)
+  model.service_keys.approve_service_key(key.kid, ServiceKeyApprovalType.SUPERUSER, approver=admin_user)

   valid_token = _token(token_data, key_id='newkey', private_key=private_key)

@@ -186,3 +192,12 @@ def test_mixing_keys_e2e(initialized_db):
   # Ensure the key no longer works.
   with pytest.raises(InvalidJWTException):
     _parse_token(deleted_key_token)
+
+
+@pytest.mark.parametrize('token', [
+  u'someunicodetoken✡',
+  u'\xc9\xad\xbd',
+])
+def test_unicode_token(token):
+  with pytest.raises(InvalidJWTException):
+    _parse_token(token)
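Aside: the conversion above from bare tuples to pytest.param(..., id=...) makes each failing case self-describing in test output. A minimal runnable illustration of the same idiom:

import pytest

@pytest.mark.parametrize('token', [
  pytest.param('', id='empty'),
  pytest.param('Bearer: sometokenhere', id='extra bearer'),
  pytest.param('\nBearer: dGVzdA', id='leading newline'),
])
def test_rejects(token):
  # Failures now report as test_rejects[empty], test_rejects[extra bearer], ...
  assert not token.startswith('bearer ')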
@@ -5,16 +5,28 @@ from auth.validateresult import AuthKind, ValidateResult

 @pytest.mark.parametrize('header, expected_result', [
-  ('', ValidateResult(AuthKind.signed_grant, missing=True)),
-  ('somerandomtoken', ValidateResult(AuthKind.signed_grant, missing=True)),
-  ('token somerandomtoken', ValidateResult(AuthKind.signed_grant, missing=True)),
-  ('token ' + SIGNATURE_PREFIX + 'foo',
-   ValidateResult(AuthKind.signed_grant, error_message='Signed grant could not be validated')),
-  ('token ' + generate_signed_token({
-    'a': 'b'}, {'c': 'd'}), ValidateResult(AuthKind.signed_grant, signed_data={
-      'grants': {
-        'a': 'b'},
-      'user_context': {
-        'c': 'd'}})),])
+  pytest.param('', ValidateResult(AuthKind.signed_grant, missing=True), id='Missing'),
+  pytest.param('somerandomtoken', ValidateResult(AuthKind.signed_grant, missing=True),
+               id='Invalid header'),
+  pytest.param('token somerandomtoken', ValidateResult(AuthKind.signed_grant, missing=True),
+               id='Random Token'),
+  pytest.param('token ' + SIGNATURE_PREFIX + 'foo',
+               ValidateResult(AuthKind.signed_grant,
+                              error_message='Signed grant could not be validated'),
+               id='Invalid token'),
+])
 def test_token(header, expected_result):
   assert validate_signed_grant(header) == expected_result
+
+
+def test_valid_grant():
+  header = 'token ' + generate_signed_token({'a': 'b'}, {'c': 'd'})
+  expected = ValidateResult(AuthKind.signed_grant, signed_data={
+    'grants': {
+      'a': 'b',
+    },
+    'user_context': {
+      'c': 'd'
+    },
+  })
+  assert validate_signed_grant(header) == expected
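Aside: a self-contained sketch of the signed-grant idea these cases exercise, using a plain HMAC. Quay's actual generate_signed_token/validate_signed_grant differ in detail; the secret and prefix below are placeholders:

import hashlib
import hmac
import json

SECRET = b'not-the-real-key'  # assumed; the real key comes from app config
PREFIX = 'sigv1='             # assumed stand-in for SIGNATURE_PREFIX

def generate_signed_token(grants, user_context):
  payload = json.dumps({'grants': grants, 'user_context': user_context}, sort_keys=True)
  sig = hmac.new(SECRET, payload.encode('utf-8'), hashlib.sha256).hexdigest()
  return PREFIX + sig + '.' + payload

def validate_signed_grant(token):
  if not token.startswith(PREFIX):
    return None  # missing or malformed header
  sig, _, payload = token[len(PREFIX):].partition('.')
  expected = hmac.new(SECRET, payload.encode('utf-8'), hashlib.sha256).hexdigest()
  if not hmac.compare_digest(sig, expected):
    return None  # 'Signed grant could not be validated'
  return json.loads(payload)

token = generate_signed_token({'a': 'b'}, {'c': 'd'})
assert validate_signed_grant(token) == {'grants': {'a': 'b'}, 'user_context': {'c': 'd'}}
assert validate_signed_grant(PREFIX + 'bad.{}') is None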
@@ -1,45 +1,41 @@
 import pytest

-from auth.auth_context import (
-  get_authenticated_user, get_grant_context, get_validated_token, get_validated_oauth_token)
+from auth.auth_context import get_authenticated_context
 from auth.validateresult import AuthKind, ValidateResult
 from data import model
+from data.database import AppSpecificAuthToken
 from test.fixtures import *


 def get_user():
   return model.user.get_user('devtable')

+def get_app_specific_token():
+  return AppSpecificAuthToken.get()

 def get_robot():
   robot, _ = model.user.create_robot('somebot', get_user())
   return robot


 def get_token():
   return model.token.create_delegate_token('devtable', 'simple', 'sometoken')


 def get_oauthtoken():
   user = model.user.get_user('devtable')
   return list(model.oauth.list_access_tokens_for_user(user))[0]


 def get_signeddata():
   return {'grants': {'a': 'b'}, 'user_context': {'c': 'd'}}


 @pytest.mark.parametrize('get_entity,entity_kind', [
   (get_user, 'user'),
   (get_robot, 'robot'),
   (get_token, 'token'),
   (get_oauthtoken, 'oauthtoken'),
-  (get_signeddata, 'signed_data'),])
+  (get_signeddata, 'signed_data'),
+  (get_app_specific_token, 'appspecifictoken'),
+])
 def test_apply_context(get_entity, entity_kind, app):
-  assert get_authenticated_user() is None
-  assert get_validated_token() is None
-  assert get_validated_oauth_token() is None
-  assert get_grant_context() is None
+  assert get_authenticated_context() is None

   entity = get_entity()
   args = {}

@@ -52,16 +48,16 @@ def test_apply_context(get_entity, entity_kind, app):
   if entity_kind == 'oauthtoken':
     expected_user = entity.authorized_user

   if entity_kind == 'appspecifictoken':
     expected_user = entity.user

   expected_token = entity if entity_kind == 'token' else None
   expected_oauth = entity if entity_kind == 'oauthtoken' else None
+  expected_appspecifictoken = entity if entity_kind == 'appspecifictoken' else None
-  expected_grant = entity if entity_kind == 'signed_data' else None

+  fake_grant = {
+    'user': {
+      'c': 'd'},
+    'kind': 'user',}
+  expected_grant = fake_grant if entity_kind == 'signed_data' else None

-  assert get_authenticated_user() == expected_user
-  assert get_validated_token() == expected_token
-  assert get_validated_oauth_token() == expected_oauth
-  assert get_grant_context() == expected_grant
+  assert get_authenticated_context().authed_user == expected_user
+  assert get_authenticated_context().token == expected_token
+  assert get_authenticated_context().oauthtoken == expected_oauth
+  assert get_authenticated_context().appspecifictoken == expected_appspecifictoken
+  assert get_authenticated_context().signed_data == expected_grant
@@ -1,11 +1,5 @@
 from enum import Enum
-from flask_principal import Identity, identity_changed
-
-from app import app
-from auth.auth_context import (set_authenticated_user, set_validated_token, set_grant_context,
-                               set_validated_oauth_token, set_validated_app_specific_token)
-from auth.scopes import scopes_from_scope_string
-from auth.permissions import QuayDeferredPermissionUser
+
+from auth.auth_context_type import ValidatedAuthContext, ContextEntityKind


 class AuthKind(Enum):

@@ -22,94 +16,41 @@ class ValidateResult(object):
                robot=None, appspecifictoken=None, signed_data=None, error_message=None):
     self.kind = kind
     self.missing = missing
-    self.user = user
-    self.robot = robot
-    self.token = token
-    self.oauthtoken = oauthtoken
-    self.appspecifictoken = appspecifictoken
-    self.signed_data = signed_data
     self.error_message = error_message
+    self.context = ValidatedAuthContext(user=user, token=token, oauthtoken=oauthtoken, robot=robot,
+                                        appspecifictoken=appspecifictoken, signed_data=signed_data)

   def tuple(self):
-    return (self.kind, self.missing, self.user, self.token, self.oauthtoken, self.robot,
-            self.appspecifictoken, self.signed_data, self.error_message)
+    return (self.kind, self.missing, self.error_message, self.context.tuple())

   def __eq__(self, other):
     return self.tuple() == other.tuple()

   def apply_to_context(self):
     """ Applies this auth result to the auth context and Flask-Principal. """
-    # Set the various pieces of the auth context.
-    if self.oauthtoken:
-      set_authenticated_user(self.authed_user)
-      set_validated_oauth_token(self.oauthtoken)
-    elif self.appspecifictoken:
-      set_authenticated_user(self.authed_user)
-      set_validated_app_specific_token(self.appspecifictoken)
-    elif self.authed_user:
-      set_authenticated_user(self.authed_user)
-    elif self.token:
-      set_validated_token(self.token)
-    elif self.signed_data:
-      if self.signed_data['user_context']:
-        set_grant_context({
-          'user': self.signed_data['user_context'],
-          'kind': 'user',
-        })
-
-    # Set the identity for Flask-Principal.
-    if self.identity:
-      identity_changed.send(app, identity=self.identity)
+    self.context.apply_to_request_context()

   def with_kind(self, kind):
     """ Returns a copy of this result, but with the kind replaced. """
-    return ValidateResult(kind, self.missing, self.user, self.token, self.oauthtoken, self.robot,
-                          self.appspecifictoken, self.signed_data, self.error_message)
+    result = ValidateResult(kind, missing=self.missing, error_message=self.error_message)
+    result.context = self.context
+    return result

   def __repr__(self):
     return 'ValidateResult: %s (missing: %s, error: %s)' % (self.kind, self.missing,
                                                             self.error_message)

   @property
   def authed_user(self):
     """ Returns the authenticated user, whether directly, or via an OAuth token. """
-    if not self.auth_valid:
-      return None
-
-    if self.oauthtoken:
-      return self.oauthtoken.authorized_user
-
-    if self.appspecifictoken:
-      return self.appspecifictoken.user
-
-    return self.user if self.user else self.robot
+    return self.context.authed_user

-  @property
-  def identity(self):
-    """ Returns the identity for the auth result. """
-    if not self.auth_valid:
-      return None
-
-    if self.oauthtoken:
-      scope_set = scopes_from_scope_string(self.oauthtoken.scope)
-      return QuayDeferredPermissionUser.for_user(self.oauthtoken.authorized_user, scope_set)
-
-    if self.authed_user:
-      return QuayDeferredPermissionUser.for_user(self.authed_user)
-
-    if self.token:
-      return Identity(self.token.code, 'token')
-
-    if self.signed_data:
-      identity = Identity(None, 'signed_grant')
-      identity.provides.update(self.signed_data['grants'])
-      return identity
-
-    return None

   @property
-  def has_user(self):
+  def has_nonrobot_user(self):
     """ Returns whether a user (not a robot) was authenticated successfully. """
-    return bool(self.user)
+    return self.context.has_nonrobot_user

   @property
   def auth_valid(self):
     """ Returns whether authentication successfully occurred. """
-    return (self.user or self.token or self.oauthtoken or self.appspecifictoken or self.robot or
-            self.signed_data)
+    return self.context.entity_kind != ContextEntityKind.anonymous
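Aside: the ValidateResult rewrite above is a delegation refactor: the public accessors survive, but all entity state moves into a single context object. The pattern in isolation, with invented fields:

class Context(object):
  def __init__(self, user=None, robot=None):
    self.user = user
    self.robot = robot

  @property
  def authed_user(self):
    return self.user if self.user else self.robot

  @property
  def has_nonrobot_user(self):
    return bool(self.user)

class Result(object):
  def __init__(self, error_message=None, **entities):
    self.error_message = error_message
    self.context = Context(**entities)  # single source of truth for entities

  @property
  def authed_user(self):
    return self.context.authed_user    # legacy accessor preserved

  @property
  def has_nonrobot_user(self):
    return self.context.has_nonrobot_user

assert Result(user='devtable').has_nonrobot_user
assert not Result(robot='devtable+robot').has_nonrobot_user

Callers keep compiling against the old surface while new code can pass the context around on its own, which is what lets the Flask-Principal wiring move out of this class entirely.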
(File diff suppressed because it is too large.)

boot.py (76 changed lines)

@@ -4,7 +4,7 @@ from datetime import datetime, timedelta
 from urlparse import urlunparse

 from jinja2 import Template
-from cachetools import lru_cache
+from cachetools.func import lru_cache

 import logging
 import release

@@ -45,40 +45,55 @@ def get_audience():
   return urlunparse((scheme, hostname + ':' + port, '', '', '', ''))


+def _verify_service_key():
+  try:
+    with open(app.config['INSTANCE_SERVICE_KEY_KID_LOCATION']) as f:
+      quay_key_id = f.read()
+
+    try:
+      get_service_key(quay_key_id, approved_only=False)
+      assert os.path.exists(app.config['INSTANCE_SERVICE_KEY_LOCATION'])
+      return quay_key_id
+    except ServiceKeyDoesNotExist:
+      logger.exception('Could not find non-expired existing service key %s; creating a new one',
+                       quay_key_id)
+      return None
+
+    # Found a valid service key, so exiting.
+  except IOError:
+    logger.exception('Could not load existing service key; creating a new one')
+    return None
+
+
 def setup_jwt_proxy():
   """
   Creates a service key for quay to use in the jwtproxy and generates the JWT proxy configuration.
   """
   if os.path.exists(os.path.join(CONF_DIR, 'jwtproxy_conf.yaml')):
     # Proxy is already setup. Make sure the service key is still valid.
-    try:
-      with open(app.config['INSTANCE_SERVICE_KEY_KID_LOCATION']) as f:
-        quay_key_id = f.read()
-
-      try:
-        get_service_key(quay_key_id, approved_only=False)
-        return
-      except ServiceKeyDoesNotExist:
-        logger.exception('Could not find non-expired existing service key %s; creating a new one',
-                         quay_key_id)
-
-      # Found a valid service key, so exiting.
-    except IOError:
-      logger.exception('Could not load existing service key; creating a new one')
-
-  # Generate the key for this Quay instance to use.
-  minutes_until_expiration = app.config.get('INSTANCE_SERVICE_KEY_EXPIRATION', 120)
-  expiration = datetime.now() + timedelta(minutes=minutes_until_expiration)
-  quay_key, quay_key_id = generate_key(app.config['INSTANCE_SERVICE_KEY_SERVICE'],
-                                       get_audience(), expiration_date=expiration)
-
-  with open(app.config['INSTANCE_SERVICE_KEY_KID_LOCATION'], mode='w') as f:
-    f.truncate(0)
-    f.write(quay_key_id)
-
-  with open(app.config['INSTANCE_SERVICE_KEY_LOCATION'], mode='w') as f:
-    f.truncate(0)
-    f.write(quay_key.exportKey())
+    quay_key_id = _verify_service_key()
+    if quay_key_id is not None:
+      return
+
+  # Ensure we have an existing key if in read-only mode.
+  if app.config.get('REGISTRY_STATE', 'normal') == 'readonly':
+    quay_key_id = _verify_service_key()
+    if quay_key_id is None:
+      raise Exception('No valid service key found for read-only registry.')
+  else:
+    # Generate the key for this Quay instance to use.
+    minutes_until_expiration = app.config.get('INSTANCE_SERVICE_KEY_EXPIRATION', 120)
+    expiration = datetime.now() + timedelta(minutes=minutes_until_expiration)
+    quay_key, quay_key_id = generate_key(app.config['INSTANCE_SERVICE_KEY_SERVICE'],
+                                         get_audience(), expiration_date=expiration)
+
+    with open(app.config['INSTANCE_SERVICE_KEY_KID_LOCATION'], mode='w') as f:
+      f.truncate(0)
+      f.write(quay_key_id)
+
+    with open(app.config['INSTANCE_SERVICE_KEY_LOCATION'], mode='w') as f:
+      f.truncate(0)
+      f.write(quay_key.exportKey())

   # Generate the JWT proxy configuration.
   audience = get_audience()

@@ -93,6 +108,7 @@ def setup_jwt_proxy():
     registry=registry,
     key_id=quay_key_id,
     security_issuer=security_issuer,
+    service_key_location=app.config['INSTANCE_SERVICE_KEY_LOCATION'],
   )

   with open(os.path.join(CONF_DIR, 'jwtproxy_conf.yaml'), 'w') as f:

@@ -100,9 +116,11 @@ def setup_jwt_proxy():

 def main():
-  if app.config.get('SETUP_COMPLETE', False):
-    sync_database_with_config(app.config)
-    setup_jwt_proxy()
+  if not app.config.get('SETUP_COMPLETE', False):
+    raise Exception('Your configuration bundle is either not mounted or setup has not been completed')
+
+  sync_database_with_config(app.config)
+  setup_jwt_proxy()

   # Record deploy
   if release.REGION and release.GIT_HEAD:
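Aside: _verify_service_key factors the "is our key still good?" check out of setup_jwt_proxy so the new read-only path can reuse it. A hedged sketch of that flow, with placeholder paths and a lookup callable standing in for get_service_key:

import os

def verify_service_key(kid_path, key_path, lookup):
  """ Returns the key id if both the id file and key material check out, else None. """
  try:
    with open(kid_path) as f:
      key_id = f.read()
    if lookup(key_id) is None:       # stand-in for get_service_key(key_id, ...)
      return None                    # key id no longer registered
    if not os.path.exists(key_path):
      return None                    # private key material is missing
    return key_id
  except IOError:
    return None                      # id file unreadable; caller generates anew

# Usage sketch: key_id = verify_service_key('/conf/kid', '/conf/key.pem', registry.get)

Returning None for every failure mode keeps the caller's logic to a single "regenerate or raise" branch, which is exactly the shape the read-only check above relies on.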
@@ -1,7 +1,17 @@
+from concurrent.futures import ThreadPoolExecutor
 from functools import partial

 from trollius import get_event_loop, coroutine


+def wrap_with_threadpool(obj, worker_threads=1):
+  """
+  Wraps a class in an async executor so that it can be safely used in an event loop like trollius.
+  """
+  async_executor = ThreadPoolExecutor(worker_threads)
+  return AsyncWrapper(obj, executor=async_executor), async_executor
+
+
 class AsyncWrapper(object):
   """ Wrapper class which will transform a syncronous library to one that can be used with
       trollius coroutines.
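Aside: wrap_with_threadpool is the classic trick of pushing a blocking client onto a ThreadPoolExecutor so the event loop stays free. The same shape, sketched with modern asyncio rather than the trollius API this file targets:

import asyncio
import time
from concurrent.futures import ThreadPoolExecutor

class AsyncWrapperSketch(object):
  def __init__(self, delegate, executor):
    self._delegate = delegate
    self._executor = executor

  def __getattr__(self, name):
    attr = getattr(self._delegate, name)
    if not callable(attr):
      return attr
    async def call(*args, **kwargs):
      loop = asyncio.get_running_loop()
      # Run the blocking call on the pool so the event loop stays responsive.
      return await loop.run_in_executor(self._executor, lambda: attr(*args, **kwargs))
    return call

class SlowClient(object):
  def fetch(self):
    time.sleep(0.01)  # simulate a blocking library call
    return 'ok'

async def main():
  wrapped = AsyncWrapperSketch(SlowClient(), ThreadPoolExecutor(1))
  assert await wrapped.fetch() == 'ok'

asyncio.run(main())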
@@ -37,6 +37,12 @@ def run_build_manager():
     time.sleep(1000)
     return

+  if app.config.get('REGISTRY_STATE', 'normal') == 'readonly':
+    logger.debug('Building is disabled while in read-only mode.')
+    while True:
+      time.sleep(1000)
+    return
+
   build_manager_config = app.config.get('BUILD_MANAGER')
   if build_manager_config is None:
     return
@@ -4,11 +4,11 @@ import time
 import logging
 import json
 import trollius
 import re

 from autobahn.wamp.exception import ApplicationError
 from trollius import From, Return

+from active_migration import ActiveDataMigration, ERTMigrationFlags
 from buildman.server import BuildJobResult
 from buildman.component.basecomponent import BaseComponent
 from buildman.component.buildparse import extract_current_step

@@ -17,9 +17,9 @@ from buildman.jobutil.buildstatus import StatusHandler
 from buildman.jobutil.workererror import WorkerError

 from app import app
-from data import model
 from data.database import BUILD_PHASE, UseThenDisconnect
+from data.model import InvalidRepositoryBuildException
+from data.registry_model import registry_model
 from util import slash_join

 HEARTBEAT_DELTA = datetime.timedelta(seconds=60)

@@ -29,6 +29,9 @@ INITIAL_TIMEOUT = 25

 SUPPORTED_WORKER_VERSIONS = ['0.3']

+# Label which marks a manifest with its source build ID.
+INTERNAL_LABEL_BUILD_UUID = 'quay.build.uuid'
+
 logger = logging.getLogger(__name__)

 class ComponentStatus(object):

@@ -119,25 +122,22 @@ class BuildComponent(BaseComponent):
     # defaults to empty string to avoid requiring a pointer on the builder.
     # sub_directory: The location within the build package of the Dockerfile and the build context.
     # repository: The repository for which this build is occurring.
-    # registry: The registry for which this build is occuring (e.g. 'quay.io', 'staging.quay.io').
+    # registry: The registry for which this build is occuring (e.g. 'quay.io').
     # pull_token: The token to use when pulling the cache for building.
     # push_token: The token to use to push the built image.
     # tag_names: The name(s) of the tag(s) for the newly built image.
     # base_image: The image name and credentials to use to conduct the base image pull.
     #   username: The username for pulling the base image (if any).
     #   password: The password for pulling the base image (if any).

     # TODO: Charlie Tuesday, March 28, 2017 come back and clean up build_subdir.
     context, dockerfile_path = self.extract_dockerfile_args(build_config)

     build_arguments = {
       'build_package': build_job.get_build_package_url(self.user_files),
       'context': context,
       'dockerfile_path': dockerfile_path,
       'repository': repository_name,
       'registry': self.registry_hostname,
-      'pull_token': build_job.repo_build.access_token.code,
-      'push_token': build_job.repo_build.access_token.code,
+      'pull_token': build_job.repo_build.access_token.get_code(),
+      'push_token': build_job.repo_build.access_token.get_code(),
       'tag_names': build_config.get('docker_tags', ['latest']),
       'base_image': base_image_information,
     }

@@ -147,11 +147,23 @@ class BuildComponent(BaseComponent):
       # url: url used to clone the git repository
       # sha: the sha1 identifier of the commit to check out
       # private_key: the key used to get read access to the git repository
-      if build_job.repo_build.trigger.private_key is not None:
+
+      # TODO(remove-unenc): Remove legacy field.
+      private_key = None
+      if build_job.repo_build.trigger is not None and \
+          build_job.repo_build.trigger.secure_private_key is not None:
+        private_key = build_job.repo_build.trigger.secure_private_key.decrypt()
+
+      if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS) and \
+          private_key is None and \
+          build_job.repo_build.trigger is not None:
+        private_key = build_job.repo_build.trigger.private_key
+
+      if private_key is not None:
         build_arguments['git'] = {
           'url': build_config['trigger_metadata'].get('git_url', ''),
           'sha': BuildComponent._commit_sha(build_config),
-          'private_key': build_job.repo_build.trigger.private_key,
+          'private_key': private_key or '',
         }

       # If the build args have no buildpack, mark it as a failure before sending

@@ -340,7 +352,7 @@ class BuildComponent(BaseComponent):
     kwargs = {}

     # Note: If we are hitting an older builder that didn't return ANY map data, then the result
-    # value will be a bool instead of a proper CallResult object (because autobahn sucks).
+    # value will be a bool instead of a proper CallResult object.
     # Therefore: we have a try-except guard here to ensure we don't hit this pitfall.
     try:
       kwargs = result_value.kwresults

@@ -357,18 +369,18 @@ class BuildComponent(BaseComponent):

     # Label the pushed manifests with the build metadata.
     manifest_digests = kwargs.get('digests') or []
-    for digest in manifest_digests:
-      with UseThenDisconnect(app.config):
-        try:
-          manifest = model.tag.load_manifest_by_digest(self._current_job.namespace,
-                                                       self._current_job.repo_name, digest)
-          model.label.create_manifest_label(manifest, model.label.INTERNAL_LABEL_BUILD_UUID,
-                                            build_id, 'internal', 'text/plain')
-        except model.InvalidManifestException:
-          logger.debug('Could not find built manifest with digest %s under repo %s/%s for build %s',
-                       digest, self._current_job.namespace, self._current_job.repo_name,
-                       build_id)
-          continue
+    repository = registry_model.lookup_repository(self._current_job.namespace,
+                                                  self._current_job.repo_name)
+    if repository is not None:
+      for digest in manifest_digests:
+        with UseThenDisconnect(app.config):
+          manifest = registry_model.lookup_manifest_by_digest(repository, digest,
+                                                              require_available=True)
+          if manifest is None:
+            continue
+
+          registry_model.create_manifest_label(manifest, INTERNAL_LABEL_BUILD_UUID,
+                                               build_id, 'internal', 'text/plain')

     # Send the notification that the build has completed successfully.
     self._current_job.send_notification('build_success',
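Aside: the private-key change above follows the usual dual-read migration shape: prefer the new encrypted field, and only fall back to the legacy column while the read-old-fields flag is still active. A generic sketch, where attribute names mirror the diff and the migration flag is reduced to a boolean:

def read_private_key(trigger, read_old_fields_enabled):
  """ trigger is any object with .secure_private_key (new) and .private_key (legacy). """
  private_key = None
  if trigger is not None and trigger.secure_private_key is not None:
    private_key = trigger.secure_private_key.decrypt()  # new encrypted column wins

  if read_old_fields_enabled and private_key is None and trigger is not None:
    private_key = trigger.private_key  # legacy, unencrypted field; removed post-migration

  return private_key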
@@ -2,11 +2,12 @@ import json
 import logging

 from app import app
-from cachetools import lru_cache
+from cachetools.func import lru_cache
 from notifications import spawn_notification
 from data import model
+from data.registry_model import registry_model
+from data.registry_model.datatypes import RepositoryReference
 from data.database import UseThenDisconnect
-from util.imagetree import ImageTree
 from util.morecollections import AttrDict

 logger = logging.getLogger(__name__)

@@ -27,7 +28,7 @@ class BuildJob(object):
       self.build_notifier = BuildJobNotifier(self.build_uuid)
     except ValueError:
       raise BuildJobLoadException(
-        'Could not parse build queue item config with ID %s' % self.job_details['build_uuid']
+          'Could not parse build queue item config with ID %s' % self.job_details['build_uuid']
       )

   @property

@@ -95,70 +96,24 @@ class BuildJob(object):

   def determine_cached_tag(self, base_image_id=None, cache_comments=None):
     """ Returns the tag to pull to prime the cache or None if none. """
-    cached_tag = None
-    if base_image_id and cache_comments:
-      cached_tag = self._determine_cached_tag_by_comments(base_image_id, cache_comments)
-
-    if not cached_tag:
-      cached_tag = self._determine_cached_tag_by_tag()
-
+    cached_tag = self._determine_cached_tag_by_tag()
     logger.debug('Determined cached tag %s for %s: %s', cached_tag, base_image_id, cache_comments)
-
     return cached_tag

-  def _determine_cached_tag_by_comments(self, base_image_id, cache_commands):
-    """ Determines the tag to use for priming the cache for this build job, by matching commands
-        starting at the given base_image_id. This mimics the Docker cache checking, so it should,
-        in theory, provide "perfect" caching.
-    """
-    with UseThenDisconnect(app.config):
-      # Lookup the base image in the repository. If it doesn't exist, nothing more to do.
-      repo_build = self.repo_build
-      repo_namespace = repo_build.repository.namespace_user.username
-      repo_name = repo_build.repository.name
-
-      base_image = model.image.get_image(repo_build.repository, base_image_id)
-      if base_image is None:
-        return None
-
-      # Build an in-memory tree of the full heirarchy of images in the repository.
-      all_images = model.image.get_repository_images_without_placements(repo_build.repository,
-                                                                        with_ancestor=base_image)
-
-      all_tags = model.tag.list_repository_tags(repo_namespace, repo_name)
-      tree = ImageTree(all_images, all_tags, base_filter=base_image.id)
-
-      # Find a path in the tree, starting at the base image, that matches the cache comments
-      # or some subset thereof.
-      def checker(step, image):
-        if step >= len(cache_commands):
-          return False
-
-        full_command = '["/bin/sh", "-c", "%s"]' % cache_commands[step]
-        logger.debug('Checking step #%s: %s, %s == %s', step, image.id, image.command, full_command)
-
-        return image.command == full_command
-
-      path = tree.find_longest_path(base_image.id, checker)
-      if not path:
-        return None
-
-      # Find any tag associated with the last image in the path.
-      return tree.tag_containing_image(path[-1])
-
-
   def _determine_cached_tag_by_tag(self):
     """ Determines the cached tag by looking for one of the tags being built, and seeing if it
         exists in the repository. This is a fallback for when no comment information is available.
     """
     with UseThenDisconnect(app.config):
       tags = self.build_config.get('docker_tags', ['latest'])
-      repository = self.repo_build.repository
-      existing_tags = model.tag.list_repository_tags(repository.namespace_user.username,
-                                                     repository.name)
-      cached_tags = set(tags) & set([tag.name for tag in existing_tags])
-      if cached_tags:
-        return list(cached_tags)[0]
+      repository = RepositoryReference.for_repo_obj(self.repo_build.repository)
+      matching_tag = registry_model.find_matching_tag(repository, tags)
+      if matching_tag is not None:
+        return matching_tag.name
+
+      most_recent_tag = registry_model.get_most_recent_tag(repository)
+      if most_recent_tag is not None:
+        return most_recent_tag.name

       return None

@@ -193,12 +148,18 @@ class BuildJobNotifier(object):
   def send_notification(self, kind, error_message=None, image_id=None, manifest_digests=None):
     with UseThenDisconnect(app.config):
       tags = self.build_config.get('docker_tags', ['latest'])
+      trigger = self.repo_build.trigger
+      if trigger is not None and trigger.id is not None:
+        trigger_kind = trigger.service.name
+      else:
+        trigger_kind = None
+
       event_data = {
         'build_id': self.repo_build.uuid,
         'build_name': self.repo_build.display_name,
         'docker_tags': tags,
-        'trigger_id': self.repo_build.trigger.uuid,
-        'trigger_kind': self.repo_build.trigger.service.name,
+        'trigger_id': trigger.uuid if trigger is not None else None,
+        'trigger_kind': trigger_kind,
         'trigger_metadata': self.build_config.get('trigger_metadata', {})
       }

@@ -211,7 +172,7 @@ class BuildJobNotifier(object):
       if error_message is not None:
         event_data['error_message'] = error_message

-      # TODO(jzelinskie): remove when more endpoints have been converted to using
+      # TODO: remove when more endpoints have been converted to using
       # interfaces
       repo = AttrDict({
         'namespace_name': self.repo_build.repository.namespace_user.username,
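Aside: determine_cached_tag now reduces to a two-step fallback, which is easy to see with the registry_model calls stubbed out as plain callables:

def pick_cache_tag(requested_tags, find_matching_tag, get_most_recent_tag):
  # Step 1: a tag that is both requested and already present in the repo.
  matching = find_matching_tag(requested_tags)
  if matching is not None:
    return matching

  # Step 2: otherwise, whatever tag was pushed most recently.
  most_recent = get_most_recent_tag()
  if most_recent is not None:
    return most_recent

  return None  # nothing to prime the cache from

assert pick_cache_tag(['latest'], lambda tags: None, lambda: 'prod') == 'prod'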
@@ -1,25 +1,27 @@
 import logging

-from buildman.manager.etcd_canceller import EtcdCanceller
+from buildman.manager.orchestrator_canceller import OrchestratorCanceller
 from buildman.manager.noop_canceller import NoopCanceller

 logger = logging.getLogger(__name__)

-CANCELLERS = {'ephemeral': EtcdCanceller}
+CANCELLERS = {'ephemeral': OrchestratorCanceller}


 class BuildCanceller(object):
   """ A class to manage cancelling a build """

   def __init__(self, app=None):
-    build_manager_config = app.config.get('BUILD_MANAGER')
-    if app is None or build_manager_config is None:
+    self.build_manager_config = app.config.get('BUILD_MANAGER')
+    if app is None or self.build_manager_config is None:
       self.handler = NoopCanceller()
-      return
-
-    canceller = CANCELLERS.get(build_manager_config[0], NoopCanceller)
-    self.handler = canceller(build_manager_config[1])
+    else:
+      self.handler = None

   def try_cancel_build(self, uuid):
     """ A method to kill a running build """
+    if self.handler is None:
+      canceller = CANCELLERS.get(self.build_manager_config[0], NoopCanceller)
+      self.handler = canceller(self.build_manager_config[1])
+
     return self.handler.try_cancel_build(uuid)
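Aside: the BuildCanceller change defers constructing the real canceller until the first cancellation request, so merely instantiating the class no longer touches the orchestrator. The lazy-initialization pattern in isolation:

class LazyHandler(object):
  def __init__(self, factory):
    self._factory = factory
    self._handler = None

  def call(self, *args):
    if self._handler is None:
      self._handler = self._factory()  # built on first use only
    return self._handler(*args)

cancel = LazyHandler(lambda: (lambda uuid: 'cancelled %s' % uuid))
assert cancel.call('abc123') == 'cancelled abc123'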
@@ -1,56 +1,49 @@
 import logging
-import etcd
 import uuid
 import calendar
-import os.path
 import json
 import time

 from collections import namedtuple
 from datetime import datetime, timedelta
-from trollius import From, coroutine, Return, async
-from concurrent.futures import ThreadPoolExecutor
-from urllib3.exceptions import ReadTimeoutError, ProtocolError
+from six import iteritems
+
+from trollius import From, coroutine, Return, async, sleep

 from app import metric_queue
+from buildman.orchestrator import (orchestrator_from_config, KeyEvent,
+                                   OrchestratorError, OrchestratorConnectionError,
+                                   ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
 from buildman.manager.basemanager import BaseManager
 from buildman.manager.executor import PopenExecutor, EC2Executor, KubernetesExecutor
 from buildman.component.buildcomponent import BuildComponent
 from buildman.jobutil.buildjob import BuildJob
-from buildman.asyncutil import AsyncWrapper
 from buildman.server import BuildJobResult
 from util import slash_join
 from util.morecollections import AttrDict


 logger = logging.getLogger(__name__)


-ETCD_MAX_WATCH_TIMEOUT = 30
-ETCD_ATOMIC_OP_TIMEOUT = 10000
-RETRY_IMMEDIATELY_TIMEOUT = 0
-NO_WORKER_AVAILABLE_TIMEOUT = 10
-DEFAULT_EPHEMERAL_API_TIMEOUT = 20
-DEFAULT_EPHEMERAL_SETUP_TIMEOUT = 500
+JOB_PREFIX = 'building/'
+LOCK_PREFIX = 'lock/'
+REALM_PREFIX = 'realm/'
+CANCEL_PREFIX = 'cancel/'
+METRIC_PREFIX = 'metric/'
+
+CANCELED_LOCK_PREFIX = slash_join(LOCK_PREFIX, 'job-cancelled')
+EXPIRED_LOCK_PREFIX = slash_join(LOCK_PREFIX, 'job-expired')
+
+EPHEMERAL_API_TIMEOUT = 20
+EPHEMERAL_SETUP_TIMEOUT = 500
+
+RETRY_IMMEDIATELY_SLEEP_DURATION = 0
+TOO_MANY_WORKERS_SLEEP_DURATION = 10

-class EtcdAction(object):
-  """ Enumeration of the various kinds of etcd actions we can observe via a watch. """
-  GET = 'get'
-  SET = 'set'
-  EXPIRE = 'expire'
-  UPDATE = 'update'
-  DELETE = 'delete'
-  CREATE = 'create'
-  COMPARE_AND_SWAP = 'compareAndSwap'
-  COMPARE_AND_DELETE = 'compareAndDelete'

 BuildInfo = namedtuple('BuildInfo', ['component', 'build_job', 'execution_id', 'executor_name'])

-def _create_async_etcd_client(worker_threads=1, **kwargs):
-  client = etcd.Client(**kwargs)
-  async_executor = ThreadPoolExecutor(worker_threads)
-  return AsyncWrapper(client, executor=async_executor), async_executor
-

 class EphemeralBuilderManager(BaseManager):
   """ Build manager implementation for the Enterprise Registry. """
@@ -62,24 +55,12 @@ class EphemeralBuilderManager(BaseManager):
   }

   def __init__(self, *args, **kwargs):
-    self._etcd_client_creator = kwargs.pop('etcd_creator', _create_async_etcd_client)
-
     super(EphemeralBuilderManager, self).__init__(*args, **kwargs)

     self._shutting_down = False

     self._manager_config = None
-    self._async_thread_executor = None
-    self._etcd_client = None
-
-    self._etcd_realm_prefix = None
-    self._etcd_job_prefix = None
-    self._etcd_lock_prefix = None
-    self._etcd_metric_prefix = None
-    self._etcd_cancel_build_prefix = None
-
-    self._ephemeral_api_timeout = DEFAULT_EPHEMERAL_API_TIMEOUT
-    self._ephemeral_setup_timeout = DEFAULT_EPHEMERAL_SETUP_TIMEOUT
+    self._orchestrator = None

     # The registered executors available for running jobs, in order.
     self._ordered_executors = []
@@ -87,74 +68,14 @@ class EphemeralBuilderManager(BaseManager):
     # The registered executors, mapped by their unique name.
     self._executor_name_to_executor = {}

-    # Map of etcd keys being watched to the tasks watching them
-    self._watch_tasks = {}
-
     # Map from builder component to its associated job.
     self._component_to_job = {}

     # Map from build UUID to a BuildInfo tuple with information about the build.
     self._build_uuid_to_info = {}

-  def _watch_etcd(self, etcd_key, change_coroutine_callback, start_index=None, recursive=True,
-                  restarter=None):
-    watch_task_key = (etcd_key, recursive)
-    def callback_wrapper(changed_key_future):
-      new_index = start_index
-      etcd_result = None
-
-      if not changed_key_future.cancelled():
-        try:
-          etcd_result = changed_key_future.result()
-          existing_index = getattr(etcd_result, 'etcd_index', None)
-          new_index = etcd_result.modifiedIndex + 1
-
-          logger.debug('Got watch of key: %s%s at #%s with result: %s', etcd_key,
-                       '*' if recursive else '', existing_index, etcd_result)
-
-        except ReadTimeoutError:
-          logger.debug('Read-timeout on etcd watch %s, rescheduling', etcd_key)
-
-        except etcd.EtcdEventIndexCleared:
-          # This happens if etcd2 has moved forward too fast for us to start watching
-          # at the index we retrieved. We therefore start a new watch at HEAD and
-          # (if specified) call the restarter method which should conduct a read and
-          # reset the state of the manager.
-          logger.debug('Etcd moved forward too quickly. Restarting watch cycle.')
-          new_index = None
-          if restarter is not None:
-            async(restarter())
-
-        except (KeyError, etcd.EtcdKeyError):
-          logger.debug('Etcd key already cleared: %s', etcd_key)
-          return
-
-        except etcd.EtcdException as eex:
-          # TODO(jschorr): This is a quick and dirty hack and should be replaced
-          # with a proper exception check.
-          if str(eex.message).find('Read timed out') >= 0:
-            logger.debug('Read-timeout on etcd watch %s, rescheduling', etcd_key)
-          else:
-            logger.exception('Exception on etcd watch: %s', etcd_key)
-
-        except ProtocolError:
-          logger.exception('Exception on etcd watch: %s', etcd_key)
-
-      if watch_task_key not in self._watch_tasks or self._watch_tasks[watch_task_key].done():
-        self._watch_etcd(etcd_key, change_coroutine_callback, start_index=new_index,
-                         restarter=restarter)
-
-      if etcd_result:
-        async(change_coroutine_callback(etcd_result))
-
-    if not self._shutting_down:
-      logger.debug('Scheduling watch of key: %s%s at start index %s', etcd_key,
-                   '*' if recursive else '', start_index)
-      watch_future = self._etcd_client.watch(etcd_key, recursive=recursive, index=start_index,
-                                             timeout=ETCD_MAX_WATCH_TIMEOUT)
-      watch_future.add_done_callback(callback_wrapper)
-
-      self._watch_tasks[watch_task_key] = async(watch_future)
+  def overall_setup_time(self):
+    return EPHEMERAL_SETUP_TIMEOUT

   @coroutine
   def _mark_job_incomplete(self, build_job, build_info):
@@ -163,101 +84,97 @@ class EphemeralBuilderManager(BaseManager):
     execution_id = build_info.execution_id

     logger.warning('Build executor failed to successfully boot with execution id %s',
-                   execution_id)
+                 execution_id)

     # Take a lock to ensure that only one manager reports the build as incomplete for this
     # execution.
-    got_lock = yield From(self._take_etcd_atomic_lock('job-expired', build_job.build_uuid,
-                                                      execution_id))
-    if got_lock:
+    lock_key = slash_join(self._expired_lock_prefix, build_job.build_uuid, execution_id)
+    acquired_lock = yield From(self._orchestrator.lock(lock_key))
+    if acquired_lock:
       try:
         # Clean up the bookkeeping for the job.
-        yield From(self._etcd_client.delete(self._etcd_job_key(build_job)))
-      except (KeyError, etcd.EtcdKeyError):
+        yield From(self._orchestrator.delete_key(self._job_key(build_job)))
+      except KeyError:
         logger.debug('Could not delete job key %s; might have been removed already',
                      build_job.build_uuid)

       logger.error('[BUILD INTERNAL ERROR] Build ID: %s. Exec name: %s. Exec ID: %s',
-                   build_job.build_uuid, executor_name, execution_id)
+                 build_job.build_uuid, executor_name, execution_id)
       yield From(self.job_complete_callback(build_job, BuildJobResult.INCOMPLETE, executor_name,
                                             update_phase=True))
+    else:
+      logger.debug('Did not get lock for job-expiration for job %s', build_job.build_uuid)

   @coroutine
-  def _handle_job_change(self, etcd_result):
-    """ Handler invoked whenever a job expires or is deleted in etcd. """
-    if etcd_result is None:
-      raise Return()
-
-    if etcd_result.action in (EtcdAction.CREATE, EtcdAction.SET):
+  def _job_callback(self, key_change):
+    """
+    This is the callback invoked when keys related to jobs are changed.
+    It ignores all events related to the creation of new jobs.
+    Deletes or expirations cause checks to ensure they've been properly marked as completed.
+
+    :param key_change: the event and value produced by a key changing in the orchestrator
+    :type key_change: :class:`KeyChange`
+    """
+    if key_change.event in (KeyEvent.CREATE, KeyEvent.SET):
       raise Return()

-    elif etcd_result.action in (EtcdAction.DELETE, EtcdAction.EXPIRE):
+    elif key_change.event in (KeyEvent.DELETE, KeyEvent.EXPIRE):
       # Handle the expiration/deletion.
-      job_metadata = json.loads(etcd_result._prev_node.value)
+      job_metadata = json.loads(key_change.value)
       build_job = BuildJob(AttrDict(job_metadata['job_queue_item']))
-      logger.debug('Got "%s" of job %s', etcd_result.action, build_job.build_uuid)
+      logger.debug('Got "%s" of job %s', key_change.event, build_job.build_uuid)

       # Get the build info.
       build_info = self._build_uuid_to_info.get(build_job.build_uuid, None)
       if build_info is None:
         logger.debug('No build info for "%s" job %s (%s); probably already deleted by this manager',
-                     etcd_result.action, build_job.build_uuid, job_metadata)
+                     key_change.event, build_job.build_uuid, job_metadata)
         raise Return()

-      # If the etcd action was not an expiration, then it was already deleted by some manager and
-      # the execution was therefore already shutdown.
-      if etcd_result.action != EtcdAction.EXPIRE:
-        # Build information will no longer be needed; pop it off.
+      if key_change.event != KeyEvent.EXPIRE:
+        # If the etcd action was not an expiration, then it was already deleted by some manager and
+        # the execution was therefore already shutdown. All that's left is to remove the build info.
         self._build_uuid_to_info.pop(build_job.build_uuid, None)
         raise Return()

-      logger.debug('Got expiration for job %s with metadata: %s', build_job.build_uuid,
+      logger.debug('got expiration for job %s with metadata: %s', build_job.build_uuid,
                    job_metadata)

-      executor_name = build_info.executor_name
-      execution_id = build_info.execution_id
-
-      # If we have not yet received a heartbeat, then the node failed to boot in some way. We mark
-      # the job as incomplete here.
       if not job_metadata.get('had_heartbeat', False):
+        # If we have not yet received a heartbeat, then the node failed to boot in some way.
+        # We mark the job as incomplete here.
         yield From(self._mark_job_incomplete(build_job, build_info))

       # Finally, we terminate the build execution for the job. We don't do this under a lock as
       # terminating a node is an atomic operation; better to make sure it is terminated than not.
       logger.info('Terminating expired build executor for job %s with execution id %s',
-                  build_job.build_uuid, execution_id)
+                  build_job.build_uuid, build_info.execution_id)
       yield From(self.kill_builder_executor(build_job.build_uuid))
-
     else:
-      logger.warning('Unexpected action (%s) on job key: %s', etcd_result.action, etcd_result.key)
+      logger.warning('Unexpected KeyEvent (%s) on job key: %s', key_change.event, key_change.key)

   @coroutine
-  def _handle_realm_change(self, etcd_result):
-    if etcd_result is None:
-      raise Return()
-
-    if etcd_result.action == EtcdAction.CREATE:
-      # We must listen on the realm created by ourselves or another worker
-      realm_spec = json.loads(etcd_result.value)
+  def _realm_callback(self, key_change):
+    logger.debug('realm callback for key: %s', key_change.key)
+    if key_change.event == KeyEvent.CREATE:
+      # Listen on the realm created by ourselves or another worker.
+      realm_spec = json.loads(key_change.value)
       self._register_realm(realm_spec)

-    elif etcd_result.action in (EtcdAction.DELETE, EtcdAction.EXPIRE):
-      # We must stop listening for new connections on the specified realm, if we did not get the
-      # connection
-      realm_spec = json.loads(etcd_result._prev_node.value)
+    elif key_change.event in (KeyEvent.DELETE, KeyEvent.EXPIRE):
+      # Stop listening for new connections on the realm, if we did not get the connection.
+      realm_spec = json.loads(key_change.value)
       realm_id = realm_spec['realm']

       build_job = BuildJob(AttrDict(realm_spec['job_queue_item']))
       build_uuid = build_job.build_uuid

-      logger.debug('Realm key %s for build %s was %s', realm_id, build_uuid, etcd_result.action)
+      logger.debug('Realm key %s for build %s was %s', realm_id, build_uuid, key_change.event)
       build_info = self._build_uuid_to_info.get(build_uuid, None)
       if build_info is not None:
-        # Pop the component off. If we find one, then the build has not connected to this manager,
-        # so we can safely unregister its component.
+        # Pop off the component and if we find one, then the build has not connected to this
+        # manager, so we can safely unregister its component.
         component = self._component_to_job.pop(build_info.component, None)
         if component is not None:
           # We were not the manager which the worker connected to, remove the bookkeeping for it
@ -265,7 +182,7 @@ class EphemeralBuilderManager(BaseManager):
self.unregister_component(build_info.component)

# If the realm has expired, then perform cleanup of the executor.
if etcd_result.action == EtcdAction.EXPIRE:
if key_change.event == KeyEvent.EXPIRE:
execution_id = realm_spec.get('execution_id', None)
executor_name = realm_spec.get('executor_name', 'EC2Executor')

@ -280,7 +197,8 @@ class EphemeralBuilderManager(BaseManager):
yield From(self.terminate_executor(executor_name, execution_id))

else:
logger.warning('Unexpected action (%s) on realm key: %s', etcd_result.action, etcd_result.key)
logger.warning('Unexpected action (%s) on realm key: %s', key_change.event, key_change.key)


def _register_realm(self, realm_spec):
logger.debug('Got call to register realm %s with manager', realm_spec['realm'])
@ -310,23 +228,19 @@ class EphemeralBuilderManager(BaseManager):
@coroutine
def _register_existing_realms(self):
try:
all_realms = yield From(self._etcd_client.read(self._etcd_realm_prefix, recursive=True))
all_realms = yield From(self._orchestrator.get_prefixed_keys(self._realm_prefix))

# Register all existing realms found.
encountered = set()
for realm in all_realms.children:
if not realm.dir:
component = self._register_realm(json.loads(realm.value))
encountered.add(component)
encountered = {self._register_realm(json.loads(realm_data))
for _realm, realm_data in all_realms}

# Remove any components not encountered so we can clean up.
for component, job in list(self._component_to_job.items()):
for component, job in iteritems(self._component_to_job):
if not component in encountered:
self._component_to_job.pop(component, None)
self._build_uuid_to_info.pop(job.build_uuid, None)

except (KeyError, etcd.EtcdKeyError):
# no realms have been registered yet
except KeyError:
pass

def _load_executor(self, executor_kind_name, executor_config):
@ -342,6 +256,71 @@ class EphemeralBuilderManager(BaseManager):
self._ordered_executors.append(executor)
self._executor_name_to_executor[executor.name] = executor

def _config_prefix(self, key):
if self._manager_config.get('ORCHESTRATOR') is None:
return key

prefix = self._manager_config.get('ORCHESTRATOR_PREFIX', '')
return slash_join(prefix, key).lstrip('/') + '/'

@property
def _job_prefix(self):
return self._config_prefix(JOB_PREFIX)

@property
def _realm_prefix(self):
return self._config_prefix(REALM_PREFIX)

@property
def _cancel_prefix(self):
return self._config_prefix(CANCEL_PREFIX)

@property
def _metric_prefix(self):
return self._config_prefix(METRIC_PREFIX)

@property
def _expired_lock_prefix(self):
return self._config_prefix(EXPIRED_LOCK_PREFIX)

@property
def _canceled_lock_prefix(self):
return self._config_prefix(CANCELED_LOCK_PREFIX)

def _metric_key(self, realm):
"""
Create a key which is used to track a job in the Orchestrator.

:param realm: realm for the build
:type realm: str
:returns: key used to track jobs
:rtype: str
"""
return slash_join(self._metric_prefix, realm)

def _job_key(self, build_job):
"""
Creates a key which is used to track a job in the Orchestrator.

:param build_job: unique job identifier for a build
:type build_job: str
:returns: key used to track the job
:rtype: str
"""
return slash_join(self._job_prefix, build_job.job_details['build_uuid'])

def _realm_key(self, realm):
"""
Create a key which is used to track an incoming connection on a realm.

:param realm: realm for the build
:type realm: str
:returns: key used to track the connection to the realm
:rtype: str
"""
return slash_join(self._realm_prefix, realm)
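For reference, a minimal sketch (not part of the diff) of how these helpers compose orchestrator keys; the slash_join below is a stand-in assumption for util.slash_join, and the prefix values mirror the defaults used elsewhere in this module:

# Hypothetical illustration only.
def slash_join(*args):
    return '/'.join(str(arg).strip('/') for arg in args if arg)

JOB_PREFIX = 'building/'          # assumed module-level constant
ORCHESTRATOR_PREFIX = 'quay/'     # assumed manager_config['ORCHESTRATOR_PREFIX']

# _config_prefix(JOB_PREFIX) would yield 'quay/building/'
job_prefix = slash_join(ORCHESTRATOR_PREFIX, JOB_PREFIX).lstrip('/') + '/'
# _job_key(...) then yields e.g. 'quay/building/<build_uuid>'
print(slash_join(job_prefix, 'some-build-uuid'))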

def initialize(self, manager_config):
logger.debug('Calling initialize')
self._manager_config = manager_config
@ -355,90 +334,50 @@ class EphemeralBuilderManager(BaseManager):
else:
self._load_executor(manager_config.get('EXECUTOR'), manager_config.get('EXECUTOR_CONFIG'))

etcd_host = self._manager_config.get('ETCD_HOST', '127.0.0.1')
etcd_port = self._manager_config.get('ETCD_PORT', 2379)
etcd_ca_cert = self._manager_config.get('ETCD_CA_CERT', None)
logger.debug('calling orchestrator_from_config')
self._orchestrator = orchestrator_from_config(manager_config)

etcd_auth = self._manager_config.get('ETCD_CERT_AND_KEY', None)
if etcd_auth is not None:
etcd_auth = tuple(etcd_auth) # Convert YAML list to a tuple

etcd_protocol = 'http' if etcd_auth is None else 'https'
logger.debug('Connecting to etcd on %s:%s', etcd_host, etcd_port)

worker_threads = self._manager_config.get('ETCD_WORKER_THREADS', 5)
(self._etcd_client, self._async_thread_executor) = self._etcd_client_creator(
worker_threads,
host=etcd_host,
port=etcd_port,
cert=etcd_auth,
ca_cert=etcd_ca_cert,
protocol=etcd_protocol,
read_timeout=5,
)

self._etcd_job_prefix = self._manager_config.get('ETCD_BUILDER_PREFIX', 'building/')
self._watch_etcd(self._etcd_job_prefix, self._handle_job_change)

self._etcd_realm_prefix = self._manager_config.get('ETCD_REALM_PREFIX', 'realm/')
self._watch_etcd(self._etcd_realm_prefix, self._handle_realm_change,
restarter=self._register_existing_realms)

self._etcd_cancel_build_prefix = self._manager_config.get('ETCD_CANCEL_PREFIX', 'cancel/')
self._watch_etcd(self._etcd_cancel_build_prefix, self._cancel_build)

self._etcd_lock_prefix = self._manager_config.get('ETCD_LOCK_PREFIX', 'lock/')
self._etcd_metric_prefix = self._manager_config.get('ETCD_METRIC_PREFIX', 'metric/')

self._ephemeral_api_timeout = self._manager_config.get('API_TIMEOUT',
DEFAULT_EPHEMERAL_API_TIMEOUT)

self._ephemeral_setup_timeout = self._manager_config.get('SETUP_TIMEOUT',
DEFAULT_EPHEMERAL_SETUP_TIMEOUT)
logger.debug('setting on_key_change callbacks for job, cancel, realm')
self._orchestrator.on_key_change(self._job_prefix, self._job_callback)
self._orchestrator.on_key_change(self._cancel_prefix, self._cancel_callback)
self._orchestrator.on_key_change(self._realm_prefix, self._realm_callback,
restarter=self._register_existing_realms)

# Load components for all realms currently known to the cluster
async(self._register_existing_realms())

def overall_setup_time(self):
return self._ephemeral_setup_timeout

def shutdown(self):
logger.debug('Shutting down worker.')
self._shutting_down = True

for (etcd_key, _), task in self._watch_tasks.items():
if not task.done():
logger.debug('Canceling watch task for %s', etcd_key)
task.cancel()

if self._async_thread_executor is not None:
logger.debug('Shutting down thread pool executor.')
self._async_thread_executor.shutdown()
if self._orchestrator is not None:
self._orchestrator.shutdown()

@coroutine
def schedule(self, build_job):
build_uuid = build_job.job_details['build_uuid']
logger.debug('Calling schedule with job: %s', build_uuid)

# Check if there are worker slots available by checking the number of jobs in etcd
# Check if there are worker slots available by checking the number of jobs in the orchestrator
allowed_worker_count = self._manager_config.get('ALLOWED_WORKER_COUNT', 1)
try:
active_jobs = yield From(self._etcd_client.read(self._etcd_job_prefix, recursive=True))
workers_alive = sum(1 for child in active_jobs.children if not child.dir)
except (KeyError, etcd.EtcdKeyError):
active_jobs = yield From(self._orchestrator.get_prefixed_keys(self._job_prefix))
workers_alive = len(active_jobs)
except KeyError:
workers_alive = 0
except etcd.EtcdException:
logger.exception('Exception when reading job count from etcd for job: %s', build_uuid)
raise Return(False, RETRY_IMMEDIATELY_TIMEOUT)
except OrchestratorConnectionError:
logger.exception('Could not read job count from orchestrator for job due to orchestrator being down')
raise Return(False, ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
except OrchestratorError:
logger.exception('Exception when reading job count from orchestrator for job: %s', build_uuid)
raise Return(False, RETRY_IMMEDIATELY_SLEEP_DURATION)

logger.debug('Total jobs (scheduling job %s): %s', build_uuid, workers_alive)

if workers_alive >= allowed_worker_count:
logger.info('Too many workers alive, unable to start new worker for build job: %s. %s >= %s',
build_uuid, workers_alive, allowed_worker_count)
raise Return(False, NO_WORKER_AVAILABLE_TIMEOUT)
raise Return(False, TOO_MANY_WORKERS_SLEEP_DURATION)

job_key = self._etcd_job_key(build_job)
job_key = self._job_key(build_job)

# First try to take a lock for this job, meaning we will be responsible for its lifeline
realm = str(uuid.uuid4())
@ -457,17 +396,22 @@ class EphemeralBuilderManager(BaseManager):

lock_payload = json.dumps(payload)
logger.debug('Writing key for job %s with expiration in %s seconds', build_uuid,
self._ephemeral_setup_timeout)
EPHEMERAL_SETUP_TIMEOUT)

try:
yield From(self._etcd_client.write(job_key, lock_payload, prevExist=False,
ttl=self._ephemeral_setup_timeout))
except (KeyError, etcd.EtcdKeyError):
# The job was already taken by someone else, we are probably a retry
logger.warning('Job: %s already exists in etcd, timeout may be misconfigured', build_uuid)
raise Return(False, self._ephemeral_api_timeout)
except etcd.EtcdException:
logger.exception('Exception when writing job %s to etcd', build_uuid)
raise Return(False, RETRY_IMMEDIATELY_TIMEOUT)
yield From(self._orchestrator.set_key(job_key, lock_payload, overwrite=False,
expiration=EPHEMERAL_SETUP_TIMEOUT))
except KeyError:
logger.warning('Job: %s already exists in orchestrator, timeout may be misconfigured',
build_uuid)
raise Return(False, EPHEMERAL_API_TIMEOUT)
except OrchestratorConnectionError:
logger.exception('Exception when writing job %s to orchestrator; could not connect',
build_uuid)
raise Return(False, ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
except OrchestratorError:
logger.exception('Exception when writing job %s to orchestrator', build_uuid)
raise Return(False, RETRY_IMMEDIATELY_SLEEP_DURATION)

# Got a lock, now lets boot the job via one of the registered executors.
started_with_executor = None
@ -528,8 +472,8 @@ class EphemeralBuilderManager(BaseManager):
logger.error('Could not start ephemeral worker for build %s', build_uuid)

# Delete the associated build job record.
yield From(self.delete_etcd_key(job_key))
raise Return(False, self._ephemeral_api_timeout)
yield From(self._orchestrator.delete_key(job_key))
raise Return(False, EPHEMERAL_API_TIMEOUT)

# Job was started!
logger.debug('Started execution with ID %s for job: %s with executor: %s',
@ -540,14 +484,16 @@ class EphemeralBuilderManager(BaseManager):
'executor_name': started_with_executor.name,
'start_time': time.time(),
})

try:
yield From(self._etcd_client.write(self._etcd_metric_key(realm), metric_spec, prevExist=False,
ttl=machine_max_expiration + 10))
except (KeyError, etcd.EtcdKeyError):
logger.error('Realm %s already exists in etcd for job %s ' +
yield From(self._orchestrator.set_key(self._metric_key(realm), metric_spec, overwrite=False,
expiration=machine_max_expiration + 10))
except KeyError:
logger.error('Realm %s already exists in orchestrator for job %s ' +
'UUID collision or something is very very wrong.', realm, build_uuid)
except etcd.EtcdException:
logger.exception('Exception when writing realm %s to etcd for job %s', realm, build_uuid)
except OrchestratorError:
logger.exception('Exception when writing realm %s to orchestrator for job %s',
realm, build_uuid)

# Store the realm spec which will allow any manager to accept this builder when it connects
realm_spec = json.dumps({
@ -562,30 +508,31 @@ class EphemeralBuilderManager(BaseManager):
setup_time = started_with_executor.setup_time or self.overall_setup_time()
logger.debug('Writing job key for job %s using executor %s with ID %s and ttl %s', build_uuid,
started_with_executor.name, execution_id, setup_time)
yield From(self._etcd_client.write(self._etcd_realm_key(realm), realm_spec, prevExist=False,
ttl=setup_time))
except (KeyError, etcd.EtcdKeyError):
logger.error('Realm %s already exists in etcd for job %s ' +
'UUID collision or something is very very wrong.', realm, build_uuid)
raise Return(False, setup_time)
except etcd.EtcdException:
logger.exception('Exception when writing realm %s to etcd for job %s', realm, build_uuid)
yield From(self._orchestrator.set_key(self._realm_key(realm), realm_spec,
expiration=setup_time))
except OrchestratorConnectionError:
logger.exception('Exception when writing realm %s to orchestrator for job %s',
realm, build_uuid)
raise Return(False, ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
except OrchestratorError:
logger.exception('Exception when writing realm %s to orchestrator for job %s',
realm, build_uuid)
raise Return(False, setup_time)

logger.debug('Builder spawn complete for job %s using executor %s with ID %s ', build_uuid,
started_with_executor.name, execution_id)
logger.debug('Builder spawn complete for job %s using executor %s with ID %s ',
build_uuid, started_with_executor.name, execution_id)
raise Return(True, None)

@coroutine
def build_component_ready(self, build_component):
logger.debug('Got component ready for component with realm %s', build_component.builder_realm)

# Pop off the job for the component. We do so before we send out the etcd watch below,
# as it will also remove this mapping.
# Pop off the job for the component.
# We do so before we send out the watch below, as it will also remove this mapping.
job = self._component_to_job.pop(build_component, None)
if job is None:
# This will occur once the build finishes, so no need to worry about it. We log in case it
# happens outside of the expected flow.
# This will occur once the build finishes, so no need to worry about it.
# We log in case it happens outside of the expected flow.
logger.debug('Could not find job for the build component on realm %s; component is ready',
build_component.builder_realm)
raise Return()
@ -598,10 +545,10 @@ class EphemeralBuilderManager(BaseManager):
yield From(self._write_duration_metric(metric_queue.builder_time_to_build,
build_component.builder_realm))

# Clean up the bookkeeping for allowing any manager to take the job.
try:
# Clean up the bookkeeping for allowing any manager to take the job.
yield From(self._etcd_client.delete(self._etcd_realm_key(build_component.builder_realm)))
except (KeyError, etcd.EtcdKeyError):
yield From(self._orchestrator.delete_key(self._realm_key(build_component.builder_realm)))
except KeyError:
logger.warning('Could not delete realm key %s', build_component.builder_realm)

def build_component_disposed(self, build_component, timed_out):
@ -624,19 +571,27 @@ class EphemeralBuilderManager(BaseManager):
# Kill the ephemeral builder.
yield From(self.kill_builder_executor(build_job.build_uuid))

# Delete the build job from etcd.
job_key = self._etcd_job_key(build_job)
# Delete the build job from the orchestrator.
try:
yield From(self._etcd_client.delete(job_key))
except (KeyError, etcd.EtcdKeyError):
job_key = self._job_key(build_job)
yield From(self._orchestrator.delete_key(job_key))
except KeyError:
logger.debug('Builder is asking for job to be removed, but work already completed')
except OrchestratorConnectionError:
logger.exception('Could not remove job key as orchestrator is not available')
yield From(sleep(ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION))
raise Return()

# Delete the metric from etcd
metric_key = self._etcd_metric_key(build_component.builder_realm)
# Delete the metric from the orchestrator.
try:
yield From(self._etcd_client.delete(metric_key))
except (KeyError, etcd.EtcdKeyError):
metric_key = self._metric_key(build_component.builder_realm)
yield From(self._orchestrator.delete_key(metric_key))
except KeyError:
logger.debug('Builder is asking for metric to be removed, but key not found')
except OrchestratorConnectionError:
logger.exception('Could not remove metric key as orchestrator is not available')
yield From(sleep(ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION))
raise Return()

logger.debug('job_completed for job %s with status: %s', build_job.build_uuid, job_status)
@ -667,19 +622,24 @@ class EphemeralBuilderManager(BaseManager):

@coroutine
def job_heartbeat(self, build_job):
# Extend the queue item.
"""
:param build_job: the identifier for the build
:type build_job: str
"""
self.job_heartbeat_callback(build_job)
self._extend_job_in_orchestrator(build_job)

# Extend the deadline in etcd.
job_key = self._etcd_job_key(build_job)

@coroutine
def _extend_job_in_orchestrator(self, build_job):
try:
build_job_metadata_response = yield From(self._etcd_client.read(job_key))
except (KeyError, etcd.EtcdKeyError):
logger.info('Job %s no longer exists in etcd', build_job.build_uuid)
job_data = yield From(self._orchestrator.get_key(self._job_key(build_job)))
except KeyError:
logger.info('Job %s no longer exists in the orchestrator', build_job.build_uuid)
raise Return()
except OrchestratorConnectionError:
logger.exception('failed to connect when attempting to extend job')

build_job_metadata = json.loads(build_job_metadata_response.value)
build_job_metadata = json.loads(job_data)

max_expiration = datetime.utcfromtimestamp(build_job_metadata['max_expiration'])
max_expiration_remaining = max_expiration - datetime.utcnow()
@ -692,34 +652,22 @@ class EphemeralBuilderManager(BaseManager):
'had_heartbeat': True,
}

# Note: A TTL of < 0 in etcd results in the key *never being expired*. We use a max here
# to ensure that if the TTL is < 0, the key will expire immediately.
etcd_ttl = max(ttl, 0)
yield From(self._etcd_client.write(job_key, json.dumps(payload), ttl=etcd_ttl))


@coroutine
def _take_etcd_atomic_lock(self, path, *args):
""" Takes a lock for atomic operations via etcd over the given path. Returns true if the lock
was granted and false otherwise.
"""
pieces = [self._etcd_lock_prefix, path]
pieces.extend(args)

lock_key = os.path.join(*pieces)
try:
yield From(self._etcd_client.write(lock_key, {}, prevExist=False, ttl=ETCD_ATOMIC_OP_TIMEOUT))
raise Return(True)
except (KeyError, etcd.EtcdKeyError):
raise Return(False)
yield From(self._orchestrator.set_key(self._job_key(build_job), json.dumps(payload),
expiration=ttl))
except OrchestratorConnectionError:
logger.exception('Could not update heartbeat for job as the orchestrator is not available')
yield From(sleep(ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION))

@coroutine
def _write_duration_metric(self, metric, realm):
""" Returns true if the metric was written and and false otherwise.
"""
:returns: True if the metric was written, otherwise False
:rtype: bool
"""
try:
metric_data = yield From(self._etcd_client.read(self._etcd_metric_key(realm)))
parsed_metric_data = json.loads(metric_data.value)
metric_data = yield From(self._orchestrator.get_key(self._metric_key(realm)))
parsed_metric_data = json.loads(metric_data)
start_time = parsed_metric_data['start_time']
metric.Observe(time.time() - start_time,
labelvalues=[parsed_metric_data.get('executor_name',
@ -727,54 +675,36 @@ class EphemeralBuilderManager(BaseManager):
except Exception:
logger.exception("Could not write metric for realm %s", realm)

def _etcd_metric_key(self, realm):
""" Create a key which is used to track a job in etcd.
"""
return os.path.join(self._etcd_metric_prefix, realm)

def _etcd_job_key(self, build_job):
""" Create a key which is used to track a job in etcd.
"""
return os.path.join(self._etcd_job_prefix, build_job.job_details['build_uuid'])

def _etcd_realm_key(self, realm):
""" Create a key which is used to track an incoming connection on a realm.
"""
return os.path.join(self._etcd_realm_prefix, realm)

def num_workers(self):
""" Return the number of workers we're managing locally.
"""
The number of workers we're managing locally.

:returns: the number of the workers locally managed
:rtype: int
"""
return len(self._component_to_job)


@coroutine
def _cancel_build(self, etcd_result):
""" Listens for etcd event and then cancels the build
"""
if etcd_result is None:
raise Return(False)
def _cancel_callback(self, key_change):
if key_change.event not in (KeyEvent.CREATE, KeyEvent.SET):
raise Return()

if etcd_result.action not in (EtcdAction.CREATE, EtcdAction.SET):
raise Return(False)

build_uuid = etcd_result.value
build_uuid = key_change.value
build_info = self._build_uuid_to_info.get(build_uuid, None)

if build_info is None:
logger.debug('No build info for "%s" job %s', etcd_result.action, build_uuid)
logger.debug('No build info for "%s" job %s', key_change.event, build_uuid)
raise Return(False)
got_lock = yield From(self._take_etcd_atomic_lock('job-cancelled', build_uuid, build_info.execution_id))
if got_lock:

lock_key = slash_join(self._canceled_lock_prefix,
build_uuid, build_info.execution_id)
lock_acquired = yield From(self._orchestrator.lock(lock_key))
if lock_acquired:
builder_realm = build_info.component.builder_realm
yield From(self.kill_builder_executor(build_uuid))
yield From(self.delete_etcd_key(self._etcd_realm_key(build_info.component.builder_realm)))
yield From(self.delete_etcd_key(self._etcd_metric_key(build_info.component.builder_realm)))
yield From(self.delete_etcd_key(os.path.join(self._etcd_job_prefix, build_uuid)))
yield From(self._orchestrator.delete_key(self._realm_key(builder_realm)))
yield From(self._orchestrator.delete_key(self._metric_key(builder_realm)))
yield From(self._orchestrator.delete_key(slash_join(self._job_prefix, build_uuid)))

# This is outside the lock so we can un-register the component wherever it is registered to.
yield From(build_info.component.cancel_build())

@coroutine
def delete_etcd_key(self, etcd_key):
try:
yield From(self._etcd_client.delete(etcd_key))
except (KeyError, etcd.EtcdKeyError):
logger.warning('Could not delete etcd key %s', etcd_key)
@ -1,26 +1,31 @@
import datetime
import hashlib
import logging
import os
import uuid
import threading
import boto.ec2
import requests
import cachetools
import trollius
import datetime
import release
import socket
import hashlib
import subprocess
import threading
import uuid

from jinja2 import FileSystemLoader, Environment
from trollius import coroutine, From, Return, get_event_loop
from functools import partial

from buildman.asyncutil import AsyncWrapper
import boto.ec2
import cachetools.func
import requests
import trollius

from container_cloud_config import CloudConfigContext
from jinja2 import FileSystemLoader, Environment
from trollius import coroutine, From, Return, get_event_loop

import release

from buildman.asyncutil import AsyncWrapper
from app import metric_queue, app
from util.metrics.metricqueue import duration_collector_async
from _init import ROOT_DIR


logger = logging.getLogger(__name__)
@ -109,17 +114,20 @@ class BuilderExecutor(object):
quay_password = self.executor_config['QUAY_PASSWORD']

return TEMPLATE.render(
realm=realm,
token=token,
build_uuid=build_uuid,
quay_username=quay_username,
quay_password=quay_password,
manager_hostname=manager_hostname,
websocket_scheme=self.websocket_scheme,
coreos_channel=coreos_channel,
worker_tag=self.executor_config['WORKER_TAG'],
logentries_token=self.executor_config.get('LOGENTRIES_TOKEN', None),
volume_size=self.executor_config.get('VOLUME_SIZE', '42G'),
realm=realm,
token=token,
build_uuid=build_uuid,
quay_username=quay_username,
quay_password=quay_password,
manager_hostname=manager_hostname,
websocket_scheme=self.websocket_scheme,
coreos_channel=coreos_channel,
worker_image=self.executor_config.get('WORKER_IMAGE', 'quay.io/coreos/registry-build-worker'),
worker_tag=self.executor_config['WORKER_TAG'],
logentries_token=self.executor_config.get('LOGENTRIES_TOKEN', None),
volume_size=self.executor_config.get('VOLUME_SIZE', '42G'),
max_lifetime_s=self.executor_config.get('MAX_LIFETIME_S', 10800),
ssh_authorized_keys=self.executor_config.get('SSH_AUTHORIZED_KEYS', []),
)
@ -143,7 +151,7 @@ class EC2Executor(BuilderExecutor):
))

@classmethod
@cachetools.ttl_cache(ttl=ONE_HOUR)
@cachetools.func.ttl_cache(ttl=ONE_HOUR)
def _get_coreos_ami(cls, ec2_region, coreos_channel):
""" Retrieve the CoreOS AMI id from the canonical listing.
"""
@ -265,7 +273,6 @@ class PopenExecutor(BuilderExecutor):
def start_builder(self, realm, token, build_uuid):
# Now start a machine for this job, adding the machine id to the etcd information
logger.debug('Forking process for build')
import subprocess

ws_host = os.environ.get("BUILDMAN_WS_HOST", "localhost")
ws_port = os.environ.get("BUILDMAN_WS_PORT", "8787")
@ -280,7 +287,8 @@ class PopenExecutor(BuilderExecutor):
}

logpipe = LogPipe(logging.INFO)
spawned = subprocess.Popen(os.environ.get("BUILDER_BINARY_LOCATION", '/usr/local/bin/quay-builder'),
spawned = subprocess.Popen(os.environ.get('BUILDER_BINARY_LOCATION',
'/usr/local/bin/quay-builder'),
stdout=logpipe,
stderr=logpipe,
env=builder_env)
@ -320,13 +328,19 @@ class KubernetesExecutor(BuilderExecutor):
tls_cert = self.executor_config.get('K8S_API_TLS_CERT')
tls_key = self.executor_config.get('K8S_API_TLS_KEY')
tls_ca = self.executor_config.get('K8S_API_TLS_CA')
service_account_token = self.executor_config.get('SERVICE_ACCOUNT_TOKEN')

if 'timeout' not in request_options:
request_options['timeout'] = self.executor_config.get("K8S_API_TIMEOUT", 20)

if tls_cert and tls_key:
if service_account_token:
scheme = 'https'
request_options['headers'] = {'Authorization': 'Bearer ' + service_account_token}
logger.debug('Using service account token for Kubernetes authentication')
elif tls_cert and tls_key:
scheme = 'https'
request_options['cert'] = (tls_cert, tls_key)
logger.debug('Using tls certificate and key for Kubernetes authentication')
if tls_ca:
request_options['verify'] = tls_ca
else:
@ -347,22 +361,75 @@ class KubernetesExecutor(BuilderExecutor):
def _job_path(self, build_uuid):
return '%s/%s' % (self._jobs_path(), build_uuid)

def _job_resource(self, build_uuid, user_data, coreos_channel='stable'):
vm_memory_limit = self.executor_config.get('VM_MEMORY_LIMIT', '4G')
vm_volume_size = self.executor_config.get('VOLUME_SIZE', '32G')
def _kubernetes_distribution(self):
return self.executor_config.get('KUBERNETES_DISTRIBUTION', 'basic').lower()

def _is_basic_kubernetes_distribution(self):
return self._kubernetes_distribution() == 'basic'

def _is_openshift_kubernetes_distribution(self):
return self._kubernetes_distribution() == 'openshift'

def _build_job_container_resources(self):
# Minimum acceptable free resources for this container to "fit" in a quota
# These may be lower than the aboslute limits if the cluster is knowingly
# These may be lower than the absolute limits if the cluster is knowingly
# oversubscribed by some amount.
container_requests = {
'memory' : self.executor_config.get('CONTAINER_MEMORY_REQUEST', '3968Mi'),
}

container_limits = {
'memory' : self.executor_config.get('CONTAINER_MEMORY_LIMITS', '5120Mi'),
'cpu' : self.executor_config.get('CONTAINER_CPU_LIMITS', '1000m'),
}

resources = {
'requests': container_requests,
}

if self._is_openshift_kubernetes_distribution():
resources['requests']['cpu'] = self.executor_config.get('CONTAINER_CPU_REQUEST', '500m')
resources['limits'] = container_limits

return resources
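As a worked illustration (an assumption, not code from the diff): with the default executor_config values above and the 'openshift' distribution, the returned dict would be roughly:

# Expected shape under openshift defaults; the 'basic' distribution
# omits the cpu request and the limits block entirely.
resources = {
    'requests': {'memory': '3968Mi', 'cpu': '500m'},
    'limits': {'memory': '5120Mi', 'cpu': '1000m'},
}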

def _build_job_containers(self, user_data):
vm_memory_limit = self.executor_config.get('VM_MEMORY_LIMIT', '4G')
vm_volume_size = self.executor_config.get('VOLUME_SIZE', '32G')

container = {
'name': 'builder',
'imagePullPolicy': 'IfNotPresent',
'image': self.image,
'securityContext': {'privileged': True},
'env': [
{'name': 'USERDATA', 'value': user_data},
{'name': 'VM_MEMORY', 'value': vm_memory_limit},
{'name': 'VM_VOLUME_SIZE', 'value': vm_volume_size},
],
'resources': self._build_job_container_resources(),
}

if self._is_basic_kubernetes_distribution():
container['volumeMounts'] = [{'name': 'secrets-mask','mountPath': '/var/run/secrets/kubernetes.io/serviceaccount'}]

return container

def _job_resource(self, build_uuid, user_data, coreos_channel='stable'):
image_pull_secret_name = self.executor_config.get('IMAGE_PULL_SECRET_NAME', 'builder')
service_account = self.executor_config.get('SERVICE_ACCOUNT_NAME', 'quay-builder-sa')
node_selector_label_key = self.executor_config.get('NODE_SELECTOR_LABEL_KEY', 'beta.kubernetes.io/instance-type')
node_selector_label_value = self.executor_config.get('NODE_SELECTOR_LABEL_VALUE', '')

node_selector = {
node_selector_label_key : node_selector_label_value
}

release_sha = release.GIT_HEAD or 'none'
if ' ' in release_sha:
release_sha = 'HEAD'

return {
job_resource = {
'apiVersion': 'batch/v1',
'kind': 'Job',
'metadata': {
@ -387,52 +454,42 @@ class KubernetesExecutor(BuilderExecutor):
},
},
'spec': {
# This volume is a hack to mask the token for the namespace's
# default service account, which is placed in a file mounted under
# `/var/run/secrets/kubernetes.io/serviceaccount` in all pods.
# There's currently no other way to just disable the service
# account at either the pod or namespace level.
#
# https://github.com/kubernetes/kubernetes/issues/16779
#
'volumes': [
{
'name': 'secrets-mask',
'emptyDir': {
'medium': 'Memory',
},
},
],
'containers': [
{
'name': 'builder',
'imagePullPolicy': 'IfNotPresent',
'image': self.image,
'securityContext': {'privileged': True},
'env': [
{'name': 'USERDATA', 'value': user_data},
{'name': 'VM_MEMORY', 'value': vm_memory_limit},
{'name': 'VM_VOLUME_SIZE', 'value': vm_volume_size},
],
'resources': {
'requests': container_requests,
},
'volumeMounts': [
{
'name': 'secrets-mask',
'mountPath': '/var/run/secrets/kubernetes.io/serviceaccount',
},
],
},
],
'imagePullSecrets': [{'name': 'builder'}],
'imagePullSecrets': [{ 'name': image_pull_secret_name }],
'restartPolicy': 'Never',
'dnsPolicy': 'Default',
'containers': [self._build_job_containers(user_data)],
},
},
},
}

if self._is_openshift_kubernetes_distribution():
# Setting `automountServiceAccountToken` to false will prevent automounting API credentials for a service account.
job_resource['spec']['template']['spec']['automountServiceAccountToken'] = False

# Use dedicated service account that has no authorization to any resources.
job_resource['spec']['template']['spec']['serviceAccount'] = service_account

# Setting `enableServiceLinks` to false prevents information about other services from being injected into pod's
# environment variables. Pod has no visibility into other services on the cluster.
job_resource['spec']['template']['spec']['enableServiceLinks'] = False

if node_selector_label_value.strip() != '':
job_resource['spec']['template']['spec']['nodeSelector'] = node_selector

if self._is_basic_kubernetes_distribution():
# This volume is a hack to mask the token for the namespace's
# default service account, which is placed in a file mounted under
# `/var/run/secrets/kubernetes.io/serviceaccount` in all pods.
# There's currently no other way to just disable the service
# account at either the pod or namespace level.
#
# https://github.com/kubernetes/kubernetes/issues/16779
#
job_resource['spec']['template']['spec']['volumes'] = [{'name': 'secrets-mask','emptyDir': {'medium': 'Memory'}}]

return job_resource

@coroutine
@duration_collector_async(metric_queue.builder_time_to_start, ['k8s'])
def start_builder(self, realm, token, build_uuid):
@ -440,6 +497,7 @@ class KubernetesExecutor(BuilderExecutor):
channel = self.executor_config.get('COREOS_CHANNEL', 'stable')
user_data = self.generate_cloud_config(realm, token, build_uuid, channel, self.manager_hostname)
resource = self._job_resource(build_uuid, user_data, channel)
logger.debug('Using Kubernetes Distribution: %s', self._kubernetes_distribution())
logger.debug('Generated kubernetes resource:\n%s', resource)

# schedule
@ -0,0 +1,26 @@
import logging

from buildman.orchestrator import orchestrator_from_config, OrchestratorError
from util import slash_join


logger = logging.getLogger(__name__)


CANCEL_PREFIX = 'cancel/'


class OrchestratorCanceller(object):
""" An asynchronous way to cancel a build with any Orchestrator. """
def __init__(self, config):
self._orchestrator = orchestrator_from_config(config, canceller_only=True)

def try_cancel_build(self, build_uuid):
logger.info('Cancelling build %s', build_uuid)
cancel_key = slash_join(CANCEL_PREFIX, build_uuid)
try:
self._orchestrator.set_key_sync(cancel_key, build_uuid, expiration=60)
return True
except OrchestratorError:
logger.exception('Failed to write cancel action to redis with uuid %s', build_uuid)
return False
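A minimal usage sketch (assumed wiring, not part of the diff); the 'REDIS_HOST' key follows the '<impl>_<arg>' convention that orchestrator_from_config parses:

# Hypothetical config; any ORCHESTRATOR block works the same way.
config = {'ORCHESTRATOR': {'REDIS_HOST': '127.0.0.1'}}
canceller = OrchestratorCanceller(config)
if not canceller.try_cancel_build('some-build-uuid'):
    pass  # the cancel key could not be written; the caller may retry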
@ -0,0 +1,753 @@
from abc import ABCMeta, abstractmethod
from collections import namedtuple

import datetime
import json
import logging
import re
import time

from enum import IntEnum, unique
from six import add_metaclass, iteritems
from trollius import async, coroutine, From, Return
from urllib3.exceptions import ReadTimeoutError, ProtocolError

import etcd
import redis

from buildman.asyncutil import wrap_with_threadpool
from util import slash_join
from util.expiresdict import ExpiresDict


logger = logging.getLogger(__name__)

ONE_DAY = 60 * 60 * 24
ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION = 5
DEFAULT_LOCK_EXPIRATION = 10000

ETCD_READ_TIMEOUT = 5
ETCD_MAX_WATCH_TIMEOUT = 30

REDIS_EXPIRING_SUFFIX = '/expiring'
REDIS_DEFAULT_PUBSUB_KEY = 'orchestrator_events'
REDIS_EVENT_KIND_MESSAGE = 'message'
REDIS_EVENT_KIND_PMESSAGE = 'pmessage'
REDIS_NONEXPIRING_KEY = -1

# This constant defines the Redis configuration flags used to watch [K]eyspace and e[x]pired
# events on keys. For more info, see https://redis.io/topics/notifications#configuration
REDIS_KEYSPACE_EVENT_CONFIG_VALUE = 'Kx'
REDIS_KEYSPACE_EVENT_CONFIG_KEY = 'notify-keyspace-events'
REDIS_KEYSPACE_KEY_PATTERN = '__keyspace@%s__:%s'
REDIS_EXPIRED_KEYSPACE_PATTERN = slash_join(REDIS_KEYSPACE_KEY_PATTERN, REDIS_EXPIRING_SUFFIX)
REDIS_EXPIRED_KEYSPACE_REGEX = re.compile(REDIS_EXPIRED_KEYSPACE_PATTERN % (r'(\S+)', r'(\S+)'))
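For concreteness, a worked example (not code from the diff; the exact join of the suffix depends on util.slash_join):

# Assuming slash_join inserts a single '/', then for db 0:
# REDIS_EXPIRED_KEYSPACE_PATTERN % (0, '*')  ->  '__keyspace@0__:*/expiring'
# and REDIS_EXPIRED_KEYSPACE_REGEX captures the db number and the expiring key as groups.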

def orchestrator_from_config(manager_config, canceller_only=False):
"""
Allocates a new Orchestrator from the 'ORCHESTRATOR' block from provided manager config.
Checks for legacy configuration prefixed with 'ETCD_' when the 'ORCHESTRATOR' is not present.

:param manager_config: the configuration for the orchestrator
:type manager_config: dict
:rtype: :class: Orchestrator
"""
# Legacy codepath only knows how to configure etcd.
if manager_config.get('ORCHESTRATOR') is None:
manager_config['ORCHESTRATOR'] = {key: value
for (key, value) in iteritems(manager_config)
if key.startswith('ETCD_') and not key.endswith('_PREFIX')}

# Sanity check that legacy prefixes are no longer being used.
for key in manager_config['ORCHESTRATOR'].keys():
words = key.split('_')
if len(words) > 1 and words[-1].lower() == 'prefix':
raise AssertionError('legacy prefix used, use ORCHESTRATOR_PREFIX instead')

def _dict_key_prefix(d):
"""
:param d: the dict that has keys prefixed with underscore
:type d: {str: any}
:rtype: str
"""
return d.keys()[0].split('_', 1)[0].lower()

orchestrator_name = _dict_key_prefix(manager_config['ORCHESTRATOR'])

def format_key(key):
return key.lower().split('_', 1)[1]

orchestrator_kwargs = {format_key(key): value
for (key, value) in iteritems(manager_config['ORCHESTRATOR'])}

if manager_config.get('ORCHESTRATOR_PREFIX') is not None:
orchestrator_kwargs['orchestrator_prefix'] = manager_config['ORCHESTRATOR_PREFIX']

orchestrator_kwargs['canceller_only'] = canceller_only

logger.debug('attempting to create orchestrator %s with kwargs %s',
orchestrator_name, orchestrator_kwargs)
return orchestrator_by_name(orchestrator_name, **orchestrator_kwargs)


def orchestrator_by_name(name, **kwargs):
_ORCHESTRATORS = {
'etcd': Etcd2Orchestrator,
'mem': MemoryOrchestrator,
'redis': RedisOrchestrator,
}
return _ORCHESTRATORS.get(name, MemoryOrchestrator)(**kwargs)
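A usage sketch tying the two factories together (assumption: run inside this module, with no etcd or redis available; 'mem' is the default fallback):

# MemoryOrchestrator needs no external service, so this is runnable as-is.
orchestrator = orchestrator_by_name('mem')
orchestrator.set_key_sync('building/test-uuid', '{}', expiration=60)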

class OrchestratorError(Exception):
pass


# TODO: replace with ConnectionError when this codebase is Python 3.
class OrchestratorConnectionError(OrchestratorError):
pass


@unique
class KeyEvent(IntEnum):
CREATE = 1
SET = 2
DELETE = 3
EXPIRE = 4


class KeyChange(namedtuple('KeyChange', ['event', 'key', 'value'])):
pass
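For illustration, callbacks registered via on_key_change receive these KeyChange tuples; a minimal (hypothetical) synchronous handler might look like:

# Sketch only: the real callbacks in this codebase are trollius coroutines.
def log_change(key_change):
    if key_change.event == KeyEvent.EXPIRE:
        logger.debug('expired: %s', key_change.key)
    else:
        logger.debug('%s -> %s', key_change.key, key_change.value)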

@add_metaclass(ABCMeta)
class Orchestrator(object):
"""
Orchestrator is the interface that is used to synchronize the build states
across build managers.

This interface assumes that storage is being done by a key-value store
that supports watching for events on keys.

Missing keys should return KeyError; otherwise, errors should raise an
OrchestratorError.

:param key_prefix: the prefix of keys being watched
:type key_prefix: str
"""

@abstractmethod
def on_key_change(self, key, callback, restarter=None):
"""

The callback parameter takes in a KeyChange object as a parameter.
"""
pass

@abstractmethod
def get_prefixed_keys(self, prefix):
"""

:returns: a dict of key value pairs beginning with prefix
:rtype: {str: str}
"""
pass

@abstractmethod
def get_key(self, key):
"""

:returns: the value stored at the provided key
:rtype: str
"""
pass

@abstractmethod
def set_key(self, key, value, overwrite=False, expiration=None):
"""

:param key: the identifier for the value
:type key: str
:param value: the value being stored
:type value: str
:param overwrite: whether or not a KeyError is thrown if the key already exists
:type overwrite: bool
:param expiration: the duration in seconds that a key should be available
:type expiration: int
"""
pass

@abstractmethod
def set_key_sync(self, key, value, overwrite=False, expiration=None):
"""
set_key, but without trollius coroutines.
"""
pass

@abstractmethod
def delete_key(self, key):
"""
Deletes a key that has been set in the orchestrator.

:param key: the identifier for the key
:type key: str
"""
pass

@abstractmethod
def lock(self, key, expiration=DEFAULT_LOCK_EXPIRATION):
"""
Takes a lock for synchronizing exclusive operations cluster-wide.

:param key: the identifier for the lock
:type key: str
:param expiration: the duration until the lock expires
:type expiration: :class:`datetime.timedelta` or int (seconds)
:returns: whether or not the lock was acquired
:rtype: bool
"""
pass

@abstractmethod
def shutdown():
"""
This function should shutdown any final resources allocated by the Orchestrator.
"""
pass


def _sleep_orchestrator():
"""
This function blocks the trollius event loop by sleeping in order to backoff if a failure
such as a ConnectionError has occurred.
"""
logger.exception('Connecting to etcd failed; sleeping for %s and then trying again',
ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
time.sleep(ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
logger.exception('Connecting to etcd failed; slept for %s and now trying again',
ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)


class EtcdAction(object):
""" Enumeration of the various kinds of etcd actions we can observe via a watch. """
GET = 'get'
SET = 'set'
EXPIRE = 'expire'
UPDATE = 'update'
DELETE = 'delete'
CREATE = 'create'
COMPARE_AND_SWAP = 'compareAndSwap'
COMPARE_AND_DELETE = 'compareAndDelete'


class Etcd2Orchestrator(Orchestrator):
def __init__(self, host='127.0.0.1', port=2379, cert_and_key=None, ca_cert=None,
client_threads=5, canceller_only=False, **kwargs):
self.is_canceller_only = canceller_only

logger.debug('initializing async etcd client')
self._sync_etcd_client = etcd.Client(
host=host,
port=port,
cert=tuple(cert_and_key) if cert_and_key is not None else None,
ca_cert=ca_cert,
protocol='http' if cert_and_key is None else 'https',
read_timeout=ETCD_READ_TIMEOUT,
)

if not self.is_canceller_only:
(self._etcd_client, self._async_executor) = wrap_with_threadpool(self._sync_etcd_client,
client_threads)

logger.debug('creating initial orchestrator state')
self._shutting_down = False
self._watch_tasks = {}

@staticmethod
def _sanity_check_ttl(ttl):
"""
A TTL of < 0 in etcd results in the key *never being expired*.
We use a max here to ensure that if the TTL is < 0, the key will expire immediately.
"""
return max(ttl, 0)
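A tiny worked example of the clamp: a job whose max_expiration has already passed produces a negative remaining TTL, and clamping it to 0 makes etcd expire the key immediately rather than never:

assert Etcd2Orchestrator._sanity_check_ttl(-3) == 0
assert Etcd2Orchestrator._sanity_check_ttl(30) == 30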

def _watch_etcd(self, key, callback, restarter=None, start_index=None):
def callback_wrapper(changed_key_future):
new_index = start_index
etcd_result = None

if not changed_key_future.cancelled():
try:
etcd_result = changed_key_future.result()
existing_index = getattr(etcd_result, 'etcd_index', None)
new_index = etcd_result.modifiedIndex + 1

logger.debug('Got watch of key: %s at #%s with result: %s',
key, existing_index, etcd_result)

except ReadTimeoutError:
logger.debug('Read-timeout on etcd watch %s, rescheduling', key)

except etcd.EtcdEventIndexCleared:
# This happens if etcd2 has moved forward too fast for us to start watching at the index
# we retrieved. We therefore start a new watch at HEAD and (if specified) call the
# restarter method which should conduct a read and reset the state of the manager.
logger.debug('Etcd moved forward too quickly. Restarting watch cycle.')
new_index = None
if restarter is not None:
async(restarter())

except (KeyError, etcd.EtcdKeyError):
logger.debug('Etcd key already cleared: %s', key)
return

except etcd.EtcdConnectionFailed:
_sleep_orchestrator()

except etcd.EtcdException as eex:
# TODO: This is a quick and dirty hack and should be replaced with a proper
# exception check.
if str(eex.message).find('Read timed out') >= 0:
logger.debug('Read-timeout on etcd watch %s, rescheduling', key)
else:
logger.exception('Exception on etcd watch: %s', key)

except ProtocolError:
logger.exception('Exception on etcd watch: %s', key)

if key not in self._watch_tasks or self._watch_tasks[key].done():
self._watch_etcd(key, callback, start_index=new_index, restarter=restarter)

if etcd_result and etcd_result.value is not None:
async(callback(self._etcd_result_to_keychange(etcd_result)))

if not self._shutting_down:
logger.debug('Scheduling watch of key: %s at start index %s', key, start_index)
watch_future = self._etcd_client.watch(key, recursive=True, index=start_index,
timeout=ETCD_MAX_WATCH_TIMEOUT)
watch_future.add_done_callback(callback_wrapper)

self._watch_tasks[key] = async(watch_future)

@staticmethod
def _etcd_result_to_keychange(etcd_result):
event = Etcd2Orchestrator._etcd_result_to_keyevent(etcd_result)
return KeyChange(event, etcd_result.key, etcd_result.value)

@staticmethod
def _etcd_result_to_keyevent(etcd_result):
if etcd_result.action == EtcdAction.CREATE:
return KeyEvent.CREATE
if etcd_result.action == EtcdAction.SET:
return KeyEvent.CREATE if etcd_result.createdIndex == etcd_result.modifiedIndex else KeyEvent.SET
if etcd_result.action == EtcdAction.DELETE:
return KeyEvent.DELETE
if etcd_result.action == EtcdAction.EXPIRE:
return KeyEvent.EXPIRE
raise AssertionError('etcd action must have equivalent keyevent')

def on_key_change(self, key, callback, restarter=None):
assert not self.is_canceller_only

logger.debug('creating watch on %s', key)
self._watch_etcd(key, callback, restarter=restarter)

@coroutine
def get_prefixed_keys(self, prefix):
assert not self.is_canceller_only

try:
etcd_result = yield From(self._etcd_client.read(prefix, recursive=True))
raise Return({leaf.key: leaf.value for leaf in etcd_result.leaves})
except etcd.EtcdKeyError:
raise KeyError
except etcd.EtcdConnectionFailed as ex:
raise OrchestratorConnectionError(ex)
except etcd.EtcdException as ex:
raise OrchestratorError(ex)

@coroutine
def get_key(self, key):
assert not self.is_canceller_only

try:
# Ignore pylint: the value property on EtcdResult is added dynamically using setattr.
etcd_result = yield From(self._etcd_client.read(key))
raise Return(etcd_result.value)
except etcd.EtcdKeyError:
raise KeyError
except etcd.EtcdConnectionFailed as ex:
raise OrchestratorConnectionError(ex)
except etcd.EtcdException as ex:
raise OrchestratorError(ex)

@coroutine
def set_key(self, key, value, overwrite=False, expiration=None):
assert not self.is_canceller_only

yield From(self._etcd_client.write(key, value, prevExists=overwrite,
ttl=self._sanity_check_ttl(expiration)))

def set_key_sync(self, key, value, overwrite=False, expiration=None):
self._sync_etcd_client.write(key, value, prevExists=overwrite,
ttl=self._sanity_check_ttl(expiration))

@coroutine
def delete_key(self, key):
assert not self.is_canceller_only

try:
yield From(self._etcd_client.delete(key))
except etcd.EtcdKeyError:
raise KeyError
except etcd.EtcdConnectionFailed as ex:
raise OrchestratorConnectionError(ex)
except etcd.EtcdException as ex:
raise OrchestratorError(ex)

@coroutine
def lock(self, key, expiration=DEFAULT_LOCK_EXPIRATION):
assert not self.is_canceller_only

try:
yield From(self._etcd_client.write(key, {}, prevExist=False,
ttl=self._sanity_check_ttl(expiration)))
raise Return(True)
except (KeyError, etcd.EtcdKeyError):
raise Return(False)
except etcd.EtcdConnectionFailed:
logger.exception('Could not get etcd atomic lock as etcd is down')
raise Return(False)
except etcd.EtcdException as ex:
raise OrchestratorError(ex)

def shutdown(self):
logger.debug('Shutting down etcd client.')
self._shutting_down = True

if self.is_canceller_only:
return

for (key, _), task in self._watch_tasks.items():
if not task.done():
logger.debug('Canceling watch task for %s', key)
task.cancel()

if self._async_executor is not None:
self._async_executor.shutdown()


class MemoryOrchestrator(Orchestrator):
def __init__(self, **kwargs):
self.state = ExpiresDict()
self.callbacks = {}

def _callbacks_prefixed(self, prefix):
return (callback for (key, callback) in iteritems(self.callbacks)
if key.startswith(prefix))

def on_key_change(self, key, callback, restarter=None):
self.callbacks[key] = callback

@coroutine
def get_prefixed_keys(self, prefix):
raise Return({k: value for (k, value) in self.state.items()
if k.startswith(prefix)})

@coroutine
def get_key(self, key):
raise Return(self.state[key])

@coroutine
def set_key(self, key, value, overwrite=False, expiration=None):
preexisting_key = 'key' in self.state
if preexisting_key and not overwrite:
raise KeyError

absolute_expiration = None
if expiration is not None:
absolute_expiration = datetime.datetime.now() + datetime.timedelta(seconds=expiration)

self.state.set(key, value, expires=absolute_expiration)

event = KeyEvent.CREATE if not preexisting_key else KeyEvent.SET
for callback in self._callbacks_prefixed(key):
yield From(callback(KeyChange(event, key, value)))

def set_key_sync(self, key, value, overwrite=False, expiration=None):
"""
set_key, but without trollius coroutines.
"""
preexisting_key = 'key' in self.state
if preexisting_key and not overwrite:
raise KeyError

absolute_expiration = None
if expiration is not None:
absolute_expiration = datetime.datetime.now() + datetime.timedelta(seconds=expiration)

self.state.set(key, value, expires=absolute_expiration)

event = KeyEvent.CREATE if not preexisting_key else KeyEvent.SET
for callback in self._callbacks_prefixed(key):
callback(KeyChange(event, key, value))

@coroutine
def delete_key(self, key):
value = self.state[key]
del self.state[key]

for callback in self._callbacks_prefixed(key):
yield From(callback(KeyChange(KeyEvent.DELETE, key, value)))

@coroutine
def lock(self, key, expiration=DEFAULT_LOCK_EXPIRATION):
if key in self.state:
raise Return(False)
self.state.set(key, None, expires=expiration)
raise Return(True)

def shutdown(self):
self.state = None
self.callbacks = None
|
||||
|
||||
|
||||
class RedisOrchestrator(Orchestrator):
|
||||
def __init__(self, host='127.0.0.1', port=6379, password=None, db=0, cert_and_key=None,
|
||||
ca_cert=None, client_threads=5, ssl=False, skip_keyspace_event_setup=False,
|
||||
canceller_only=False, **kwargs):
|
||||
self.is_canceller_only = canceller_only
|
||||
(cert, key) = tuple(cert_and_key) if cert_and_key is not None else (None, None)
|
||||
self._sync_client = redis.StrictRedis(
|
||||
host=host,
|
||||
port=port,
|
||||
password=password,
|
||||
db=db,
|
||||
ssl_certfile=cert,
|
||||
ssl_keyfile=key,
|
||||
ssl_ca_certs=ca_cert,
|
||||
ssl=ssl,
|
||||
)
|
||||
|
||||
self._shutting_down = False
|
||||
self._tasks = {}
|
||||
self._watched_keys = {}
|
||||
self._pubsub_key = slash_join(kwargs.get('orchestrator_prefix', ''),
|
||||
REDIS_DEFAULT_PUBSUB_KEY).lstrip('/')
|
||||
|
||||
if not self.is_canceller_only:
|
||||
(self._client, self._async_executor) = wrap_with_threadpool(self._sync_client, client_threads)
|
||||
|
||||
# Configure a subscription to watch events that the orchestrator manually publishes.
|
||||
logger.debug('creating pubsub with key %s', self._pubsub_key)
|
||||
published_pubsub = self._sync_client.pubsub()
|
||||
published_pubsub.subscribe(self._pubsub_key)
|
||||
(self._pubsub, self._async_executor_pub) = wrap_with_threadpool(published_pubsub)
|
||||
self._watch_published_key()
|
||||
|
||||
# Configure a subscription to watch expired keyspace events.
|
||||
if not skip_keyspace_event_setup:
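        # Redis only emits keyspace notifications when notify-keyspace-events
        # is enabled, so it is configured here at startup. Deployments whose
        # Redis blocks CONFIG SET can pass skip_keyspace_event_setup=True and
        # apply the setting out of band instead.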
        self._sync_client.config_set(REDIS_KEYSPACE_EVENT_CONFIG_KEY,
                                     REDIS_KEYSPACE_EVENT_CONFIG_VALUE)

      expiring_pubsub = self._sync_client.pubsub()
      expiring_pubsub.psubscribe(REDIS_EXPIRED_KEYSPACE_PATTERN % (db, '*'))
      (self._pubsub_expiring, self._async_executor_ex) = wrap_with_threadpool(expiring_pubsub)
      self._watch_expiring_key()

  def _watch_published_key(self):
    def published_callback_wrapper(event_future):
      logger.debug('published callback called')
      event_result = None

      if not event_future.cancelled():
        try:
          event_result = event_future.result()
          (redis_event, event_key, event_value) = event_result
          logger.debug('Got watch of key: (%s, %s, %s)', redis_event, event_key, event_value)
        except redis.ConnectionError:
          _sleep_orchestrator()
        except redis.RedisError:
          logger.exception('Exception watching redis publish: %s', event_key)

      # Schedule creating a new future if this one has been consumed.
      if 'pub' not in self._tasks or self._tasks['pub'].done():
        self._watch_published_key()

      if event_result is not None and redis_event == REDIS_EVENT_KIND_MESSAGE:
        keychange = self._publish_to_keychange(event_value)
        for watched_key, callback in iteritems(self._watched_keys):
          if keychange.key.startswith(watched_key):
            async(callback(keychange))

    if not self._shutting_down:
      logger.debug('Scheduling watch of publish stream')
      watch_future = self._pubsub.parse_response()
      watch_future.add_done_callback(published_callback_wrapper)
      self._tasks['pub'] = async(watch_future)

  def _watch_expiring_key(self):
    def expiring_callback_wrapper(event_future):
      logger.debug('expiring callback called')
      event_result = None

      if not event_future.cancelled():
        try:
          event_result = event_future.result()
          if self._is_expired_keyspace_event(event_result):
            # Get the value of the original key before the expiration happened.
            key = self._key_from_expiration(event_result)
            expired_value = yield From(self._client.get(key))

            # $KEY/expiring is gone, but the original key still remains; set an expiration for it
            # so that other managers have time to get the event and still read the expired value.
            yield From(self._client.expire(key, ONE_DAY))
        except redis.ConnectionError:
          _sleep_orchestrator()
        except redis.RedisError:
          logger.exception('Exception watching redis expirations: %s', key)

      # Schedule creating a new future if this one has been consumed.
      if 'expire' not in self._tasks or self._tasks['expire'].done():
        self._watch_expiring_key()

      if self._is_expired_keyspace_event(event_result) and expired_value is not None:
        for watched_key, callback in iteritems(self._watched_keys):
          if key.startswith(watched_key):
            async(callback(KeyChange(KeyEvent.EXPIRE, key, expired_value)))

    if not self._shutting_down:
      logger.debug('Scheduling watch of expiration')
      watch_future = self._pubsub_expiring.parse_response()
      watch_future.add_done_callback(expiring_callback_wrapper)
      self._tasks['expire'] = async(watch_future)

  def on_key_change(self, key, callback, restarter=None):
    assert not self.is_canceller_only

    logger.debug('watching key: %s', key)
    self._watched_keys[key] = callback

  @staticmethod
  def _is_expired_keyspace_event(event_result):
    """
    Sanity check that this isn't an unrelated keyspace event.
    There could be a more efficient keyspace event config to avoid this client-side filter.
    """
    if event_result is None:
      return False

    (redis_event, _pattern, matched_key, expired) = event_result
    return (redis_event == REDIS_EVENT_KIND_PMESSAGE and
            expired == 'expired' and
            REDIS_EXPIRED_KEYSPACE_REGEX.match(matched_key) is not None)

  @staticmethod
  def _key_from_expiration(event_result):
    (_redis_event, _pattern, matched_key, _expired) = event_result
    return REDIS_EXPIRED_KEYSPACE_REGEX.match(matched_key).groups()[1]

  @staticmethod
  def _publish_to_keychange(event_value):
    e = json.loads(event_value)
    return KeyChange(KeyEvent(e['event']), e['key'], e['value'])

  @coroutine
  def get_prefixed_keys(self, prefix):
    assert not self.is_canceller_only

    # TODO: This can probably be done with redis pipelines to make it transactional.
    keys = yield From(self._client.keys(prefix + '*'))

    # Yielding to the event loop is required, thus this cannot be written as a dict comprehension.
    results = {}
    for key in keys:
      if key.endswith(REDIS_EXPIRING_SUFFIX):
        continue
      ttl = yield From(self._client.ttl(key))
      if ttl == REDIS_NONEXPIRING_KEY:
        # Only redis keys without expirations are live build manager keys.
        value = yield From(self._client.get(key))
        results.update({key: value})

    raise Return(results)

  @coroutine
  def get_key(self, key):
    assert not self.is_canceller_only

    value = yield From(self._client.get(key))
    raise Return(value)

  @coroutine
  def set_key(self, key, value, overwrite=False, expiration=None):
    assert not self.is_canceller_only

    already_exists = yield From(self._client.exists(key))

    yield From(self._client.set(key, value, xx=overwrite))
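      # Expirations are modeled with a companion '<key>/expiring' entry: only
      # the companion carries the TTL, so its keyspace-expiry event fires while
      # the main key stays readable until the watcher cleans it up.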
    if expiration is not None:
      yield From(self._client.set(slash_join(key, REDIS_EXPIRING_SUFFIX), value,
                                  xx=overwrite, ex=expiration))

    key_event = KeyEvent.SET if already_exists else KeyEvent.CREATE
    yield From(self._publish(event=key_event, key=key, value=value))

  def set_key_sync(self, key, value, overwrite=False, expiration=None):
    already_exists = self._sync_client.exists(key)

    self._sync_client.set(key, value, xx=overwrite)
    if expiration is not None:
      self._sync_client.set(slash_join(key, REDIS_EXPIRING_SUFFIX), value,
                            xx=overwrite, ex=expiration)

    self._sync_client.publish(self._pubsub_key, json.dumps({
      'event': int(KeyEvent.SET if already_exists else KeyEvent.CREATE),
      'key': key,
      'value': value,
    }))

  @coroutine
  def _publish(self, **kwargs):
    kwargs['event'] = int(kwargs['event'])
    event_json = json.dumps(kwargs)
    logger.debug('publishing event: %s', event_json)
    yield From(self._client.publish(self._pubsub_key, event_json))

  @coroutine
  def delete_key(self, key):
    assert not self.is_canceller_only

    value = yield From(self._client.get(key))
    yield From(self._client.delete(key))
    yield From(self._client.delete(slash_join(key, REDIS_EXPIRING_SUFFIX)))
    yield From(self._publish(event=KeyEvent.DELETE, key=key, value=value))

  @coroutine
  def lock(self, key, expiration=DEFAULT_LOCK_EXPIRATION):
    assert not self.is_canceller_only
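    # Note that this is a best-effort lock: it is a plain SET rather than an
    # atomic test-and-set (such as SET NX), so two concurrent callers may both
    # appear to acquire it.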
    yield From(self.set_key(key, '', expiration=expiration))
    raise Return(True)

  @coroutine
  def shutdown(self):
    logger.debug('Shutting down redis client.')

    self._shutting_down = True

    if self.is_canceller_only:
      return

    for key, task in iteritems(self._tasks):
      if not task.done():
        logger.debug('Canceling watch task for %s', key)
        task.cancel()

    if self._async_executor is not None:
      self._async_executor.shutdown()
    if self._async_executor_ex is not None:
      self._async_executor_ex.shutdown()
    if self._async_executor_pub is not None:
      self._async_executor_pub.shutdown()

@ -14,9 +14,10 @@ from flask import Flask
from buildman.enums import BuildJobResult, BuildServerStatus, RESULT_PHASES
from buildman.jobutil.buildstatus import StatusHandler
from buildman.jobutil.buildjob import BuildJob, BuildJobLoadException
from data import database
from data import database, model
from app import app, metric_queue


logger = logging.getLogger(__name__)

WORK_CHECK_TIMEOUT = 10
@ -27,6 +28,7 @@ MINIMUM_JOB_EXTENSION = timedelta(minutes=1)

HEARTBEAT_PERIOD_SEC = 30


class BuilderServer(object):
  """ Server which handles both HTTP and WAMP requests, managing the full state of the build
      controller.

@ -130,7 +132,7 @@ class BuilderServer(object):

  def _unregister_component(self, component):
    logger.debug('Unregistering component with realm %s and token %s',
                 component.builder_realm, component.expected_token)

    self._realm_map.pop(component.builder_realm, None)

@ -151,6 +153,11 @@ class BuilderServer(object):
    else:
      self._queue.complete(build_job.job_item)

    # Update the trigger failure tracking (if applicable).
    if build_job.repo_build.trigger is not None:
      model.build.update_trigger_disable_status(build_job.repo_build.trigger,
                                                RESULT_PHASES[job_status])

    if update_phase:
      status_handler = StatusHandler(self._build_logs, build_job.repo_build.uuid)
      yield From(status_handler.set_phase(RESULT_PHASES[job_status]))

@ -181,7 +188,7 @@ class BuilderServer(object):
    try:
      build_job = BuildJob(job_item)
    except BuildJobLoadException as irbe:
      logger.warning('[BUILD INCOMPLETE: job load exception] Jon data: %s. No retry restore.',
      logger.warning('[BUILD INCOMPLETE: job load exception] Job data: %s. No retry restore.',
                     job_item.body)
      logger.exception(irbe)
      self._queue.incomplete(job_item, restore_retry=False)

@ -7,16 +7,12 @@ users:
      - sudo
      - docker

{% if ssh_authorized_keys -%}
ssh_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCC0m+hVmyR3vn/xoxJe9+atRWBxSK+YXgyufNVDMcb7H00Jfnc341QH3kDVYZamUbhVh/nyc2RP7YbnZR5zORFtgOaNSdkMYrPozzBvxjnvSUokkCCWbLqXDHvIKiR12r+UTSijPJE/Yk702Mb2ejAFuae1C3Ec+qKAoOCagDjpQ3THyb5oaKE7VPHdwCWjWIQLRhC+plu77ObhoXIFJLD13gCi01L/rp4mYVCxIc2lX5A8rkK+bZHnIZwWUQ4t8SIjWxIaUo0FE7oZ83nKuNkYj5ngmLHQLY23Nx2WhE9H6NBthUpik9SmqQPtVYbhIG+bISPoH9Xs8CLrFb0VRjz JS Key
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCo6FhAP7mFFOAzM91gtaKW7saahtaN4lur42FMMztz6aqUycIltCmvxo+3FmrXgCG30maMNU36Vm1+9QRtVQEd+eRuoIWP28t+8MT01Fh4zPuE2Wca3pOHSNo3X81FfWJLzmwEHiQKs9HPQqUhezR9PcVWVkbMyAzw85c0UycGmHGFNb0UiRd9HFY6XbgbxhZv/mvKLZ99xE3xkOzS1PNsdSNvjUKwZR7pSUPqNS5S/1NXyR4GhFTU24VPH/bTATOv2ATH+PSzsZ7Qyz9UHj38tKC+ALJHEDJ4HXGzobyOUP78cHGZOfCB5FYubq0zmOudAjKIAhwI8XTFvJ2DX1P3 JZ Key
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDNvw8qo9m8np7yQ/Smv/oklM8bo8VyNRZriGYBDuolWDL/mZpYCQnZJXphQo7RFdNABYistikjJlBuuwUohLf2uSq0iKoFa2TgwI43wViWzvuzU4nA02/ITD5BZdmWAFNyIoqeB50Ol4qUgDwLAZ+7Kv7uCi6chcgr9gTi99jY3GHyZjrMiXMHGVGi+FExFuzhVC2drKjbz5q6oRfQeLtNfG4psl5GU3MQU6FkX4fgoCx0r9R48/b7l4+TT7pWblJQiRfeldixu6308vyoTUEHasdkU3/X0OTaGz/h5XqTKnGQc6stvvoED3w+L3QFp0H5Z8sZ9stSsitmCBrmbcKZ JM Key
- ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAgEAo/JkbGO6R7g1ZxARi0xWVM7FOfN02snRAcIO6vT9M7xMUkWVLgD+hM/o91lk+UFiYdql0CATobpFWncRL36KaUqsbw9/1BlI40wg296XHXSSnxhxZ4L7ytf6G1tyN319HXlI2kh9vAf/fy++yDvkH8dI3k1oLoW+mZPET6Pff04/6AXXrRlS5mhmGv9irGwiDHtVKpj6lU8DN/UtOrv1tiQ0pgwEJq05fLGoQfgPNaBCnW2z4Ubpn2gyMcMBMpSwo4hCqJePd349e4bLmFcT+gXYg7Mnup1DoTDlowFFN56wpxQbdp96IxWzU+jYPaIAuRo+BJzCyOS8qBv0Z4RZrgop0qp2JYiVwmViO6TZhIDz6loQJXUOIleQmNgTbiZx8Bwv5GY2jMYoVwlBp7yy5bRjxfbFsJ0vU7TVzNAG7oEJy/74HmHmWzRQlSlQjesr8gRbm9zgR8wqc/L107UOWFg7Cgh8ZNjKuADbXqYuda1Y9m2upcfS26UPz5l5PW5uFRMHZSi8pb1XV6/0Z8H8vwsh37Ur6aLi/5jruRmKhdlsNrB1IiDicBsPW3yg7HHSIdPU4oBNPC77yDCT3l4CKr4el81RrZt7FbJPfY+Ig9Q5O+05f6I8+ZOlJGyZ/Qfyl2aVm1HnlJKuBqPxeic8tMng/9B5N7uZL6Y3k5jFU8c= QM Key
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC964SY8ojXZVfWknF+Pz+pTHpyb66VBH7OLYnGP+Tm452YKJVFb/rXCpZYHFlzSQtzz9hko8qBoEFXuD2humojx0P7nEtTy8wUClnKcifIqD5b/V1r7ZDa/5hL9Xog11gOXZ17TW1qjN+00qgXwoSh+jM8mAxD7V2ZLnanIDqmpYamT3ZlICz1k4bwYj35gnpSFpijAXeF9LXOEUfDtzNBjeaCvyniYlQyKzpKr8x+oIHumPlxwkFOzGhBMRGrCQ1Kzija8vVZQ6/Tjvxl19jwfgcNT0Zd9vLbHNowJPWQZhLYXdGIb3NxEfAqkGPvGCsaLfsfETYhcFwxr2g+zvf4xvyKgK35PHA/5t7TQryDSKDrQ1qTDUp3dAjzwsBFwEoQ0x68shGC661n/+APMNtj8qR5M9ueIH5WEqdRW10kKzlEm/ESvjyjEVRhXiwWyKkPch/OIUPKexKaEeOBdKocSnNx1+5ntk8OXWRQgjfwtQvm1NE/qD7fViBVUlTRk0c1SVpZaybIZkiMWmA1hzsdUbDP2mzPek1ydsVffw0I8z/dRo5gXQSPq06WfNIKpsiQF8LqP+KU+462A2tbHxFzq9VozI9PeFV+xO59wlJogv6q2yA0Jfv9BFgVgNzItIsUMvStrfkUBTYgaG9djp/vAm+SwMdnLSXILJtMO/3eRQ== EC Key
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3Q9+JcjEck8CylGEekvskypE8lT3hYnCCfGUoMTAURokD8STtEaVxr197efitQkvwSYxOnDXo2Qr59FqlQ6QtFeCynX87VerN49LJ0pUA1NoYBUCvWRzwpaa8CXGhYPRpfku12mJ0qjqmGFaR5jqhXTNfXmRcWePsXqS+b3FFEqw8BhKg6By1z7NLvKeaEno4Kd0wPpxzs+hFRnk38k2p+1YO1vZzZ2mgEVp9/2577t4TmP8ucnsb9X4vURRpOJwjG8HIgmmQFUVxHRST8Zu3zOXfg9Yv/n3JYhXhvvPxkV4JB6ZbVq0cLHasexFAxz7nTmF1gDWaPbGxmdZtaDe/ CH Key
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfuDFmwNaY2WlwVlGeG1pvGiU5KfqMbTwo38hO5bm3KutJtNe9Q2GgKXKbD4WCrpsa3QZPENzGWvkctORzaZNxQ8S4FxUV5M5NEVMs0vKa4a8TksqhoARP7eetvRF6leYtVYhtUyDmj1YzxJEMRbbs3SFhcSkA7HyWDAIi8rc4WCg+BDpmCyEshuuBE26+1g2R5lJTwVwmgMHs7p59Gop1Hbn33DNQyj9S8u24DxCJpnzkjegWiU4GA+pesgeWymxYhAKDfb2yWR6aBAvnZEn10evIfe9ORpnexmko4/DBgeweISCm16ffVhya4qNBrUxThKJU4286zwq/d0mDDU8x BI Key
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4uDl4WGNgsIjGeJbUYFKSn3nhdiZJHUE47JK3W6VTfWpd1JNWNZ1CW0mJ+y7dQl0vmQq5DHQguYQLdTP4m8Waswh/9ckoX7tErA2FEZUQTmUrpeXrc8n2E8OeHh1ooqyWXP5Oup3MKA7qwMrkktM+m/MEhVhg0GUgsMd5BriePDgpdlOblEVZx+5IY3/PJc3ng+PmJbHfCds6+HgPR2tY2n6y4Ir7+15mZWjpLo6BOZlHmWAsqb8lfyp+8hrkfr4bKhY3AE2SQCqkF5LGgI84gJ5ooFN0bL9rl3bW4UNCqSiYH/QjLH+yzs55/BRBpV89mqDyDFHvsJUXta/Vz/UJ CA Key
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCs9jVbzOkDg60i+TGkETit/K9h8iBkwapRa2XURJzdYKcE27fYueX37mOdTBVCi3phOV4cWzkjRwtQBz7KCMBqrr1gLaIsuUIqeFpskTuTr9k7XgZqZ6QpECrqDy9HgCLdZO40sYCOvpw+GzehlsZPZEHRROotXCKc3k98Vlb8+1QPa4s5iZrIIdFyq3ZyhoupcN2nIwMh0GnkvgwS2DymGeLd8tziI8+ti8dxWSvgILaPplv2JTf/iqRsE3xtbtjE0tSf8VyfTLIBv+hyW79Hvaf/pvrsADwJf43IWmdOwHpYNhqR/kvx6j0LkPfxWq+rtXG3Q4JqWi4nZz5w3VTH1KImMBGil2sK1AiCwEUSQzQs2apTivfTy25HFLtje6qB8ZkvelK2lOGI62gdWiOOknYn3VpfMdrPDLGNoTnntrcG/UbJoa911IxilP4idbUxXQdyIzYr6BJJccCFiLVECPHoOaDsZ0abkBvrewp+1hqsvL7zRs4EvbI7Cfvcnf9hZd+n20Bp250GbcH0HD4/9d2DMIU6c6rAjmglPfVmyphcRruWdyCZz+ps9cfpVCQSSGSnbGS7T3M4VIXrCtjNZ7Fv7YIJ8EXWlhkNEfOYuy/lhfvyMLrp5abg5HkXSgOA3kfyitLnBN/lJODSUguDPmpo7tyjplEFQ70LYxJczw== EvB Key
{% for ssh_key in ssh_authorized_keys -%}
- {{ ssh_key }}
{%- endfor %}
{%- endif %}

write_files:
  - path: /root/disable-aws-metadata.sh
@ -63,7 +59,7 @@ coreos:
        [Install]
        WantedBy=sockets.target
{{ dockersystemd('quay-builder',
                 'quay.io/coreos/registry-build-worker',
                 worker_image,
                 quay_username,
                 quay_password,
                 worker_tag,
@ -103,4 +99,4 @@ coreos:
        Description=Machine Lifetime Service
        [Service]
        Type=oneshot
        ExecStart=/bin/sh -xc "/bin/sleep 10800; /usr/bin/systemctl --no-block poweroff"
        ExecStart=/bin/sh -xc "/bin/sleep {{ max_lifetime_s }}; /usr/bin/systemctl --no-block poweroff"

@ -1,21 +1,22 @@
import unittest
import etcd
import time
import json
import uuid
import os

from mock import Mock, ANY
from six import iteritems
from trollius import coroutine, get_event_loop, From, Future, Return
from mock import Mock, ANY, call

from buildman.manager.executor import BuilderExecutor, ExecutorException
from buildman.manager.ephemeral import (EphemeralBuilderManager, EtcdAction,
                                        ETCD_MAX_WATCH_TIMEOUT)
from buildman.component.buildcomponent import BuildComponent
from buildman.server import BuildJobResult
from buildman.asyncutil import AsyncWrapper
from util.metrics.metricqueue import duration_collector_async
from app import app, metric_queue
from buildman.asyncutil import AsyncWrapper
from buildman.component.buildcomponent import BuildComponent
from buildman.manager.ephemeral import (EphemeralBuilderManager, REALM_PREFIX,
                                        JOB_PREFIX)
from buildman.manager.executor import BuilderExecutor, ExecutorException
from buildman.orchestrator import KeyEvent, KeyChange
from buildman.server import BuildJobResult
from util import slash_join
from util.metrics.metricqueue import duration_collector_async


BUILD_UUID = 'deadbeef-dead-beef-dead-deadbeefdead'
REALM_ID = '1234-realm'

@ -45,7 +46,6 @@ class TestExecutor(BuilderExecutor):
    self.job_stopped = execution_id


class BadExecutor(BuilderExecutor):
  @coroutine
  @duration_collector_async(metric_queue.builder_time_to_start, labelvalues=["testlabel"])

@ -58,19 +58,8 @@ class EphemeralBuilderTestCase(unittest.TestCase):
    self.etcd_client_mock = None
    super(EphemeralBuilderTestCase, self).__init__(*args, **kwargs)

  def _create_mock_etcd_client(self, *args, **kwargs):
    def create_future(*args, **kwargs):
      return Future()

    self.etcd_client_mock = Mock(spec=etcd.Client, name='etcd.Client')
    self.etcd_client_mock.read = Mock(side_effect=KeyError)
    self.etcd_client_mock.delete = Mock(side_effect=self._create_completed_future())
    self.etcd_client_mock.watch = Mock(side_effect=create_future)
    self.etcd_client_mock.write = Mock(side_effect=self._create_completed_future('some_exec_id'))

    return (self.etcd_client_mock, None)

  def _create_completed_future(self, result=None):
  @staticmethod
  def _create_completed_future(result=None):
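    # Factory returning a callable that produces an already-resolved Future on
    # every invocation, letting plain Mock side_effects stand in for trollius
    # coroutines.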
    def inner(*args, **kwargs):
      new_future = Future()
      new_future.set_result(result)

@ -89,12 +78,10 @@ class EphemeralBuilderTestCase(unittest.TestCase):

  def _create_build_job(self, namespace='namespace', retries=3):
    mock_job = Mock()
    mock_job.job_details = {
      'build_uuid': BUILD_UUID,
    }
    mock_job.job_details = {'build_uuid': BUILD_UUID}
    mock_job.job_item = {
      'body': json.dumps(mock_job.job_details),
      'id': 1,
      'body': json.dumps(mock_job.job_details),
      'id': 1,
    }

    mock_job.namespace = namespace

@ -103,7 +90,6 @@ class EphemeralBuilderTestCase(unittest.TestCase):
    return mock_job


class TestEphemeralLifecycle(EphemeralBuilderTestCase):
  """ Tests the various lifecycles of the ephemeral builder and its interaction with etcd. """

@ -123,6 +109,7 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):
    self.test_executor = Mock(spec=BuilderExecutor)
    self.test_executor.start_builder = Mock(side_effect=self._create_completed_future('123'))
    self.test_executor.stop_builder = Mock(side_effect=self._create_completed_future())
    self.test_executor.setup_time = 60
    self.test_executor.name = 'MockExecutor'
    self.test_executor.minimum_retry_threshold = 0
    return self.test_executor

@ -144,21 +131,20 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):
      self.job_complete_callback,
      '127.0.0.1',
      30,
      etcd_creator=self._create_mock_etcd_client,
    )

    self.manager.initialize({'EXECUTOR': 'test'})

    # Test that we are watching the realm and jobs key once initialized.
    self.etcd_client_mock.watch.assert_any_call('realm/', recursive=True, index=None,
                                                timeout=ETCD_MAX_WATCH_TIMEOUT)

    self.etcd_client_mock.watch.assert_any_call('building/', recursive=True, index=None,
                                                timeout=ETCD_MAX_WATCH_TIMEOUT)
    self.manager.initialize({
      'EXECUTOR': 'test',
      'ORCHESTRATOR': {'MEM_CONFIG': None},
    })

    # Ensure that the realm and building callbacks have been registered
    callback_keys = [key for key in self.manager._orchestrator.callbacks]
    self.assertIn(REALM_PREFIX, callback_keys)
    self.assertIn(JOB_PREFIX, callback_keys)

    self.mock_job = self._create_build_job()
    self.mock_job_key = os.path.join('building/', BUILD_UUID)
    self.mock_job_key = slash_join('building', BUILD_UUID)

  def tearDown(self):
    super(TestEphemeralLifecycle, self).tearDown()

@ -167,46 +153,58 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):

  @coroutine
  def _setup_job_for_managers(self):
    self.etcd_client_mock.read = Mock(side_effect=KeyError)
    test_component = Mock(spec=BuildComponent)
    test_component.builder_realm = REALM_ID
    test_component.start_build = Mock(side_effect=self._create_completed_future())
    self.register_component_callback.return_value = test_component

    # Ask for a builder to be scheduled
    self.etcd_client_mock.write.reset()

    is_scheduled = yield From(self.manager.schedule(self.mock_job))
    self.assertTrue(is_scheduled)
    self.assertEqual(self.test_executor.start_builder.call_count, 1)

    # Ensure the job and realm and metric were added to etcd.
    self.assertEqual(self.etcd_client_mock.write.call_args_list[0][0][0], self.mock_job_key)
    self.assertTrue(self.etcd_client_mock.write.call_args_list[1][0][0].find('metric/') == 0)
    self.assertTrue(self.etcd_client_mock.write.call_args_list[2][0][0].find('realm/') == 0)
    realm_data = json.loads(self.etcd_client_mock.write.call_args_list[2][0][1])
    # Ensure that the job, realm, and metric callbacks have been registered
    callback_keys = [key for key in self.manager._orchestrator.callbacks]
    self.assertIn(self.mock_job_key, self.manager._orchestrator.state)
    self.assertIn(REALM_PREFIX, callback_keys)
    # TODO: assert metric key has been set

    realm_for_build = self._find_realm_key(self.manager._orchestrator, BUILD_UUID)

    raw_realm_data = yield From(self.manager._orchestrator.get_key(slash_join('realm',
                                                                              realm_for_build)))
    realm_data = json.loads(raw_realm_data)
    realm_data['realm'] = REALM_ID

    # Right now the job is not registered with any managers because etcd has not accepted the job
    self.assertEqual(self.register_component_callback.call_count, 0)

    # Fire off a realm changed with the same data.
    realm_created = Mock(spec=etcd.EtcdResult)
    realm_created.action = EtcdAction.CREATE
    realm_created.key = os.path.join('realm/', REALM_ID)
    realm_created.value = json.dumps(realm_data)

    yield From(self.manager._handle_realm_change(realm_created))
    self.assertEqual(self.register_component_callback.call_count, 1)
    yield From(self.manager._realm_callback(
      KeyChange(KeyEvent.CREATE,
                slash_join(REALM_PREFIX, REALM_ID),
                json.dumps(realm_data))))

    # Ensure that we have at least one component node.
    self.assertEquals(1, self.manager.num_workers())
    self.assertEqual(self.register_component_callback.call_count, 1)
    self.assertEqual(1, self.manager.num_workers())

    # Ensure that the build info exists.
    self.assertIsNotNone(self.manager._build_uuid_to_info.get(BUILD_UUID))

    raise Return(test_component)

  @staticmethod
  def _find_realm_key(orchestrator, build_uuid):
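    # Scans the orchestrator's realm entries for the one whose queued job body
    # references the given build UUID; raises KeyError if none matches.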
    for key, value in iteritems(orchestrator.state):
      if key.startswith(REALM_PREFIX):
        parsed_value = json.loads(value)
        body = json.loads(parsed_value['job_queue_item']['body'])
        if body['build_uuid'] == build_uuid:
          return parsed_value['realm']
        continue
    raise KeyError

  @async_test
  def test_schedule_and_complete(self):
    # Test that a job is properly registered with all of the managers

@ -214,12 +212,6 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):

    # Take the job ourselves
    yield From(self.manager.build_component_ready(test_component))
    read_calls = [call('building/', recursive=True), call(os.path.join('metric/', REALM_ID))]
    self.etcd_client_mock.read.assert_has_calls(read_calls)

    delete_calls = [call('building/', recursive=True), call(os.path.join('metric/', REALM_ID))]
    self.etcd_client_mock.read.assert_has_calls(delete_calls)
    self.etcd_client_mock.delete.reset_mock()

    self.assertIsNotNone(self.manager._build_uuid_to_info.get(BUILD_UUID))

@ -228,30 +220,23 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):

    # Ensure that the executor kills the job.
    self.assertEqual(self.test_executor.stop_builder.call_count, 1)
    self.etcd_client_mock.delete.assert_has_calls([call(self.mock_job_key)])

    # Ensure the build information is cleaned up.
    self.assertIsNone(self.manager._build_uuid_to_info.get(BUILD_UUID))
    self.assertEquals(0, self.manager.num_workers())
    self.assertEqual(0, self.manager.num_workers())

  @async_test
  def test_another_manager_takes_job(self):
    # Prepare a job to be taken by another manager
    test_component = yield From(self._setup_job_for_managers())

    realm_deleted = Mock(spec=etcd.EtcdResult)
    realm_deleted.action = EtcdAction.DELETE
    realm_deleted.key = os.path.join('realm/', REALM_ID)

    realm_deleted._prev_node = Mock(spec=etcd.EtcdResult)
    realm_deleted._prev_node.value = json.dumps({
      'realm': REALM_ID,
      'token': 'beef',
      'execution_id': '123',
      'job_queue_item': self.mock_job.job_item,
    })

    yield From(self.manager._handle_realm_change(realm_deleted))
    yield From(self.manager._realm_callback(
      KeyChange(KeyEvent.DELETE,
                slash_join(REALM_PREFIX, REALM_ID),
                json.dumps({'realm': REALM_ID,
                            'token': 'beef',
                            'execution_id': '123',
                            'job_queue_item': self.mock_job.job_item}))))

    self.unregister_component_callback.assert_called_once_with(test_component)

@ -259,20 +244,15 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):
    self.assertEqual(self.test_executor.stop_builder.call_count, 0)

    # Ensure that we still have the build info, but not the component.
    self.assertEquals(0, self.manager.num_workers())
    self.assertEqual(0, self.manager.num_workers())
    self.assertIsNotNone(self.manager._build_uuid_to_info.get(BUILD_UUID))

    # Delete the job once it has "completed".
    expired_result = Mock(spec=etcd.EtcdResult)
    expired_result.action = EtcdAction.DELETE
    expired_result.key = self.mock_job_key
    expired_result._prev_node = Mock(spec=etcd.EtcdResult)
    expired_result._prev_node.value = json.dumps({
      'had_heartbeat': False,
      'job_queue_item': self.mock_job.job_item,
    })

    yield From(self.manager._handle_job_change(expired_result))
    yield From(self.manager._job_callback(
      KeyChange(KeyEvent.DELETE,
                self.mock_job_key,
                json.dumps({'had_heartbeat': False,
                            'job_queue_item': self.mock_job.job_item}))))

    # Ensure the job was removed from the info, but stop was not called.
    self.assertIsNone(self.manager._build_uuid_to_info.get(BUILD_UUID))

@ -280,63 +260,49 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):

  @async_test
  def test_job_started_by_other_manager(self):
    # Test that we are watching before anything else happens
    self.etcd_client_mock.watch.assert_any_call('building/', recursive=True,
                                                timeout=ETCD_MAX_WATCH_TIMEOUT, index=None)
    # Ensure that the building callbacks have been registered
    callback_keys = [key for key in self.manager._orchestrator.callbacks]
    self.assertIn(JOB_PREFIX, callback_keys)

    # Send a signal to the callback that the job has been created.
    expired_result = Mock(spec=etcd.EtcdResult)
    expired_result.action = EtcdAction.CREATE
    expired_result.key = self.mock_job_key
    expired_result._prev_node = Mock(spec=etcd.EtcdResult)
    expired_result._prev_node.value = json.dumps({
      'had_heartbeat': False,
      'job_queue_item': self.mock_job.job_item,
    })
    yield From(self.manager._job_callback(
      KeyChange(KeyEvent.CREATE,
                self.mock_job_key,
                json.dumps({'had_heartbeat': False,
                            'job_queue_item': self.mock_job.job_item}))))

    # Ensure the create does nothing.
    yield From(self.manager._handle_job_change(expired_result))
    self.assertEqual(self.test_executor.stop_builder.call_count, 0)

  @async_test
  def test_expiring_worker_not_started(self):
    # Test that we are watching before anything else happens
    self.etcd_client_mock.watch.assert_any_call('building/', recursive=True,
                                                timeout=ETCD_MAX_WATCH_TIMEOUT, index=None)
    # Ensure that the building callbacks have been registered
    callback_keys = [key for key in self.manager._orchestrator.callbacks]
    self.assertIn(JOB_PREFIX, callback_keys)

    # Send a signal to the callback that a worker has expired
    expired_result = Mock(spec=etcd.EtcdResult)
    expired_result.action = EtcdAction.EXPIRE
    expired_result.key = self.mock_job_key
    expired_result._prev_node = Mock(spec=etcd.EtcdResult)
    expired_result._prev_node.value = json.dumps({
      'had_heartbeat': True,
      'job_queue_item': self.mock_job.job_item,
    })
    yield From(self.manager._job_callback(
      KeyChange(KeyEvent.EXPIRE,
                self.mock_job_key,
                json.dumps({'had_heartbeat': True,
                            'job_queue_item': self.mock_job.job_item}))))

    # Since the realm was never registered, expiration should do nothing.
    yield From(self.manager._handle_job_change(expired_result))
    self.assertEqual(self.test_executor.stop_builder.call_count, 0)

  @async_test
  def test_expiring_worker_started(self):
    test_component = yield From(self._setup_job_for_managers())

    # Test that we are watching before anything else happens
    self.etcd_client_mock.watch.assert_any_call('building/', recursive=True,
                                                timeout=ETCD_MAX_WATCH_TIMEOUT, index=None)
    # Ensure that the building callbacks have been registered
    callback_keys = [key for key in self.manager._orchestrator.callbacks]
    self.assertIn(JOB_PREFIX, callback_keys)

    # Send a signal to the callback that a worker has expired
    expired_result = Mock(spec=etcd.EtcdResult)
    expired_result.action = EtcdAction.EXPIRE
    expired_result.key = self.mock_job_key
    expired_result._prev_node = Mock(spec=etcd.EtcdResult)
    expired_result._prev_node.value = json.dumps({
      'had_heartbeat': True,
      'job_queue_item': self.mock_job.job_item,
    })

    yield From(self.manager._handle_job_change(expired_result))
    yield From(self.manager._job_callback(
      KeyChange(KeyEvent.EXPIRE,
                self.mock_job_key,
                json.dumps({'had_heartbeat': True,
                            'job_queue_item': self.mock_job.job_item}))))

    self.test_executor.stop_builder.assert_called_once_with('123')
    self.assertEqual(self.test_executor.stop_builder.call_count, 1)

@ -345,21 +311,16 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):
  def test_buildjob_deleted(self):
    test_component = yield From(self._setup_job_for_managers())

    # Test that we are watching before anything else happens
    self.etcd_client_mock.watch.assert_any_call('building/', recursive=True,
                                                timeout=ETCD_MAX_WATCH_TIMEOUT, index=None)
    # Ensure that the building callbacks have been registered
    callback_keys = [key for key in self.manager._orchestrator.callbacks]
    self.assertIn(JOB_PREFIX, callback_keys)

    # Send a signal to the callback that a worker has expired
    expired_result = Mock(spec=etcd.EtcdResult)
    expired_result.action = EtcdAction.DELETE
    expired_result.key = self.mock_job_key
    expired_result._prev_node = Mock(spec=etcd.EtcdResult)
    expired_result._prev_node.value = json.dumps({
      'had_heartbeat': False,
      'job_queue_item': self.mock_job.job_item,
    })

    yield From(self.manager._handle_job_change(expired_result))
    yield From(self.manager._job_callback(
      KeyChange(KeyEvent.DELETE,
                self.mock_job_key,
                json.dumps({'had_heartbeat': False,
                            'job_queue_item': self.mock_job.job_item}))))

    self.assertEqual(self.test_executor.stop_builder.call_count, 0)
    self.assertEqual(self.job_complete_callback.call_count, 0)

@ -369,21 +330,16 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):
  def test_builder_never_starts(self):
    test_component = yield From(self._setup_job_for_managers())

    # Test that we are watching before anything else happens
    self.etcd_client_mock.watch.assert_any_call('building/', recursive=True,
                                                timeout=ETCD_MAX_WATCH_TIMEOUT, index=None)
    # Ensure that the building callbacks have been registered
    callback_keys = [key for key in self.manager._orchestrator.callbacks]
    self.assertIn(JOB_PREFIX, callback_keys)

    # Send a signal to the callback that a worker has expired
    expired_result = Mock(spec=etcd.EtcdResult)
    expired_result.action = EtcdAction.EXPIRE
    expired_result.key = self.mock_job_key
    expired_result._prev_node = Mock(spec=etcd.EtcdResult)
    expired_result._prev_node.value = json.dumps({
      'had_heartbeat': False,
      'job_queue_item': self.mock_job.job_item,
    })

    yield From(self.manager._handle_job_change(expired_result))
    yield From(self.manager._job_callback(
      KeyChange(KeyEvent.EXPIRE,
                self.mock_job_key,
                json.dumps({'had_heartbeat': False,
                            'job_queue_item': self.mock_job.job_item}))))

    self.test_executor.stop_builder.assert_called_once_with('123')
    self.assertEqual(self.test_executor.stop_builder.call_count, 1)

@ -397,74 +353,29 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):
  @async_test
  def test_change_worker(self):
    # Send a signal to the callback that a worker key has been changed
    set_result = Mock(sepc=etcd.EtcdResult)
    set_result.action = 'set'
    set_result.key = self.mock_job_key

    self.manager._handle_job_change(set_result)
    self.assertEquals(self.test_executor.stop_builder.call_count, 0)
    self.manager._job_callback(KeyChange(KeyEvent.SET, self.mock_job_key, 'value'))
    self.assertEqual(self.test_executor.stop_builder.call_count, 0)

  @async_test
  def test_realm_expired(self):
    test_component = yield From(self._setup_job_for_managers())

    # Send a signal to the callback that a realm has expired
    expired_result = Mock(spec=etcd.EtcdResult)
    expired_result.action = EtcdAction.EXPIRE
    expired_result.key = self.mock_job_key
    expired_result._prev_node = Mock(spec=etcd.EtcdResult)
    expired_result._prev_node.value = json.dumps({
      'realm': REALM_ID,
      'execution_id': 'foobar',
      'executor_name': 'MockExecutor',
      'job_queue_item': {'body': '{"build_uuid": "fakeid"}'},
    })

    yield From(self.manager._handle_realm_change(expired_result))
    yield From(self.manager._realm_callback(KeyChange(
      KeyEvent.EXPIRE,
      self.mock_job_key,
      json.dumps({
        'realm': REALM_ID,
        'execution_id': 'foobar',
        'executor_name': 'MockExecutor',
        'job_queue_item': {'body': '{"build_uuid": "fakeid"}'},
      }))))

    # Ensure that the cleanup code for the executor was called.
    self.test_executor.stop_builder.assert_called_once_with('foobar')
    self.assertEqual(self.test_executor.stop_builder.call_count, 1)

  @async_test
  def test_heartbeat_response(self):
    yield From(self.assertHeartbeatWithExpiration(100, self.manager.heartbeat_period_sec * 2))

  @async_test
  def test_heartbeat_future_expiration(self):
    yield From(self.assertHeartbeatWithExpiration(10, 10, ranged=True))

  @async_test
  def test_heartbeat_expired(self):
    yield From(self.assertHeartbeatWithExpiration(-60, 0))

  @coroutine
  def assertHeartbeatWithExpiration(self, expires_in_sec, expected_ttl, ranged=False):
    expiration_timestamp = time.time() + expires_in_sec
    builder_result = Mock(spec=etcd.EtcdResult)
    builder_result.value = json.dumps({
      'expiration': expiration_timestamp,
      'max_expiration': expiration_timestamp,
    })
    self.etcd_client_mock.read = Mock(side_effect=self._create_completed_future(builder_result))

    yield From(self.manager.job_heartbeat(self.mock_job))

    self.job_heartbeat_callback.assert_called_once_with(self.mock_job)
    self.assertEqual(self.etcd_client_mock.write.call_count, 1)
    self.assertEqual(self.etcd_client_mock.write.call_args_list[0][0][0], self.mock_job_key)

    job_key_data = json.loads(self.etcd_client_mock.write.call_args_list[0][0][1])
    self.assertTrue(job_key_data['had_heartbeat'])
    self.assertEquals(self.mock_job.job_item, job_key_data['job_queue_item'])

    if not ranged:
      self.assertEquals(expected_ttl, self.etcd_client_mock.write.call_args_list[0][1]['ttl'])
    else:
      self.assertTrue(self.etcd_client_mock.write.call_args_list[0][1]['ttl'] <= expected_ttl)

class TestEphemeral(EphemeralBuilderTestCase):
  """ Simple unit tests for the ephemeral builder around config management, starting and stopping
      jobs.

@ -487,7 +398,6 @@ class TestEphemeral(EphemeralBuilderTestCase):
      job_complete_callback,
      '127.0.0.1',
      30,
      etcd_creator=self._create_mock_etcd_client,
    )

  def tearDown(self):

@ -498,13 +408,14 @@ class TestEphemeral(EphemeralBuilderTestCase):
    EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor
    self.manager.initialize({
      'EXECUTOR': 'test',
      'EXECUTOR_CONFIG': dict(MINIMUM_RETRY_THRESHOLD=42)
      'EXECUTOR_CONFIG': dict(MINIMUM_RETRY_THRESHOLD=42),
      'ORCHESTRATOR': {'MEM_CONFIG': None},
    })

    # Ensure that we have a single test executor.
    self.assertEquals(1, len(self.manager.registered_executors))
    self.assertEquals(42, self.manager.registered_executors[0].minimum_retry_threshold)
    self.assertEquals('TestExecutor', self.manager.registered_executors[0].name)
    self.assertEqual(1, len(self.manager.registered_executors))
    self.assertEqual(42, self.manager.registered_executors[0].minimum_retry_threshold)
    self.assertEqual('TestExecutor', self.manager.registered_executors[0].name)

  def test_verify_executor_newconfig(self):
    EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor

@ -512,12 +423,13 @@ class TestEphemeral(EphemeralBuilderTestCase):
      'EXECUTORS': [{
        'EXECUTOR': 'test',
        'MINIMUM_RETRY_THRESHOLD': 42
      }]
      }],
      'ORCHESTRATOR': {'MEM_CONFIG': None},
    })

    # Ensure that we have a single test executor.
    self.assertEquals(1, len(self.manager.registered_executors))
    self.assertEquals(42, self.manager.registered_executors[0].minimum_retry_threshold)
    self.assertEqual(1, len(self.manager.registered_executors))
    self.assertEqual(42, self.manager.registered_executors[0].minimum_retry_threshold)


  def test_multiple_executors_samename(self):

@ -537,7 +449,8 @@ class TestEphemeral(EphemeralBuilderTestCase):
        'EXECUTOR': 'anotherexecutor',
        'MINIMUM_RETRY_THRESHOLD': 24
      },
      ]
      ],
      'ORCHESTRATOR': {'MEM_CONFIG': None},
    })


@ -557,13 +470,14 @@ class TestEphemeral(EphemeralBuilderTestCase):
        'EXECUTOR': 'anotherexecutor',
        'MINIMUM_RETRY_THRESHOLD': 24
      },
      ]
      ],
      'ORCHESTRATOR': {'MEM_CONFIG': None},
    })

    # Ensure that we have two test executors.
    self.assertEquals(2, len(self.manager.registered_executors))
    self.assertEquals(42, self.manager.registered_executors[0].minimum_retry_threshold)
    self.assertEquals(24, self.manager.registered_executors[1].minimum_retry_threshold)
    self.assertEqual(2, len(self.manager.registered_executors))
    self.assertEqual(42, self.manager.registered_executors[0].minimum_retry_threshold)
    self.assertEqual(24, self.manager.registered_executors[1].minimum_retry_threshold)

  def test_skip_invalid_executor(self):
    self.manager.initialize({

@ -572,10 +486,11 @@ class TestEphemeral(EphemeralBuilderTestCase):
        'EXECUTOR': 'unknown',
        'MINIMUM_RETRY_THRESHOLD': 42
      },
      ]
      ],
      'ORCHESTRATOR': {'MEM_CONFIG': None},
    })

    self.assertEquals(0, len(self.manager.registered_executors))
    self.assertEqual(0, len(self.manager.registered_executors))

  @async_test
  def test_schedule_job_namespace_filter(self):

@ -584,7 +499,8 @@ class TestEphemeral(EphemeralBuilderTestCase):
      'EXECUTORS': [{
        'EXECUTOR': 'test',
        'NAMESPACE_WHITELIST': ['something'],
      }]
      }],
      'ORCHESTRATOR': {'MEM_CONFIG': None},
    })

    # Try with a build job in an invalid namespace.

@ -604,7 +520,8 @@ class TestEphemeral(EphemeralBuilderTestCase):
      'EXECUTORS': [{
        'EXECUTOR': 'test',
        'MINIMUM_RETRY_THRESHOLD': 2,
      }]
      }],
      'ORCHESTRATOR': {'MEM_CONFIG': None},
    })

    # Try with a build job that has too few retries.

@ -617,7 +534,6 @@ class TestEphemeral(EphemeralBuilderTestCase):
    result = yield From(self.manager.schedule(build_job))
    self.assertTrue(result[0])

  @async_test
  def test_schedule_job_executor_fallback(self):
    EphemeralBuilderManager.EXECUTORS['primary'] = TestExecutor

@ -636,7 +552,9 @@ class TestEphemeral(EphemeralBuilderTestCase):
        'EXECUTOR': 'secondary',
        'MINIMUM_RETRY_THRESHOLD': 2,
      },
      ]
      ],
      'ALLOWED_WORKER_COUNT': 5,
      'ORCHESTRATOR': {'MEM_CONFIG': None},
    })

    # Try a job not matching the primary's namespace filter. Should schedule on secondary.

@ -691,6 +609,8 @@ class TestEphemeral(EphemeralBuilderTestCase):
    self.manager.initialize({
      'EXECUTOR': 'test',
      'EXECUTOR_CONFIG': {},
      'ALLOWED_WORKER_COUNT': 5,
      'ORCHESTRATOR': {'MEM_CONFIG': None},
    })

    build_job = self._create_build_job(namespace='something', retries=3)

@ -708,7 +628,6 @@ class TestEphemeral(EphemeralBuilderTestCase):
    self.assertIsNotNone(self.manager.registered_executors[0].job_started)
    self.manager.registered_executors[0].job_started = None

  @async_test
  def test_executor_exception(self):
    EphemeralBuilderManager.EXECUTORS['bad'] = BadExecutor

@ -716,13 +635,13 @@ class TestEphemeral(EphemeralBuilderTestCase):
    self.manager.initialize({
      'EXECUTOR': 'bad',
      'EXECUTOR_CONFIG': {},
      'ORCHESTRATOR': {'MEM_CONFIG': None},
    })

    build_job = self._create_build_job(namespace='something', retries=3)
    result = yield From(self.manager.schedule(build_job))
    self.assertFalse(result[0])

  @async_test
  def test_schedule_and_stop(self):
    EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor

@ -730,6 +649,7 @@ class TestEphemeral(EphemeralBuilderTestCase):
    self.manager.initialize({
      'EXECUTOR': 'test',
      'EXECUTOR_CONFIG': {},
      'ORCHESTRATOR': {'MEM_CONFIG': None},
    })

    # Start the build job.

@ -752,7 +672,7 @@ class TestEphemeral(EphemeralBuilderTestCase):

    # Stop the build job.
    yield From(self.manager.kill_builder_executor(build_job.build_uuid))
    self.assertEquals(executor.job_stopped, executor.job_started)
    self.assertEqual(executor.job_stopped, executor.job_started)


if __name__ == '__main__':

@ -3,6 +3,7 @@ from abc import ABCMeta, abstractmethod
from jsonschema import validate
from six import add_metaclass

from active_migration import ActiveDataMigration, ERTMigrationFlags
from endpoints.building import PreparedBuild
from data import model
from buildtrigger.triggerutil import get_trigger_config, InvalidServiceException

@ -37,7 +38,7 @@ NAMESPACES_SCHEMA = {
        'description': 'Human-readable title of the namespace',
      },
    },
    'required': ['personal', 'score', 'avatar_url', 'url', 'id', 'title'],
    'required': ['personal', 'score', 'avatar_url', 'id', 'title'],
  },
}

@ -75,7 +76,7 @@ BUILD_SOURCES_SCHEMA = {
        'description': 'True if the repository is private',
      },
    },
    'required': ['name', 'full_name', 'description', 'last_updated', 'url',
    'required': ['name', 'full_name', 'description', 'last_updated',
                 'has_admin_permissions', 'private'],
  },
}

@ -156,7 +157,7 @@ METADATA_SCHEMA = {
        'required': ['username'],
      },
    },
    'required': ['url', 'message', 'date'],
    'required': ['message'],
  },
},
'required': ['commit', 'git_url'],

@ -172,7 +173,18 @@ class BuildTriggerHandler(object):
  @property
  def auth_token(self):
    """ Returns the auth token for the trigger. """
    return self.trigger.auth_token
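    # Resolution order below: plaintext tokens injected directly by tests,
    # then the encrypted secure_auth_token field, then the legacy plaintext
    # column while the encrypted-fields migration still allows reading old
    # fields.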
    # NOTE: This check is for testing.
    if isinstance(self.trigger.auth_token, str):
      return self.trigger.auth_token

    # TODO(remove-unenc): Remove legacy field.
    if self.trigger.secure_auth_token is not None:
      return self.trigger.secure_auth_token.decrypt()

    if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
      return self.trigger.auth_token

    return None

  @abstractmethod
  def load_dockerfile_contents(self):

@ -306,7 +318,6 @@ class BuildTriggerHandler(object):
    default_branch = metadata.get('default_branch', None)
    prepared = PreparedBuild(self.trigger)
    prepared.name_from_sha(commit_sha)
    # TODO: Charlie Tuesday, March 28, 2017 come back and clean up subdirectory.
    prepared.subdirectory = config.get('dockerfile_path', None)
    prepared.context = config.get('context', None)
    prepared.is_manual = is_manual

@ -64,21 +64,15 @@ BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = {
            'user': {
              'type': 'object',
              'properties': {
                'username': {
                'display_name': {
                  'type': 'string',
                },
                'account_id': {
                  'type': 'string',
                },
                'links': {
                  'type': 'object',
                  'properties': {
                    'html': {
                      'type': 'object',
                      'properties': {
                        'href': {
                          'type': 'string',
                        },
                      },
                      'required': ['href'],
                    },
                    'avatar': {
                      'type': 'object',
                      'properties': {

@ -89,28 +83,12 @@ BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = {
                      'required': ['href'],
                    },
                  },
                  'required': ['html', 'avatar'],
                  'required': ['avatar'],
                }, # /User
              },
              'required': ['username'],
            }, # /Author
          },
        },
        'links': {
          'type': 'object',
          'properties': {
            'html': {
              'type': 'object',
              'properties': {
                'href': {
                  'type': 'string',
                },
              },
              'required': ['href'],
            },
          },
          'required': ['html'],
        }, # /Links
      },
    },
    'required': ['hash', 'message', 'date'],
  }, # /Target

@ -127,21 +105,15 @@ BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = {
    'actor': {
      'type': 'object',
      'properties': {
        'username': {
        'account_id': {
          'type': 'string',
        },
        'display_name': {
          'type': 'string',
        },
        'links': {
          'type': 'object',
          'properties': {
            'html': {
              'type': 'object',
              'properties': {
                'href': {
                  'type': 'string',
                },
              },
              'required': ['href'],
            },
            'avatar': {
              'type': 'object',
              'properties': {

@ -152,10 +124,9 @@ BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = {
                'required': ['href'],
              },
            },
            'required': ['html', 'avatar'],
            'required': ['avatar'],
          },
        },
        'required': ['username'],
      }, # /Actor
    'required': ['push', 'repository'],
} # /Root

@ -206,8 +177,7 @@ def get_transformed_commit_info(bb_commit, ref, default_branch, repository_name,
    author = lookup_author(match.group(1))
    author_info = JSONPathDict(author) if author is not None else None
    if author_info:
      config['commit_info.author.username'] = author_info['user.username']
      config['commit_info.author.url'] = 'https://bitbucket.org/%s/' % author_info['user.username']
      config['commit_info.author.username'] = author_info['user.display_name']
      config['commit_info.author.avatar_url'] = author_info['user.avatar']

  return config.dict_value()

@ -245,12 +215,10 @@ def get_transformed_webhook_payload(bb_payload, default_branch=None):
  config['commit_info.message'] = target['message']
  config['commit_info.date'] = target['date']

  config['commit_info.author.username'] = target['author.user.username']
  config['commit_info.author.url'] = target['author.user.links.html.href']
  config['commit_info.author.username'] = target['author.user.display_name']
  config['commit_info.author.avatar_url'] = target['author.user.links.avatar.href']

  config['commit_info.committer.username'] = payload['actor.username']
  config['commit_info.committer.url'] = payload['actor.links.html.href']
  config['commit_info.committer.username'] = payload['actor.display_name']
  config['commit_info.committer.avatar_url'] = payload['actor.links.avatar.href']
  return config.dict_value()


@ -328,8 +296,8 @@ class BitbucketBuildTrigger(BuildTriggerHandler):
    if not result:
      return False
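    # Bitbucket's API no longer exposes usernames on user objects (a privacy
    # change on Bitbucket's side), so the trigger config now records the
    # stable account_id along with the display nickname.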
    username = data['user']['username']
    self.put_config_key('username', username)
    self.put_config_key('account_id', data['user']['account_id'])
    self.put_config_key('nickname', data['user']['nickname'])
    return True

  def is_active(self):

@ -403,11 +371,12 @@ class BitbucketBuildTrigger(BuildTriggerHandler):
    namespaces = {}
    for repo in data:
      owner = repo['owner']

      if owner in namespaces:
        namespaces[owner]['score'] = namespaces[owner]['score'] + 1
      else:
        namespaces[owner] = {
          'personal': owner == self.config.get('username'),
          'personal': owner == self.config.get('nickname', self.config.get('username')),
          'id': owner,
          'title': owner,
          'avatar_url': repo['logo'],

@ -108,7 +108,7 @@ def get_transformed_webhook_payload(gh_payload, default_branch=None, lookup_user
|
|||
config = SafeDictSetter()
|
||||
config['commit'] = payload['head_commit.id']
|
||||
config['ref'] = payload['ref']
|
||||
config['default_branch'] = default_branch
|
||||
config['default_branch'] = payload['repository.default_branch'] or default_branch
|
||||
config['git_url'] = payload['repository.ssh_url']
|
||||
|
||||
config['commit_info.url'] = payload['head_commit.url']
|
||||
|
@@ -293,13 +293,18 @@ class GithubBuildTrigger(BuildTriggerHandler):

    for org in usr.get_orgs():
      organization = org.login if org.login else org.name

      # NOTE: We don't load the organization's html_url nor its plan, because doing
      # so requires loading *each organization* via its own API call in this tight
      # loop, which was massively slowing down the load time for users when setting
      # up triggers.
      namespaces[organization] = {
        'personal': False,
        'id': organization,
        'title': organization,
        'avatar_url': org.avatar_url,
        'url': org.html_url,
        'score': org.plan.private_repos if org.plan else 0,
        'url': '',
        'score': 0,
      }

    return BuildTriggerHandler.build_namespaces_response(namespaces)
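The NOTE in this hunk is the crux of the change: PyGithub resolves an organization's `html_url` and `plan` lazily, so reading them inside the loop costs one extra API round-trip per organization. A minimal sketch of the cheap path, where `orgs` is a hypothetical stand-in for PyGithub's `usr.get_orgs()` result:

def build_namespaces(orgs):
  # Only fields already present in the list payload are read; touching
  # org.html_url or org.plan here would trigger a lazy, per-org API fetch.
  namespaces = {}
  for org in orgs:
    organization = org.login if org.login else org.name
    namespaces[organization] = {
      'personal': False,
      'id': organization,
      'title': organization,
      'avatar_url': org.avatar_url,
      'url': '',
      'score': 0,
    }
  return namespaces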
@@ -320,7 +325,7 @@ class GithubBuildTrigger(BuildTriggerHandler):
    gh_client = self._get_client()
    usr = gh_client.get_user()
    if namespace == usr.login:
      repos = [repo_view(repo) for repo in usr.get_repos() if repo.owner.login == namespace]
      repos = [repo_view(repo) for repo in usr.get_repos(type='owner', sort='updated')]
      return BuildTriggerHandler.build_sources_response(repos)

    try:
@@ -376,7 +381,7 @@ class GithubBuildTrigger(BuildTriggerHandler):
      return None

    try:
      file_info = repo.get_file_contents(path)
      file_info = repo.get_contents(path)
    # TypeError is needed because directory inputs cause a TypeError
    except (GithubException, TypeError) as ghe:
      logger.error("got error from trying to find github file %s" % ghe)
@@ -1,28 +1,26 @@
import os.path
import logging
import os

from calendar import timegm
from functools import wraps

import dateutil.parser

from app import app, gitlab_trigger
import gitlab
import requests

from jsonschema import validate

from app import app, gitlab_trigger
from buildtrigger.triggerutil import (RepositoryReadException, TriggerActivationException,
                                      TriggerDeactivationException, TriggerStartException,
                                      SkipRequestException, InvalidPayloadException,
                                      TriggerAuthException,
                                      determine_build_ref, raise_if_skipped_build,
                                      find_matching_branches)

from buildtrigger.basehandler import BuildTriggerHandler

from endpoints.exception import ExternalServiceError
from util.security.ssh import generate_ssh_keypair
from util.dict_wrappers import JSONPathDict, SafeDictSetter
from endpoints.exception import ExternalServiceError

import gitlab
import requests

logger = logging.getLogger(__name__)
@@ -53,7 +51,7 @@ GITLAB_WEBHOOK_PAYLOAD_SCHEMA = {
          'type': 'string',
        },
        'url': {
          'type': 'string',
          'type': ['string', 'null'],
        },
        'message': {
          'type': 'string',
@@ -71,7 +69,7 @@ GITLAB_WEBHOOK_PAYLOAD_SCHEMA = {
          'required': ['email'],
        },
      },
      'required': ['id', 'url', 'message', 'timestamp'],
      'required': ['id', 'message', 'timestamp'],
    },
  },
},
@@ -89,7 +87,7 @@ _ACCESS_LEVEL_MAP = {
_PER_PAGE_COUNT = 20


def _catch_timeouts(func):
def _catch_timeouts_and_errors(func):
  @wraps(func)
  def wrapper(*args, **kwargs):
    try:
@@ -98,17 +96,21 @@ def _catch_timeouts(func):
      msg = 'Request to the GitLab API timed out'
      logger.exception(msg)
      raise ExternalServiceError(msg)
    except gitlab.GitlabError:
      msg = 'GitLab API error. Please contact support.'
      logger.exception(msg)
      raise ExternalServiceError(msg)
  return wrapper


def _paginated_iterator(func, exc):
def _paginated_iterator(func, exc, **kwargs):
  """ Returns an iterator over invocations of the given function, automatically handling
      pagination.
  """
  page = 0
  page = 1
  while True:
    result = func(page=page, per_page=_PER_PAGE_COUNT)
    if result is False:
    result = func(page=page, per_page=_PER_PAGE_COUNT, **kwargs)
    if result is None or result is False:
      raise exc

    counter = 0
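The rewritten helper assumes GitLab's 1-indexed pagination and treats `None`/`False` as an API failure. A minimal, self-contained sketch of that contract; the `fetch_page` function here is only a stand-in for a GitLab list call:

PER_PAGE = 20

def paginated(func, exc, **kwargs):
  # Yields items across pages; GitLab pages start at 1, not 0.
  page = 1
  while True:
    result = func(page=page, per_page=PER_PAGE, **kwargs)
    if result is None or result is False:
      raise exc
    for item in result:
      yield item
    if len(result) < PER_PAGE:
      return  # short page: nothing further to fetch
    page += 1

# Stand-in for a GitLab list endpoint: 45 fake items served 20 per page.
def fetch_page(page=1, per_page=20):
  items = list(range(45))
  return items[(page - 1) * per_page:page * per_page]

assert len(list(paginated(fetch_page, RuntimeError()))) == 45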
@@ -143,32 +145,36 @@ def get_transformed_webhook_payload(gl_payload, default_branch=None, lookup_user
  if payload['object_kind'] == 'push' and not commits:
    raise SkipRequestException

  # Check for missing commit information.
  commit_sha = payload['checkout_sha'] or payload['after']
  if commit_sha is None or commit_sha == '0000000000000000000000000000000000000000':
    raise SkipRequestException

  config = SafeDictSetter()
  config['commit'] = payload['checkout_sha']
  config['commit'] = commit_sha
  config['ref'] = payload['ref']
  config['default_branch'] = default_branch
  config['git_url'] = payload['repository.git_ssh_url']

  found_commit = JSONPathDict({})
  if payload['object_kind'] == 'push':
  if payload['object_kind'] == 'push' or payload['object_kind'] == 'tag_push':
    # Find the commit associated with the checkout_sha. Gitlab doesn't (necessarily) send this in
    # any order, so we cannot simply index into the commits list.
    found_commit = None
    for commit in commits:
      if commit['id'] == payload['checkout_sha']:
        found_commit = JSONPathDict(commit)
        break
    if commits is not None:
      for commit in commits:
        if commit['id'] == payload['checkout_sha']:
          found_commit = JSONPathDict(commit)
          break

    if found_commit is None and lookup_commit:
      checkout_sha = payload['checkout_sha'] or payload['after']
      found_commit_info = lookup_commit(payload['project_id'], checkout_sha)
      found_commit = JSONPathDict(dict(found_commit_info) if found_commit_info else {})

    if found_commit is None:
      raise SkipRequestException

  elif payload['object_kind'] == 'tag_push':
    # Gitlab doesn't send commit information for tag pushes (WHY?!), so we need to lookup the
    # commit SHA directly.
    if lookup_commit:
      found_commit_info = lookup_commit(payload['project_id'], payload['checkout_sha'])
      found_commit = JSONPathDict(found_commit_info or {})

  config['commit_info.url'] = found_commit['url']
  config['commit_info.message'] = found_commit['message']
  config['commit_info.date'] = found_commit['timestamp']
@@ -196,20 +202,28 @@ class GitLabBuildTrigger(BuildTriggerHandler):

  def _get_authorized_client(self):
    auth_token = self.auth_token or 'invalid'
    return gitlab.Gitlab(gitlab_trigger.api_endpoint(), oauth_token=auth_token, timeout=5)
    api_version = self.config.get('API_VERSION', '4')
    client = gitlab.Gitlab(gitlab_trigger.api_endpoint(), oauth_token=auth_token, timeout=20,
                           api_version=api_version)
    try:
      client.auth()
    except gitlab.GitlabGetError as ex:
      raise TriggerAuthException(ex.message)

    return client

  def is_active(self):
    return 'hook_id' in self.config

  @_catch_timeouts
  @_catch_timeouts_and_errors
  def activate(self, standard_webhook_url):
    config = self.config
    new_build_source = config['build_source']
    gl_client = self._get_authorized_client()

    # Find the GitLab repository.
    repository = gl_client.getproject(new_build_source)
    if repository is False:
    gl_project = gl_client.projects.get(new_build_source)
    if not gl_project:
      msg = 'Unable to find GitLab repository for source: %s' % new_build_source
      raise TriggerActivationException(msg)
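A condensed sketch of the new client handshake, with a placeholder endpoint and token; `auth()` is what turns a bad OAuth token into an immediate `GitlabGetError` here, rather than a failure on some later, arbitrary call:

import gitlab

def make_client(endpoint='https://gitlab.example.com', token='oauth-token-here'):
  # python-gitlab v4 client; auth() populates client.user or raises.
  client = gitlab.Gitlab(endpoint, oauth_token=token, timeout=20, api_version='4')
  try:
    client.auth()
  except gitlab.GitlabGetError as ex:
    raise RuntimeError('GitLab rejected the OAuth token: %s' % ex)
  return client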
@@ -221,20 +235,31 @@ class GitLabBuildTrigger(BuildTriggerHandler):
        'value': public_key,
      },
    ]
    key = gl_client.adddeploykey(repository['id'], '%s Builder' % app.config['REGISTRY_TITLE'],
                                 public_key)
    if key is False:

    key = gl_project.keys.create({
      'title': '%s Builder' % app.config['REGISTRY_TITLE'],
      'key': public_key,
    })

    if not key:
      msg = 'Unable to add deploy key to repository: %s' % new_build_source
      raise TriggerActivationException(msg)
    config['key_id'] = key['id']

    config['key_id'] = key.get_id()

    # Add the webhook to the GitLab repository.
    hook = gl_client.addprojecthook(repository['id'], standard_webhook_url, push=True)
    if hook is False:
    hook = gl_project.hooks.create({
      'url': standard_webhook_url,
      'push': True,
      'tag_push': True,
      'push_events': True,
      'tag_push_events': True,
    })
    if not hook:
      msg = 'Unable to create webhook on repository: %s' % new_build_source
      raise TriggerActivationException(msg)

    config['hook_id'] = hook['id']
    config['hook_id'] = hook.get_id()
    self.config = config
    return config, {'private_key': private_key}
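The activation flow above maps onto python-gitlab's manager objects roughly as follows; the project path, webhook URL, and key material are placeholders:

import gitlab

def activate(client, project_path, webhook_url, public_key, title='Builder'):
  project = client.projects.get(project_path)
  # The deploy key grants the build workers read access over SSH.
  key = project.keys.create({'title': title, 'key': public_key})
  # The webhook fires on both branch and tag pushes.
  hook = project.hooks.create({
    'url': webhook_url,
    'push_events': True,
    'tag_push_events': True,
  })
  return key.get_id(), hook.get_id()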
@@ -243,72 +268,85 @@ class GitLabBuildTrigger(BuildTriggerHandler):
    gl_client = self._get_authorized_client()

    # Find the GitLab repository.
    repository = gl_client.getproject(config['build_source'])
    if repository is False:
      msg = 'Unable to find GitLab repository for source: %s' % config['build_source']
      raise TriggerDeactivationException(msg)
    try:
      gl_project = gl_client.projects.get(config['build_source'])
      if not gl_project:
        config.pop('key_id', None)
        config.pop('hook_id', None)
        self.config = config
        return config
    except gitlab.GitlabGetError as ex:
      if ex.response_code != 404:
        raise

    # Remove the webhook.
    success = gl_client.deleteprojecthook(repository['id'], config['hook_id'])
    if success is False:
      msg = 'Unable to remove hook: %s' % config['hook_id']
      raise TriggerDeactivationException(msg)
    try:
      gl_project.hooks.delete(config['hook_id'])
    except gitlab.GitlabDeleteError as ex:
      if ex.response_code != 404:
        raise

    config.pop('hook_id', None)

    # Remove the key
    success = gl_client.deletedeploykey(repository['id'], config['key_id'])
    if success is False:
      msg = 'Unable to remove deploy key: %s' % config['key_id']
      raise TriggerDeactivationException(msg)
    try:
      gl_project.keys.delete(config['key_id'])
    except gitlab.GitlabDeleteError as ex:
      if ex.response_code != 404:
        raise

    config.pop('key_id', None)

    self.config = config
    return config

  @_catch_timeouts
  @_catch_timeouts_and_errors
  def list_build_source_namespaces(self):
    gl_client = self._get_authorized_client()
    current_user = gl_client.currentuser()
    if current_user is False:
    current_user = gl_client.user
    if not current_user:
      raise RepositoryReadException('Unable to get current user')

    namespaces = {}
    repositories = _paginated_iterator(gl_client.getprojects, RepositoryReadException)
    for repo in repositories:
      namespace = repo.get('namespace') or {}
      if not namespace:
        continue

      namespace_id = namespace['id']

      avatar_url = ''
      if 'avatar' in namespace:
        avatar_data = namespace.get('avatar') or {}
        avatar_url = avatar_data.get('url')
      elif 'owner' in repo:
        owner_data = repo.get('owner') or {}
        avatar_url = owner_data.get('avatar_url')

    for namespace in _paginated_iterator(gl_client.namespaces.list, RepositoryReadException):
      namespace_id = namespace.get_id()
      if namespace_id in namespaces:
        namespaces[namespace_id]['score'] = namespaces[namespace_id]['score'] + 1
      else:
        owner = namespace['name']
        owner = namespace.attributes['name']
        namespaces[namespace_id] = {
          'personal': owner == current_user['username'],
          'id': namespace['path'],
          'title': namespace['name'],
          'avatar_url': avatar_url,
          'personal': namespace.attributes['kind'] == 'user',
          'id': str(namespace_id),
          'title': namespace.attributes['name'],
          'avatar_url': namespace.attributes.get('avatar_url'),
          'score': 1,
          'url': gl_client.host + '/' + namespace['path'],
          'url': namespace.attributes.get('web_url') or '',
        }

    return BuildTriggerHandler.build_namespaces_response(namespaces)

  @_catch_timeouts
  def list_build_sources_for_namespace(self, namespace):
  def _get_namespace(self, gl_client, gl_namespace, lazy=False):
    try:
      if gl_namespace.attributes['kind'] == 'group':
        return gl_client.groups.get(gl_namespace.attributes['id'], lazy=lazy)

      if gl_namespace.attributes['kind'] == 'user':
        return gl_client.users.get(gl_client.user.attributes['id'], lazy=lazy)

      # Note: This doesn't seem to work for IDs retrieved via the namespaces API; the IDs are
      # different.
      return gl_client.users.get(gl_namespace.attributes['id'], lazy=lazy)
    except gitlab.GitlabGetError:
      return None

  @_catch_timeouts_and_errors
  def list_build_sources_for_namespace(self, namespace_id):
    if not namespace_id:
      return []

    def repo_view(repo):
      # Because *anything* can be None in GitLab API!
      permissions = repo.get('permissions') or {}
      permissions = repo.attributes.get('permissions') or {}
      group_access = permissions.get('group_access') or {}
      project_access = permissions.get('project_access') or {}
@@ -327,17 +365,17 @@ class GitLabBuildTrigger(BuildTriggerHandler):
        has_admin_permission = True

      view = {
        'name': repo['path'],
        'full_name': repo['path_with_namespace'],
        'description': repo.get('description') or '',
        'url': repo.get('web_url'),
        'name': repo.attributes['path'],
        'full_name': repo.attributes['path_with_namespace'],
        'description': repo.attributes.get('description') or '',
        'url': repo.attributes.get('web_url'),
        'has_admin_permissions': has_admin_permission,
        'private': repo.get('public', False) is False,
        'private': repo.attributes.get('visibility') == 'private',
      }

      if repo.get('last_activity_at'):
      if repo.attributes.get('last_activity_at'):
        try:
          last_modified = dateutil.parser.parse(repo['last_activity_at'])
          last_modified = dateutil.parser.parse(repo.attributes['last_activity_at'])
          view['last_updated'] = timegm(last_modified.utctimetuple())
        except ValueError:
          logger.exception('Gitlab gave us an invalid last_activity_at: %s', last_modified)
@@ -345,44 +383,54 @@ class GitLabBuildTrigger(BuildTriggerHandler):
      return view

    gl_client = self._get_authorized_client()
    repositories = _paginated_iterator(gl_client.getprojects, RepositoryReadException)
    repos = [repo_view(repo) for repo in repositories if repo['namespace']['path'] == namespace]
    return BuildTriggerHandler.build_sources_response(repos)

  @_catch_timeouts
    try:
      gl_namespace = gl_client.namespaces.get(namespace_id)
    except gitlab.GitlabGetError:
      return []

    namespace_obj = self._get_namespace(gl_client, gl_namespace, lazy=True)
    repositories = _paginated_iterator(namespace_obj.projects.list, RepositoryReadException)

    try:
      return BuildTriggerHandler.build_sources_response([repo_view(repo) for repo in repositories])
    except gitlab.GitlabGetError:
      return []

  @_catch_timeouts_and_errors
  def list_build_subdirs(self):
    config = self.config
    gl_client = self._get_authorized_client()
    new_build_source = config['build_source']

    repository = gl_client.getproject(new_build_source)
    if repository is False:
    gl_project = gl_client.projects.get(new_build_source)
    if not gl_project:
      msg = 'Unable to find GitLab repository for source: %s' % new_build_source
      raise RepositoryReadException(msg)

    repo_branches = gl_client.getbranches(repository['id'])
    if repo_branches is False:
    repo_branches = gl_project.branches.list()
    if not repo_branches:
      msg = 'Unable to find GitLab branches for source: %s' % new_build_source
      raise RepositoryReadException(msg)

    branches = [branch['name'] for branch in repo_branches]
    branches = [branch.attributes['name'] for branch in repo_branches]
    branches = find_matching_branches(config, branches)
    branches = branches or [repository['default_branch'] or 'master']
    branches = branches or [gl_project.attributes['default_branch'] or 'master']

    repo_tree = gl_client.getrepositorytree(repository['id'], ref_name=branches[0])
    if repo_tree is False:
    repo_tree = gl_project.repository_tree(ref=branches[0])
    if not repo_tree:
      msg = 'Unable to find GitLab repository tree for source: %s' % new_build_source
      raise RepositoryReadException(msg)

    return ["/" + node['name'] for node in repo_tree if self.filename_is_dockerfile(node['name'])]
    return [node['name'] for node in repo_tree if self.filename_is_dockerfile(node['name'])]

  @_catch_timeouts
  @_catch_timeouts_and_errors
  def load_dockerfile_contents(self):
    gl_client = self._get_authorized_client()
    path = self.get_dockerfile_path()

    repository = gl_client.getproject(self.config['build_source'])
    if repository is False:
    gl_project = gl_client.projects.get(self.config['build_source'])
    if not gl_project:
      return None

    branches = self.list_field_values('branch_name')
@@ -391,16 +439,15 @@ class GitLabBuildTrigger(BuildTriggerHandler):
      return None

    branch_name = branches[0]
    if repository['default_branch'] in branches:
      branch_name = repository['default_branch']
    if gl_project.attributes['default_branch'] in branches:
      branch_name = gl_project.attributes['default_branch']

    contents = gl_client.getrawfile(repository['id'], branch_name, path)
    if contents is False:
    try:
      return gl_project.files.get(path, branch_name).decode()
    except gitlab.GitlabGetError:
      return None

    return contents

  @_catch_timeouts
  @_catch_timeouts_and_errors
  def list_field_values(self, field_name, limit=None):
    if field_name == 'refs':
      branches = self.list_field_values('branch_name')
@@ -410,139 +457,138 @@ class GitLabBuildTrigger(BuildTriggerHandler):
              [{'kind': 'tag', 'name': t} for t in tags])

    gl_client = self._get_authorized_client()
    repo = gl_client.getproject(self.config['build_source'])
    if repo is False:
    gl_project = gl_client.projects.get(self.config['build_source'])
    if not gl_project:
      return []

    if field_name == 'tag_name':
      tags = gl_client.getrepositorytags(repo['id'])
      if tags is False:
      tags = gl_project.tags.list()
      if not tags:
        return []

      if limit:
        tags = tags[0:limit]

      return [tag['name'] for tag in tags]
      return [tag.attributes['name'] for tag in tags]

    if field_name == 'branch_name':
      branches = gl_client.getbranches(repo['id'])
      if branches is False:
      branches = gl_project.branches.list()
      if not branches:
        return []

      if limit:
        branches = branches[0:limit]

      return [branch['name'] for branch in branches]
      return [branch.attributes['name'] for branch in branches]

    return None

  def get_repository_url(self):
    return gitlab_trigger.get_public_url(self.config['build_source'])

  @_catch_timeouts
  @_catch_timeouts_and_errors
  def lookup_commit(self, repo_id, commit_sha):
    if repo_id is None:
      return None

    gl_client = self._get_authorized_client()
    commit = gl_client.getrepositorycommit(repo_id, commit_sha)
    if commit is False:
    gl_project = gl_client.projects.get(self.config['build_source'], lazy=True)
    commit = gl_project.commits.get(commit_sha)
    if not commit:
      return None

    return commit

  @_catch_timeouts
  @_catch_timeouts_and_errors
  def lookup_user(self, email):
    gl_client = self._get_authorized_client()
    try:
      result = gl_client.getusers(search=email)
      if result is False:
      result = gl_client.users.list(search=email)
      if not result:
        return None

      [user] = result
      return {
        'username': user['username'],
        'html_url': gl_client.host + '/' + user['username'],
        'avatar_url': user['avatar_url']
        'username': user.attributes['username'],
        'html_url': user.attributes['web_url'],
        'avatar_url': user.attributes['avatar_url']
      }
    except ValueError:
      return None

  @_catch_timeouts
  @_catch_timeouts_and_errors
  def get_metadata_for_commit(self, commit_sha, ref, repo):
    gl_client = self._get_authorized_client()
    commit = gl_client.getrepositorycommit(repo['id'], commit_sha)
    commit = self.lookup_commit(repo.get_id(), commit_sha)
    if commit is None:
      return None

    metadata = {
      'commit': commit['id'],
      'commit': commit.attributes['id'],
      'ref': ref,
      'default_branch': repo['default_branch'],
      'git_url': repo['ssh_url_to_repo'],
      'default_branch': repo.attributes['default_branch'],
      'git_url': repo.attributes['ssh_url_to_repo'],
      'commit_info': {
        'url': gl_client.host + '/' + repo['path_with_namespace'] + '/commit/' + commit['id'],
        'message': commit['message'],
        'date': commit['committed_date'],
        'url': os.path.join(repo.attributes['web_url'], 'commit', commit.attributes['id']),
        'message': commit.attributes['message'],
        'date': commit.attributes['committed_date'],
      },
    }

    committer = None
    if 'committer_email' in commit:
      committer = self.lookup_user(commit['committer_email'])
    if 'committer_email' in commit.attributes:
      committer = self.lookup_user(commit.attributes['committer_email'])

    author = None
    if 'author_email' in commit:
      author = self.lookup_user(commit['author_email'])
    if 'author_email' in commit.attributes:
      author = self.lookup_user(commit.attributes['author_email'])

    if committer is not None:
      metadata['commit_info']['committer'] = {
        'username': committer['username'],
        'avatar_url': committer['avatar_url'],
        'url': gl_client.host + '/' + committer['username'],
        'url': committer.get('http_url', ''),
      }

    if author is not None:
      metadata['commit_info']['author'] = {
        'username': author['username'],
        'avatar_url': author['avatar_url'],
        'url': gl_client.host + '/' + author['username']
        'url': author.get('http_url', ''),
      }

    return metadata

  @_catch_timeouts
  @_catch_timeouts_and_errors
  def manual_start(self, run_parameters=None):
    gl_client = self._get_authorized_client()

    repo = gl_client.getproject(self.config['build_source'])
    if repo is False:
    gl_project = gl_client.projects.get(self.config['build_source'])
    if not gl_project:
      raise TriggerStartException('Could not find repository')

    def get_tag_sha(tag_name):
      tags = gl_client.getrepositorytags(repo['id'])
      if tags is False:
      try:
        tag = gl_project.tags.get(tag_name)
      except gitlab.GitlabGetError:
        raise TriggerStartException('Could not find tag in repository')

      for tag in tags:
        if tag['name'] == tag_name:
          return tag['commit']['id']

      raise TriggerStartException('Could not find tag in repository')
      return tag.attributes['commit']['id']

    def get_branch_sha(branch_name):
      branch = gl_client.getbranch(repo['id'], branch_name)
      if branch is False:
      try:
        branch = gl_project.branches.get(branch_name)
      except gitlab.GitlabGetError:
        raise TriggerStartException('Could not find branch in repository')

      return branch['commit']['id']
      return branch.attributes['commit']['id']

    # Find the branch or tag to build.
    (commit_sha, ref) = determine_build_ref(run_parameters, get_branch_sha, get_tag_sha,
                                            repo['default_branch'])
                                            gl_project.attributes['default_branch'])

    metadata = self.get_metadata_for_commit(commit_sha, ref, repo)
    metadata = self.get_metadata_for_commit(commit_sha, ref, gl_project)
    return self.prepare_build(metadata, is_manual=True)

  @_catch_timeouts
  @_catch_timeouts_and_errors
  def handle_trigger_request(self, request):
    payload = request.get_json()
    if not payload:
@@ -552,15 +598,22 @@ class GitLabBuildTrigger(BuildTriggerHandler):

    # Lookup the default branch.
    gl_client = self._get_authorized_client()
    repo = gl_client.getproject(self.config['build_source'])
    if repo is False:
    gl_project = gl_client.projects.get(self.config['build_source'])
    if not gl_project:
      logger.debug('Skipping GitLab build; project %s not found', self.config['build_source'])
      raise InvalidPayloadException()

    default_branch = repo['default_branch']
    def lookup_commit(repo_id, commit_sha):
      commit = self.lookup_commit(repo_id, commit_sha)
      if commit is None:
        return None

      return dict(commit.attributes)

    default_branch = gl_project.attributes['default_branch']
    metadata = get_transformed_webhook_payload(payload, default_branch=default_branch,
                                               lookup_user=self.lookup_user,
                                               lookup_commit=self.lookup_commit)
                                               lookup_commit=lookup_commit)
    prepared = self.prepare_build(metadata)

    # Check if we should skip this build.
@@ -9,7 +9,8 @@ def get_bitbucket_trigger(dockerfile_path=''):
  trigger = BitbucketBuildTrigger(trigger_obj, {
    'build_source': 'foo/bar',
    'dockerfile_path': dockerfile_path,
    'username': 'knownuser'
    'nickname': 'knownuser',
    'account_id': 'foo',
  })

  trigger._get_client = get_mock_bitbucket
@@ -65,7 +65,7 @@ def get_mock_github():
    repo_mock.permissions.admin = namespace == 'knownuser'
    return repo_mock

  def get_user_repos_mock():
  def get_user_repos_mock(type='all', sort='created'):
    return [get_repo_mock('knownuser', 'somerepo')]

  def get_org_repos_mock(type='all'):
@@ -123,7 +123,7 @@ def get_mock_github():
    otherbranch.commit = get_commit_mock('aaaaaaa')
    return [master, otherbranch]

  def get_file_contents_mock(filepath):
  def get_contents_mock(filepath):
    if filepath == 'Dockerfile':
      m = Mock()
      m.content = 'hello world'
@@ -168,7 +168,7 @@ def get_mock_github():
  repo_mock.get_tags = Mock(side_effect=get_tags_mock)
  repo_mock.get_branches = Mock(side_effect=get_branches_mock)
  repo_mock.get_commit = Mock(side_effect=get_commit_mock)
  repo_mock.get_file_contents = Mock(side_effect=get_file_contents_mock)
  repo_mock.get_contents = Mock(side_effect=get_contents_mock)
  repo_mock.get_git_tree = Mock(side_effect=get_git_tree_mock)

  gh_mock = Mock()
@@ -1,219 +1,598 @@
from datetime import datetime
from mock import Mock
import base64
import json

from contextlib import contextmanager

import gitlab

from httmock import urlmatch, HTTMock

from buildtrigger.gitlabhandler import GitLabBuildTrigger
from util.morecollections import AttrDict

def get_gitlab_trigger(dockerfile_path=''):
  trigger_obj = AttrDict(dict(auth_token='foobar', id='sometrigger'))
  trigger = GitLabBuildTrigger(trigger_obj, {
    'build_source': 'foo/bar',
    'dockerfile_path': dockerfile_path,
    'username': 'knownuser'
  })

  trigger._get_authorized_client = get_mock_gitlab(with_nulls=False)
  return trigger
@urlmatch(netloc=r'fakegitlab')
def catchall_handler(url, request):
  return {'status_code': 404}

def adddeploykey_mock(project_id, name, public_key):
  return {'id': 'foo'}

def addprojecthook_mock(project_id, webhook_url, push=False):
  return {'id': 'foo'}
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/users$')
def users_handler(url, request):
  if not request.headers.get('Authorization') == 'Bearer foobar':
    return {'status_code': 401}

def get_currentuser_mock():
  return {
    'username': 'knownuser'
  }

def project(namespace, name, is_org=False):
  project_access = None

  if name != 'null':
    if namespace == 'knownuser':
      project_access = {
        'access_level': 50,
      }
    else:
      project_access = {
        'access_level': 0,
      }

  data = {
    'id': '%s/%s' % (namespace, name),
    'default_branch': 'master',
    'namespace': {
      'id': namespace,
      'path': namespace,
      'name': namespace,
    },
    'path': name,
    'path_with_namespace': '%s/%s' % (namespace, name),
    'description': 'some %s repo' % name,
    'last_activity_at': str(datetime.utcfromtimestamp(0)),
    'web_url': 'https://bitbucket.org/%s/%s' % (namespace, name),
    'ssh_url_to_repo': 'git://%s/%s' % (namespace, name),
    'public': name != 'somerepo',
    'permissions': {
      'project_access': project_access,
      'group_access': {'access_level': 0},
    },
    'owner': {
      'avatar_url': 'avatarurl',
  if url.query.find('knownuser') < 0:
    return {
      'status_code': 200,
      'headers': {
        'Content-Type': 'application/json',
      },
      'content': json.dumps([]),
    }
  }

  if name == 'null':
    del data['owner']['avatar_url']
    data['namespace']['avatar'] = None
  elif is_org:
    del data['owner']['avatar_url']
    data['namespace']['avatar'] = {'url': 'avatarurl'}

  return data

def getprojects_mock(with_nulls=False):
  if with_nulls:
    def _getprojs(page=1, per_page=100):
      return [
        project('someorg', 'null', is_org=True),
      ]
    return _getprojs

  else:
    def _getprojs(page=1, per_page=100):
      return [
        project('knownuser', 'somerepo'),
        project('someorg', 'somerepo', is_org=True),
        project('someorg', 'anotherrepo', is_org=True),
      ]
    return _getprojs

def getproject_mock(project_name):
  if project_name == 'knownuser/somerepo':
    return project('knownuser', 'somerepo')

  if project_name == 'foo/bar':
    return project('foo', 'bar', is_org=True)

  return False


def getbranches_mock(project_id):
  return [
    {
      'name': 'master',
      'commit': {
        'id': 'aaaaaaa',
      }
    },
    {
      'name': 'otherbranch',
      'commit': {
        'id': 'aaaaaaa',
      }
    },
  ]

def getrepositorytags_mock(project_id):
  return [
    {
      'name': 'sometag',
      'commit': {
        'id': 'aaaaaaa',
      }
    },
    {
      'name': 'someothertag',
      'commit': {
        'id': 'aaaaaaa',
      }
    },
  ]

def getrepositorytree_mock(project_id, ref_name='master'):
  return [
    {'name': 'README'},
    {'name': 'Dockerfile'},
  ]

def getrepositorycommit_mock(project_id, commit_sha):
  if commit_sha != 'aaaaaaa':
    return False

  return {
    'id': 'aaaaaaa',
    'message': 'some message',
    'committed_date': 'now',
  }

def getusers_mock(search=None):
  if search == 'knownuser':
    return [
    'status_code': 200,
    'headers': {
      'Content-Type': 'application/json',
    },
    'content': json.dumps([
      {
        'username': 'knownuser',
        'avatar_url': 'avatarurl',
        "id": 1,
        "username": "knownuser",
        "name": "Known User",
        "state": "active",
        "avatar_url": "avatarurl",
        "web_url": "https://bitbucket.org/knownuser",
      },
    ]),
  }


@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/user$')
def user_handler(_, request):
  if not request.headers.get('Authorization') == 'Bearer foobar':
    return {'status_code': 401}

  return {
    'status_code': 200,
    'headers': {
      'Content-Type': 'application/json',
    },
    'content': json.dumps({
      "id": 1,
      "username": "john_smith",
      "email": "john@example.com",
      "name": "John Smith",
      "state": "active",
    }),
  }


@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/foo%2Fbar$')
def project_handler(_, request):
  if not request.headers.get('Authorization') == 'Bearer foobar':
    return {'status_code': 401}

  return {
    'status_code': 200,
    'headers': {
      'Content-Type': 'application/json',
    },
    'content': json.dumps({
      "id": 4,
      "description": None,
      "default_branch": "master",
      "visibility": "private",
      "path_with_namespace": "someorg/somerepo",
      "ssh_url_to_repo": "git@example.com:someorg/somerepo.git",
      "web_url": "http://example.com/someorg/somerepo",
    }),
  }


@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/tree$')
def project_tree_handler(_, request):
  if not request.headers.get('Authorization') == 'Bearer foobar':
    return {'status_code': 401}

  return {
    'status_code': 200,
    'headers': {
      'Content-Type': 'application/json',
    },
    'content': json.dumps([
      {
        "id": "a1e8f8d745cc87e3a9248358d9352bb7f9a0aeba",
        "name": "Dockerfile",
        "type": "tree",
        "path": "files/Dockerfile",
        "mode": "040000",
      },
    ]),
  }


@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/tags$')
def project_tags_handler(_, request):
  if not request.headers.get('Authorization') == 'Bearer foobar':
    return {'status_code': 401}

  return {
    'status_code': 200,
    'headers': {
      'Content-Type': 'application/json',
    },
    'content': json.dumps([
      {
        'name': 'sometag',
        'commit': {
          'id': '60a8ff033665e1207714d6670fcd7b65304ec02f',
        },
      },
      {
        'name': 'someothertag',
        'commit': {
          'id': '60a8ff033665e1207714d6670fcd7b65304ec02f',
        },
      },
    ]),
  }


@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/branches$')
def project_branches_handler(_, request):
  if not request.headers.get('Authorization') == 'Bearer foobar':
    return {'status_code': 401}

  return {
    'status_code': 200,
    'headers': {
      'Content-Type': 'application/json',
    },
    'content': json.dumps([
      {
        'name': 'master',
        'commit': {
          'id': '60a8ff033665e1207714d6670fcd7b65304ec02f',
        },
      },
      {
        'name': 'otherbranch',
        'commit': {
          'id': '60a8ff033665e1207714d6670fcd7b65304ec02f',
        },
      },
    ]),
  }


@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/branches/master$')
def project_branch_handler(_, request):
  if not request.headers.get('Authorization') == 'Bearer foobar':
    return {'status_code': 401}

  return {
    'status_code': 200,
    'headers': {
      'Content-Type': 'application/json',
    },
    'content': json.dumps({
      "name": "master",
      "merged": True,
      "protected": True,
      "developers_can_push": False,
      "developers_can_merge": False,
      "commit": {
        "author_email": "john@example.com",
        "author_name": "John Smith",
        "authored_date": "2012-06-27T05:51:39-07:00",
        "committed_date": "2012-06-28T03:44:20-07:00",
        "committer_email": "john@example.com",
        "committer_name": "John Smith",
        "id": "60a8ff033665e1207714d6670fcd7b65304ec02f",
        "short_id": "7b5c3cc",
        "title": "add projects API",
        "message": "add projects API",
        "parent_ids": [
          "4ad91d3c1144c406e50c7b33bae684bd6837faf8",
        ],
      },
    }),
  }


@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/namespaces/someorg$')
def namespace_handler(_, request):
  if not request.headers.get('Authorization') == 'Bearer foobar':
    return {'status_code': 401}

  return {
    'status_code': 200,
    'headers': {
      'Content-Type': 'application/json',
    },
    'content': json.dumps({
      "id": 2,
      "name": "someorg",
      "path": "someorg",
      "kind": "group",
      "full_path": "someorg",
      "parent_id": None,
      "members_count_with_descendants": 2
    }),
  }


@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/namespaces/knownuser$')
def user_namespace_handler(_, request):
  if not request.headers.get('Authorization') == 'Bearer foobar':
    return {'status_code': 401}

  return {
    'status_code': 200,
    'headers': {
      'Content-Type': 'application/json',
    },
    'content': json.dumps({
      "id": 1,
      "name": "knownuser",
      "path": "knownuser",
      "kind": "user",
      "full_path": "knownuser",
      "parent_id": None,
      "members_count_with_descendants": 2
    }),
  }


@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/namespaces(/)?$')
def namespaces_handler(_, request):
  if not request.headers.get('Authorization') == 'Bearer foobar':
    return {'status_code': 401}

  return {
    'status_code': 200,
    'headers': {
      'Content-Type': 'application/json',
    },
    'content': json.dumps([{
      "id": 2,
      "name": "someorg",
      "path": "someorg",
      "kind": "group",
      "full_path": "someorg",
      "parent_id": None,
      "web_url": "http://gitlab.com/groups/someorg",
      "members_count_with_descendants": 2
    }]),
  }


def get_projects_handler(add_permissions_block):
  @urlmatch(netloc=r'fakegitlab', path=r'/api/v4/groups/2/projects$')
  def projects_handler(_, request):
    if not request.headers.get('Authorization') == 'Bearer foobar':
      return {'status_code': 401}

    permissions_block = {
      "project_access": {
        "access_level": 10,
        "notification_level": 3
      },
      "group_access": {
        "access_level": 20,
        "notification_level": 3
      },
    }

    return {
      'status_code': 200,
      'headers': {
        'Content-Type': 'application/json',
      },
      'content': json.dumps([{
        "id": 4,
        "name": "Some project",
        "description": None,
        "default_branch": "master",
        "visibility": "private",
        "path": "someproject",
        "path_with_namespace": "someorg/someproject",
        "last_activity_at": "2013-09-30T13:46:02Z",
        "web_url": "http://example.com/someorg/someproject",
        "permissions": permissions_block if add_permissions_block else None,
      },
      {
        "id": 5,
        "name": "Another project",
        "description": None,
        "default_branch": "master",
        "visibility": "public",
        "path": "anotherproject",
        "path_with_namespace": "someorg/anotherproject",
        "last_activity_at": "2013-09-30T13:46:02Z",
        "web_url": "http://example.com/someorg/anotherproject",
      }]),
    }
  return projects_handler


def get_group_handler(null_avatar):
  @urlmatch(netloc=r'fakegitlab', path=r'/api/v4/groups/2$')
  def group_handler(_, request):
    if not request.headers.get('Authorization') == 'Bearer foobar':
      return {'status_code': 401}

    return {
      'status_code': 200,
      'headers': {
        'Content-Type': 'application/json',
      },
      'content': json.dumps({
        "id": 1,
        "name": "SomeOrg Group",
        "path": "someorg",
        "description": "An interesting group",
        "visibility": "public",
        "lfs_enabled": True,
        "avatar_url": 'avatar_url' if not null_avatar else None,
        "web_url": "http://gitlab.com/groups/someorg",
        "request_access_enabled": False,
        "full_name": "SomeOrg Group",
        "full_path": "someorg",
        "parent_id": None,
      }),
    }
  return group_handler


@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/files/Dockerfile$')
def dockerfile_handler(_, request):
  if not request.headers.get('Authorization') == 'Bearer foobar':
    return {'status_code': 401}

  return {
    'status_code': 200,
    'headers': {
      'Content-Type': 'application/json',
    },
    'content': json.dumps({
      "file_name": "Dockerfile",
      "file_path": "Dockerfile",
      "size": 10,
      "encoding": "base64",
      "content": base64.b64encode('hello world'),
      "ref": "master",
      "blob_id": "79f7bbd25901e8334750839545a9bd021f0e4c83",
      "commit_id": "d5a3ff139356ce33e37e73add446f16869741b50",
      "last_commit_id": "570e7b2abdd848b95f2f578043fc23bd6f6fd24d"
    }),
  }


@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/files/somesubdir%2FDockerfile$')
def sub_dockerfile_handler(_, request):
  if not request.headers.get('Authorization') == 'Bearer foobar':
    return {'status_code': 401}

  return {
    'status_code': 200,
    'headers': {
      'Content-Type': 'application/json',
    },
    'content': json.dumps({
      "file_name": "Dockerfile",
      "file_path": "somesubdir/Dockerfile",
      "size": 10,
      "encoding": "base64",
      "content": base64.b64encode('hi universe'),
      "ref": "master",
      "blob_id": "79f7bbd25901e8334750839545a9bd021f0e4c83",
      "commit_id": "d5a3ff139356ce33e37e73add446f16869741b50",
      "last_commit_id": "570e7b2abdd848b95f2f578043fc23bd6f6fd24d"
    }),
  }


@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/tags/sometag$')
def tag_handler(_, request):
  if not request.headers.get('Authorization') == 'Bearer foobar':
    return {'status_code': 401}

  return {
    'status_code': 200,
    'headers': {
      'Content-Type': 'application/json',
    },
    'content': json.dumps({
      "name": "sometag",
      "message": "some cool message",
      "target": "60a8ff033665e1207714d6670fcd7b65304ec02f",
      "commit": {
        "id": "60a8ff033665e1207714d6670fcd7b65304ec02f",
        "short_id": "60a8ff03",
        "title": "Initial commit",
        "created_at": "2017-07-26T11:08:53.000+02:00",
        "parent_ids": [
          "f61c062ff8bcbdb00e0a1b3317a91aed6ceee06b"
        ],
        "message": "v5.0.0\n",
        "author_name": "Arthur Verschaeve",
        "author_email": "contact@arthurverschaeve.be",
        "authored_date": "2015-02-01T21:56:31.000+01:00",
        "committer_name": "Arthur Verschaeve",
        "committer_email": "contact@arthurverschaeve.be",
        "committed_date": "2015-02-01T21:56:31.000+01:00"
      },
      "release": None,
    }),
  }


@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/foo%2Fbar/repository/commits/60a8ff033665e1207714d6670fcd7b65304ec02f$')
def commit_handler(_, request):
  if not request.headers.get('Authorization') == 'Bearer foobar':
    return {'status_code': 401}

  return {
    'status_code': 200,
    'headers': {
      'Content-Type': 'application/json',
    },
    'content': json.dumps({
      "id": "60a8ff033665e1207714d6670fcd7b65304ec02f",
      "short_id": "60a8ff03366",
      "title": "Sanitize for network graph",
      "author_name": "someguy",
      "author_email": "some.guy@gmail.com",
      "committer_name": "Some Guy",
      "committer_email": "some.guy@gmail.com",
      "created_at": "2012-09-20T09:06:12+03:00",
      "message": "Sanitize for network graph",
      "committed_date": "2012-09-20T09:06:12+03:00",
      "authored_date": "2012-09-20T09:06:12+03:00",
      "parent_ids": [
        "ae1d9fb46aa2b07ee9836d49862ec4e2c46fbbba"
      ],
      "last_pipeline" : {
        "id": 8,
        "ref": "master",
        "sha": "2dc6aa325a317eda67812f05600bdf0fcdc70ab0",
        "status": "created",
      },
      "stats": {
        "additions": 15,
        "deletions": 10,
        "total": 25
      },
      "status": "running"
    }),
  }


@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/deploy_keys$', method='POST')
def create_deploykey_handler(_, request):
  if not request.headers.get('Authorization') == 'Bearer foobar':
    return {'status_code': 401}

  return {
    'status_code': 200,
    'headers': {
      'Content-Type': 'application/json',
    },
    'content': json.dumps({
      "id": 1,
      "title": "Public key",
      "key": "ssh-rsa some stuff",
      "created_at": "2013-10-02T10:12:29Z",
      "can_push": False,
    }),
  }


@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/hooks$', method='POST')
def create_hook_handler(_, request):
  if not request.headers.get('Authorization') == 'Bearer foobar':
    return {'status_code': 401}

  return {
    'status_code': 200,
    'headers': {
      'Content-Type': 'application/json',
    },
    'content': json.dumps({
      "id": 1,
      "url": "http://example.com/hook",
      "project_id": 4,
      "push_events": True,
      "issues_events": True,
      "confidential_issues_events": True,
      "merge_requests_events": True,
      "tag_push_events": True,
      "note_events": True,
      "job_events": True,
      "pipeline_events": True,
      "wiki_page_events": True,
      "enable_ssl_verification": True,
      "created_at": "2012-10-12T17:04:47Z",
    }),
  }


@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/hooks/1$', method='DELETE')
def delete_hook_handler(_, request):
  if not request.headers.get('Authorization') == 'Bearer foobar':
    return {'status_code': 401}

  return {
    'status_code': 200,
    'headers': {
      'Content-Type': 'application/json',
    },
    'content': json.dumps({}),
  }


@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/deploy_keys/1$', method='DELETE')
def delete_deploykey_handler(_, request):
  if not request.headers.get('Authorization') == 'Bearer foobar':
    return {'status_code': 401}

  return {
    'status_code': 200,
    'headers': {
      'Content-Type': 'application/json',
    },
    'content': json.dumps({}),
  }


@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/users/1/projects$')
def user_projects_list_handler(_, request):
  if not request.headers.get('Authorization') == 'Bearer foobar':
    return {'status_code': 401}

  return {
    'status_code': 200,
    'headers': {
      'Content-Type': 'application/json',
    },
    'content': json.dumps([
      {
        "id": 2,
        "name": "Another project",
        "description": None,
        "default_branch": "master",
        "visibility": "public",
        "path": "anotherproject",
        "path_with_namespace": "knownuser/anotherproject",
        "last_activity_at": "2013-09-30T13:46:02Z",
        "web_url": "http://example.com/knownuser/anotherproject",
      }
    ]

  return False

def getbranch_mock(repo_id, branch):
  if branch != 'master' and branch != 'otherbranch':
    return False

  return {
    'name': branch,
    'commit': {
      'id': 'aaaaaaa',
    }
    ]),
  }

def gettag_mock(repo_id, tag):
  if tag != 'sometag' and tag != 'someothertag':
    return False

  return {
    'name': tag,
    'commit': {
      'id': 'aaaaaaa',
    }
  }
@contextmanager
def get_gitlab_trigger(dockerfile_path='', add_permissions=True, missing_avatar_url=False):
  handlers = [user_handler, users_handler, project_branches_handler, project_tree_handler,
              project_handler, get_projects_handler(add_permissions), tag_handler,
              project_branch_handler, get_group_handler(missing_avatar_url), dockerfile_handler,
              sub_dockerfile_handler, namespace_handler, user_namespace_handler, namespaces_handler,
              commit_handler, create_deploykey_handler, delete_deploykey_handler,
              create_hook_handler, delete_hook_handler, project_tags_handler,
              user_projects_list_handler, catchall_handler]

def getrawfile_mock(repo_id, branch_name, path):
  if path == 'Dockerfile':
    return 'hello world'
  with HTTMock(*handlers):
    trigger_obj = AttrDict(dict(auth_token='foobar', id='sometrigger'))
    trigger = GitLabBuildTrigger(trigger_obj, {
      'build_source': 'foo/bar',
      'dockerfile_path': dockerfile_path,
      'username': 'knownuser'
    })

  if path == 'somesubdir/Dockerfile':
    return 'hi universe'
    client = gitlab.Gitlab('http://fakegitlab', oauth_token='foobar', timeout=20, api_version=4)
    client.auth()

  return False

def get_mock_gitlab(with_nulls=False):
  def _get_mock():
    mock_gitlab = Mock()
    mock_gitlab.host = 'https://bitbucket.org'

    mock_gitlab.currentuser = Mock(side_effect=get_currentuser_mock)
    mock_gitlab.getusers = Mock(side_effect=getusers_mock)

    mock_gitlab.getprojects = Mock(side_effect=getprojects_mock(with_nulls))
    mock_gitlab.getproject = Mock(side_effect=getproject_mock)
    mock_gitlab.getbranches = Mock(side_effect=getbranches_mock)

    mock_gitlab.getbranch = Mock(side_effect=getbranch_mock)
    mock_gitlab.gettag = Mock(side_effect=gettag_mock)

    mock_gitlab.getrepositorytags = Mock(side_effect=getrepositorytags_mock)
    mock_gitlab.getrepositorytree = Mock(side_effect=getrepositorytree_mock)
    mock_gitlab.getrepositorycommit = Mock(side_effect=getrepositorycommit_mock)

    mock_gitlab.getrawfile = Mock(side_effect=getrawfile_mock)

    mock_gitlab.adddeploykey = Mock(side_effect=adddeploykey_mock)
    mock_gitlab.addprojecthook = Mock(side_effect=addprojecthook_mock)
    mock_gitlab.deletedeploykey = Mock(return_value=True)
    mock_gitlab.deleteprojecthook = Mock(return_value=True)
    return mock_gitlab

  return _get_mock
    trigger._get_authorized_client = lambda: client
    yield trigger
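The fixtures above follow one httmock pattern throughout: match host and path, check the bearer token, return a canned JSON body. A self-contained sketch of that pattern against a hypothetical /api/v4/version endpoint:

import json
import requests
from httmock import urlmatch, HTTMock

@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/version$')
def version_handler(url, request):
  # Every handler checks the bearer token, mirroring the fixtures above.
  if not request.headers.get('Authorization') == 'Bearer foobar':
    return {'status_code': 401}
  return {
    'status_code': 200,
    'headers': {'Content-Type': 'application/json'},
    'content': json.dumps({'version': '11.0.0'}),
  }

with HTTMock(version_handler):
  resp = requests.get('http://fakegitlab/api/v4/version',
                      headers={'Authorization': 'Bearer foobar'})
  assert resp.json()['version'] == '11.0.0'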
@@ -86,6 +86,6 @@ def test_handle_trigger_request(bitbucket_trigger, payload, expected_error, expected_message):
  if expected_error is not None:
    with pytest.raises(expected_error) as ipe:
      bitbucket_trigger.handle_trigger_request(request)
    assert ipe.value.message == expected_message
    assert str(ipe.value) == expected_message
  else:
    assert isinstance(bitbucket_trigger.handle_trigger_request(request), PreparedBuild)
@@ -32,7 +32,7 @@ def test_handle_trigger_request(payload, expected_error, expected_message):
  if expected_error is not None:
    with pytest.raises(expected_error) as ipe:
      trigger.handle_trigger_request(request)
    assert ipe.value.message == expected_message
    assert str(ipe.value) == expected_message
  else:
    assert isinstance(trigger.handle_trigger_request(request), PreparedBuild)
@@ -46,6 +46,6 @@ def test_manual_start(run_parameters, expected_error, expected_message):
  if expected_error is not None:
    with pytest.raises(expected_error) as ipe:
      trigger.manual_start(run_parameters)
    assert ipe.value.message == expected_message
    assert str(ipe.value) == expected_message
  else:
    assert isinstance(trigger.manual_start(run_parameters), PreparedBuild)
@@ -3,12 +3,11 @@ import pytest
from buildtrigger.triggerutil import TriggerStartException
from buildtrigger.test.bitbucketmock import get_bitbucket_trigger
from buildtrigger.test.githubmock import get_github_trigger
from buildtrigger.test.gitlabmock import get_gitlab_trigger
from endpoints.building import PreparedBuild

# Note: This test suite executes a common set of tests against all the trigger types specified
# in this fixture. Each trigger's mock is expected to return the same data for all of these calls.
@pytest.fixture(params=[get_github_trigger(), get_bitbucket_trigger(), get_gitlab_trigger()])
@pytest.fixture(params=[get_github_trigger(), get_bitbucket_trigger()])
def githost_trigger(request):
  return request.param
@@ -34,7 +33,7 @@ def test_manual_start(run_parameters, expected_error, expected_message, githost_trigger):
  if expected_error is not None:
    with pytest.raises(expected_error) as ipe:
      githost_trigger.manual_start(run_parameters)
    assert ipe.value.message == expected_message
    assert str(ipe.value) == expected_message
  else:
    assert isinstance(githost_trigger.manual_start(run_parameters), PreparedBuild)
@@ -59,7 +58,7 @@ def test_list_field_values(name, expected, githost_trigger):
  assert githost_trigger.list_field_values(name) == expected


def test_list_build_source_namespaces(githost_trigger):
def test_list_build_source_namespaces():
  namespaces_expected = [
    {
      'personal': True,
@@ -79,7 +78,7 @@ def test_list_build_source_namespaces(githost_trigger):
    }
  ]

  found = githost_trigger.list_build_source_namespaces()
  found = get_bitbucket_trigger().list_build_source_namespaces()
  found.sort()

  namespaces_expected.sort()
@@ -116,10 +115,7 @@ def test_list_build_sources_for_namespace(namespace, expected, githost_trigger):
  assert githost_trigger.list_build_sources_for_namespace(namespace) == expected


def test_activate(githost_trigger):
def test_activate_and_deactivate(githost_trigger):
  _, private_key = githost_trigger.activate('http://some/url')
  assert 'private_key' in private_key


def test_deactivate(githost_trigger):
  githost_trigger.deactivate()
@@ -63,7 +63,7 @@ def test_handle_trigger_request(github_trigger, payload, expected_error, expected_message):
  if expected_error is not None:
    with pytest.raises(expected_error) as ipe:
      github_trigger.handle_trigger_request(request)
    assert ipe.value.message == expected_message
    assert str(ipe.value) == expected_message
  else:
    assert isinstance(github_trigger.handle_trigger_request(request), PreparedBuild)
@@ -88,3 +88,30 @@ def test_lookup_user(username, expected_response, github_trigger):

def test_list_build_subdirs(github_trigger):
  assert github_trigger.list_build_subdirs() == ['Dockerfile', 'somesubdir/Dockerfile']


def test_list_build_source_namespaces(github_trigger):
  namespaces_expected = [
    {
      'personal': True,
      'score': 1,
      'avatar_url': 'avatarurl',
      'id': 'knownuser',
      'title': 'knownuser',
      'url': 'https://bitbucket.org/knownuser',
    },
    {
      'score': 0,
      'title': 'someorg',
      'personal': False,
      'url': '',
      'avatar_url': 'avatarurl',
      'id': 'someorg'
    }
  ]

  found = github_trigger.list_build_source_namespaces()
  found.sort()

  namespaces_expected.sort()
  assert found == namespaces_expected
@@ -3,19 +3,20 @@ import pytest

from mock import Mock

from buildtrigger.test.gitlabmock import get_gitlab_trigger, get_mock_gitlab
from buildtrigger.test.gitlabmock import get_gitlab_trigger
from buildtrigger.triggerutil import (SkipRequestException, ValidationRequestException,
                                      InvalidPayloadException)
                                      InvalidPayloadException, TriggerStartException)
from endpoints.building import PreparedBuild
from util.morecollections import AttrDict

@pytest.fixture
@pytest.fixture()
def gitlab_trigger():
  return get_gitlab_trigger()
  with get_gitlab_trigger() as t:
    yield t


def test_list_build_subdirs(gitlab_trigger):
  assert gitlab_trigger.list_build_subdirs() == ['/Dockerfile']
  assert gitlab_trigger.list_build_subdirs() == ['Dockerfile']


@pytest.mark.parametrize('dockerfile_path, contents', [
@@ -24,8 +25,8 @@ def test_list_build_subdirs(gitlab_trigger):
  ('unknownpath', None),
])
def test_load_dockerfile_contents(dockerfile_path, contents):
  trigger = get_gitlab_trigger(dockerfile_path)
  assert trigger.load_dockerfile_contents() == contents
  with get_gitlab_trigger(dockerfile_path=dockerfile_path) as trigger:
    assert trigger.load_dockerfile_contents() == contents


@pytest.mark.parametrize('email, expected_response', [
@@ -37,26 +38,50 @@ def test_lookup_user(email, expected_response, gitlab_trigger):
  assert gitlab_trigger.lookup_user(email) == expected_response


def test_null_permissions(gitlab_trigger):
  gitlab_trigger._get_authorized_client = get_mock_gitlab(with_nulls=True)
  sources = gitlab_trigger.list_build_sources_for_namespace('someorg')
  source = sources[0]
  assert source['has_admin_permissions']
def test_null_permissions():
  with get_gitlab_trigger(add_permissions=False) as trigger:
    sources = trigger.list_build_sources_for_namespace('someorg')
    source = sources[0]
    assert source['has_admin_permissions']


def test_null_avatar(gitlab_trigger):
  gitlab_trigger._get_authorized_client = get_mock_gitlab(with_nulls=True)
  namespace_data = gitlab_trigger.list_build_source_namespaces()
  expected = {
    'avatar_url': None,
    'personal': False,
    'title': 'someorg',
    'url': 'https://bitbucket.org/someorg',
    'score': 1,
    'id': 'someorg',
  }
def test_list_build_sources():
  with get_gitlab_trigger() as trigger:
    sources = trigger.list_build_sources_for_namespace('someorg')
    assert sources == [
      {
        'last_updated': 1380548762,
        'name': u'someproject',
        'url': u'http://example.com/someorg/someproject',
        'private': True,
        'full_name': u'someorg/someproject',
        'has_admin_permissions': False,
        'description': ''
      },
      {
        'last_updated': 1380548762,
        'name': u'anotherproject',
        'url': u'http://example.com/someorg/anotherproject',
        'private': False,
        'full_name': u'someorg/anotherproject',
        'has_admin_permissions': True,
        'description': '',
      }]

  assert namespace_data == [expected]

def test_null_avatar():
  with get_gitlab_trigger(missing_avatar_url=True) as trigger:
    namespace_data = trigger.list_build_source_namespaces()
    expected = {
      'avatar_url': None,
      'personal': False,
      'title': u'someorg',
      'url': u'http://gitlab.com/groups/someorg',
      'score': 1,
      'id': '2',
    }

    assert namespace_data == [expected]


@pytest.mark.parametrize('payload, expected_error, expected_message', [
@@ -107,8 +132,100 @@ def test_handle_trigger_request(gitlab_trigger, payload, expected_error, expecte
  if expected_error is not None:
    with pytest.raises(expected_error) as ipe:
      gitlab_trigger.handle_trigger_request(request)
    assert ipe.value.message == expected_message
    assert str(ipe.value) == expected_message
  else:
    assert isinstance(gitlab_trigger.handle_trigger_request(request), PreparedBuild)


@pytest.mark.parametrize('run_parameters, expected_error, expected_message', [
  # No branch or tag specified: use the commit of the default branch.
  ({}, None, None),

  # Invalid branch.
  ({'refs': {'kind': 'branch', 'name': 'invalid'}}, TriggerStartException,
   'Could not find branch in repository'),

  # Invalid tag.
  ({'refs': {'kind': 'tag', 'name': 'invalid'}}, TriggerStartException,
   'Could not find tag in repository'),

  # Valid branch.
  ({'refs': {'kind': 'branch', 'name': 'master'}}, None, None),

  # Valid tag.
  ({'refs': {'kind': 'tag', 'name': 'sometag'}}, None, None),
])
def test_manual_start(run_parameters, expected_error, expected_message, gitlab_trigger):
  if expected_error is not None:
    with pytest.raises(expected_error) as ipe:
      gitlab_trigger.manual_start(run_parameters)
    assert str(ipe.value) == expected_message
  else:
    assert isinstance(gitlab_trigger.manual_start(run_parameters), PreparedBuild)


def test_activate_and_deactivate(gitlab_trigger):
  _, private_key = gitlab_trigger.activate('http://some/url')
  assert 'private_key' in private_key

  gitlab_trigger.deactivate()


@pytest.mark.parametrize('name, expected', [
  ('refs', [
    {'kind': 'branch', 'name': 'master'},
    {'kind': 'branch', 'name': 'otherbranch'},
    {'kind': 'tag', 'name': 'sometag'},
    {'kind': 'tag', 'name': 'someothertag'},
  ]),
  ('tag_name', set(['sometag', 'someothertag'])),
  ('branch_name', set(['master', 'otherbranch'])),
  ('invalid', None)
])
def test_list_field_values(name, expected, gitlab_trigger):
  if expected is None:
    assert gitlab_trigger.list_field_values(name) is None
  elif isinstance(expected, set):
    assert set(gitlab_trigger.list_field_values(name)) == set(expected)
  else:
    assert gitlab_trigger.list_field_values(name) == expected


@pytest.mark.parametrize('namespace, expected', [
  ('', []),
  ('unknown', []),

  ('knownuser', [
    {
      'last_updated': 1380548762,
      'name': u'anotherproject',
      'url': u'http://example.com/knownuser/anotherproject',
      'private': False,
      'full_name': u'knownuser/anotherproject',
      'has_admin_permissions': True,
      'description': ''
    },
  ]),

  ('someorg', [
    {
      'last_updated': 1380548762,
      'name': u'someproject',
      'url': u'http://example.com/someorg/someproject',
      'private': True,
      'full_name': u'someorg/someproject',
      'has_admin_permissions': False,
      'description': ''
    },
    {
      'last_updated': 1380548762,
      'name': u'anotherproject',
      'url': u'http://example.com/someorg/anotherproject',
      'private': False,
      'full_name': u'someorg/anotherproject',
      'has_admin_permissions': True,
      'description': '',
    }]),
])
def test_list_build_sources_for_namespace(namespace, expected, gitlab_trigger):
  assert gitlab_trigger.list_build_sources_for_namespace(namespace) == expected

@@ -0,0 +1,572 @@
import json

import pytest

from jsonschema import validate

from buildtrigger.customhandler import custom_trigger_payload
from buildtrigger.basehandler import METADATA_SCHEMA
from buildtrigger.bitbuckethandler import get_transformed_webhook_payload as bb_webhook
from buildtrigger.bitbuckethandler import get_transformed_commit_info as bb_commit
from buildtrigger.githubhandler import get_transformed_webhook_payload as gh_webhook
from buildtrigger.gitlabhandler import get_transformed_webhook_payload as gl_webhook
from buildtrigger.triggerutil import SkipRequestException

def assertSkipped(filename, processor, *args, **kwargs):
  with open('buildtrigger/test/triggerjson/%s.json' % filename) as f:
    payload = json.loads(f.read())

  nargs = [payload]
  nargs.extend(args)

  with pytest.raises(SkipRequestException):
    processor(*nargs, **kwargs)


def assertSchema(filename, expected, processor, *args, **kwargs):
  with open('buildtrigger/test/triggerjson/%s.json' % filename) as f:
    payload = json.loads(f.read())

  nargs = [payload]
  nargs.extend(args)

  created = processor(*nargs, **kwargs)
  assert created == expected
  validate(created, METADATA_SCHEMA)


def test_custom_custom():
  expected = {
    u'commit':u'1c002dd',
    u'commit_info': {
      u'url': u'gitsoftware.com/repository/commits/1234567',
      u'date': u'timestamp',
      u'message': u'initial commit',
      u'committer': {
        u'username': u'user',
        u'url': u'gitsoftware.com/users/user',
        u'avatar_url': u'gravatar.com/user.png'
      },
      u'author': {
        u'username': u'user',
        u'url': u'gitsoftware.com/users/user',
        u'avatar_url': u'gravatar.com/user.png'
      }
    },
    u'ref': u'refs/heads/master',
    u'default_branch': u'master',
    u'git_url': u'foobar',
  }

  assertSchema('custom_webhook', expected, custom_trigger_payload, git_url='foobar')


def test_custom_gitlab():
  expected = {
    'commit': u'fb88379ee45de28a0a4590fddcbd8eff8b36026e',
    'ref': u'refs/heads/master',
    'git_url': u'git@gitlab.com:jsmith/somerepo.git',
    'commit_info': {
      'url': u'https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e',
      'date': u'2015-08-13T19:33:18+00:00',
      'message': u'Fix link\n',
    },
  }

  assertSchema('gitlab_webhook', expected, custom_trigger_payload, git_url='git@gitlab.com:jsmith/somerepo.git')


def test_custom_github():
  expected = {
    'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
    'ref': u'refs/heads/master',
    'default_branch': u'master',
    'git_url': u'git@github.com:jsmith/anothertest.git',
    'commit_info': {
      'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
      'date': u'2015-09-11T14:26:16-04:00',
      'message': u'Update Dockerfile',
      'committer': {
        'username': u'jsmith',
      },
      'author': {
        'username': u'jsmith',
      },
    },
  }

  assertSchema('github_webhook', expected, custom_trigger_payload,
               git_url='git@github.com:jsmith/anothertest.git')


def test_custom_bitbucket():
  expected = {
    "commit": u"af64ae7188685f8424040b4735ad12941b980d75",
    "ref": u"refs/heads/master",
    "git_url": u"git@bitbucket.org:jsmith/another-repo.git",
    "commit_info": {
      "url": u"https://bitbucket.org/jsmith/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75",
      "date": u"2015-09-10T20:40:54+00:00",
      "message": u"Dockerfile edited online with Bitbucket",
      "author": {
        "username": u"John Smith",
        "avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/",
      },
      "committer": {
        "username": u"John Smith",
        "avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/",
      },
    },
  }

  assertSchema('bitbucket_webhook', expected, custom_trigger_payload, git_url='git@bitbucket.org:jsmith/another-repo.git')


def test_bitbucket_customer_payload_noauthor():
  expected = {
    "commit": "a0ec139843b2bb281ab21a433266ddc498e605dc",
    "ref": "refs/heads/master",
    "git_url": "git@bitbucket.org:somecoollabs/svc-identity.git",
    "commit_info": {
      "url": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc",
      "date": "2015-09-25T00:55:08+00:00",
      "message": "Update version.py to 0.1.2 [skip ci]\n\n(by utilitybelt/scripts/autotag_version.py)\n",
      "committer": {
        "username": "CodeShip Tagging",
        "avatar_url": "https://bitbucket.org/account/SomeCoolLabs_CodeShip/avatar/32/",
      },
    },
  }

  assertSchema('bitbucket_customer_example_noauthor', expected, bb_webhook)


def test_bitbucket_customer_payload_tag():
  expected = {
    "commit": "a0ec139843b2bb281ab21a433266ddc498e605dc",
    "ref": "refs/tags/0.1.2",
    "git_url": "git@bitbucket.org:somecoollabs/svc-identity.git",
    "commit_info": {
      "url": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc",
      "date": "2015-09-25T00:55:08+00:00",
      "message": "Update version.py to 0.1.2 [skip ci]\n\n(by utilitybelt/scripts/autotag_version.py)\n",
      "committer": {
        "username": "CodeShip Tagging",
        "avatar_url": "https://bitbucket.org/account/SomeCoolLabs_CodeShip/avatar/32/",
      },
    },
  }

  assertSchema('bitbucket_customer_example_tag', expected, bb_webhook)


def test_bitbucket_commit():
  ref = 'refs/heads/somebranch'
  default_branch = 'somebranch'
  repository_name = 'foo/bar'

  def lookup_author(_):
    return {
      'user': {
        'display_name': 'cooluser',
        'avatar': 'http://some/avatar/url'
      }
    }

  expected = {
    "commit": u"abdeaf1b2b4a6b9ddf742c1e1754236380435a62",
    "ref": u"refs/heads/somebranch",
    "git_url": u"git@bitbucket.org:foo/bar.git",
    "default_branch": u"somebranch",
    "commit_info": {
      "url": u"https://bitbucket.org/foo/bar/commits/abdeaf1b2b4a6b9ddf742c1e1754236380435a62",
      "date": u"2012-07-24 00:26:36",
      "message": u"making some changes\n",
      "author": {
        "avatar_url": u"http://some/avatar/url",
        "username": u"cooluser",
      }
    }
  }

  assertSchema('bitbucket_commit', expected, bb_commit, ref, default_branch,
               repository_name, lookup_author)

def test_bitbucket_webhook_payload():
  expected = {
    "commit": u"af64ae7188685f8424040b4735ad12941b980d75",
    "ref": u"refs/heads/master",
    "git_url": u"git@bitbucket.org:jsmith/another-repo.git",
    "commit_info": {
      "url": u"https://bitbucket.org/jsmith/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75",
      "date": u"2015-09-10T20:40:54+00:00",
      "message": u"Dockerfile edited online with Bitbucket",
      "author": {
        "username": u"John Smith",
        "avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/",
      },
      "committer": {
        "username": u"John Smith",
        "avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/",
      },
    },
  }

  assertSchema('bitbucket_webhook', expected, bb_webhook)


def test_github_webhook_payload_slash_branch():
  expected = {
    'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
    'ref': u'refs/heads/slash/branch',
    'default_branch': u'master',
    'git_url': u'git@github.com:jsmith/anothertest.git',
    'commit_info': {
      'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
      'date': u'2015-09-11T14:26:16-04:00',
      'message': u'Update Dockerfile',
      'committer': {
        'username': u'jsmith',
      },
      'author': {
        'username': u'jsmith',
      },
    },
  }

  assertSchema('github_webhook_slash_branch', expected, gh_webhook)


def test_github_webhook_payload():
  expected = {
    'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
    'ref': u'refs/heads/master',
    'default_branch': u'master',
    'git_url': u'git@github.com:jsmith/anothertest.git',
    'commit_info': {
      'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
      'date': u'2015-09-11T14:26:16-04:00',
      'message': u'Update Dockerfile',
      'committer': {
        'username': u'jsmith',
      },
      'author': {
        'username': u'jsmith',
      },
    },
  }

  assertSchema('github_webhook', expected, gh_webhook)


def test_github_webhook_payload_with_lookup():
  expected = {
    'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
    'ref': u'refs/heads/master',
    'default_branch': u'master',
    'git_url': u'git@github.com:jsmith/anothertest.git',
    'commit_info': {
      'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
      'date': u'2015-09-11T14:26:16-04:00',
      'message': u'Update Dockerfile',
      'committer': {
        'username': u'jsmith',
        'url': u'http://github.com/jsmith',
        'avatar_url': u'http://some/avatar/url',
      },
      'author': {
        'username': u'jsmith',
        'url': u'http://github.com/jsmith',
        'avatar_url': u'http://some/avatar/url',
      },
    },
  }

  def lookup_user(_):
    return {
      'html_url': 'http://github.com/jsmith',
      'avatar_url': 'http://some/avatar/url'
    }

  assertSchema('github_webhook', expected, gh_webhook, lookup_user=lookup_user)


def test_github_webhook_payload_missing_fields_with_lookup():
  expected = {
    'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
    'ref': u'refs/heads/master',
    'default_branch': u'master',
    'git_url': u'git@github.com:jsmith/anothertest.git',
    'commit_info': {
      'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
      'date': u'2015-09-11T14:26:16-04:00',
      'message': u'Update Dockerfile'
    },
  }

  def lookup_user(username):
    if not username:
      raise Exception('Fail!')

    return {
      'html_url': 'http://github.com/jsmith',
      'avatar_url': 'http://some/avatar/url'
    }

  assertSchema('github_webhook_missing', expected, gh_webhook, lookup_user=lookup_user)


def test_gitlab_webhook_payload():
  expected = {
    'commit': u'fb88379ee45de28a0a4590fddcbd8eff8b36026e',
    'ref': u'refs/heads/master',
    'git_url': u'git@gitlab.com:jsmith/somerepo.git',
    'commit_info': {
      'url': u'https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e',
      'date': u'2015-08-13T19:33:18+00:00',
      'message': u'Fix link\n',
    },
  }

  assertSchema('gitlab_webhook', expected, gl_webhook)


def test_github_webhook_payload_known_issue():
  expected = {
    "commit": "118b07121695d9f2e40a5ff264fdcc2917680870",
    "ref": "refs/heads/master",
    "default_branch": "master",
    "git_url": "git@github.com:jsmith/docker-test.git",
    "commit_info": {
      "url": "https://github.com/jsmith/docker-test/commit/118b07121695d9f2e40a5ff264fdcc2917680870",
      "date": "2015-09-25T14:55:11-04:00",
      "message": "Fail",
    },
  }

  assertSchema('github_webhook_noname', expected, gh_webhook)


def test_github_webhook_payload_missing_fields():
  expected = {
    'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
    'ref': u'refs/heads/master',
    'default_branch': u'master',
    'git_url': u'git@github.com:jsmith/anothertest.git',
    'commit_info': {
      'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
      'date': u'2015-09-11T14:26:16-04:00',
      'message': u'Update Dockerfile'
    },
  }

  assertSchema('github_webhook_missing', expected, gh_webhook)


def test_gitlab_webhook_nocommit_payload():
  assertSkipped('gitlab_webhook_nocommit', gl_webhook)


def test_gitlab_webhook_multiple_commits():
  expected = {
    'commit': u'9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53',
    'ref': u'refs/heads/master',
    'git_url': u'git@gitlab.com:jsmith/some-test-project.git',
    'commit_info': {
      'url': u'https://gitlab.com/jsmith/some-test-project/commit/9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53',
      'date': u'2016-09-29T15:02:41+00:00',
      'message': u"Merge branch 'foobar' into 'master'\r\n\r\nAdd changelog\r\n\r\nSome merge thing\r\n\r\nSee merge request !1",
      'author': {
        'username': 'jsmith',
        'url': 'http://gitlab.com/jsmith',
        'avatar_url': 'http://some/avatar/url'
      },
    },
  }

  def lookup_user(_):
    return {
      'username': 'jsmith',
      'html_url': 'http://gitlab.com/jsmith',
      'avatar_url': 'http://some/avatar/url',
    }

  assertSchema('gitlab_webhook_multicommit', expected, gl_webhook, lookup_user=lookup_user)


def test_gitlab_webhook_for_tag():
  expected = {
    'commit': u'82b3d5ae55f7080f1e6022629cdb57bfae7cccc7',
    'commit_info': {
      'author': {
        'avatar_url': 'http://some/avatar/url',
        'url': 'http://gitlab.com/jsmith',
        'username': 'jsmith'
      },
      'date': '2015-08-13T19:33:18+00:00',
      'message': 'Fix link\n',
      'url': 'https://some/url',
    },
    'git_url': u'git@example.com:jsmith/example.git',
    'ref': u'refs/tags/v1.0.0',
  }

  def lookup_user(_):
    return {
      'username': 'jsmith',
      'html_url': 'http://gitlab.com/jsmith',
      'avatar_url': 'http://some/avatar/url',
    }

  def lookup_commit(repo_id, commit_sha):
    if commit_sha == '82b3d5ae55f7080f1e6022629cdb57bfae7cccc7':
      return {
        "id": "82b3d5ae55f7080f1e6022629cdb57bfae7cccc7",
        "message": "Fix link\n",
        "timestamp": "2015-08-13T19:33:18+00:00",
        "url": "https://some/url",
        "author_name": "Foo Guy",
        "author_email": "foo@bar.com",
      }

    return None

  assertSchema('gitlab_webhook_tag', expected, gl_webhook, lookup_user=lookup_user,
               lookup_commit=lookup_commit)


def test_gitlab_webhook_for_tag_nocommit():
  assertSkipped('gitlab_webhook_tag', gl_webhook)


def test_gitlab_webhook_for_tag_commit_sha_null():
  assertSkipped('gitlab_webhook_tag_commit_sha_null', gl_webhook)


def test_gitlab_webhook_for_tag_known_issue():
  expected = {
    'commit': u'770830e7ca132856991e6db4f7fc0f4dbe20bd5f',
    'ref': u'refs/tags/thirdtag',
    'git_url': u'git@gitlab.com:someuser/some-test-project.git',
    'commit_info': {
      'url': u'https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f',
      'date': u'2019-10-17T18:07:48Z',
      'message': u'Update Dockerfile',
      'author': {
        'username': 'someuser',
        'url': 'http://gitlab.com/someuser',
        'avatar_url': 'http://some/avatar/url',
      },
    },
  }

  def lookup_user(_):
    return {
      'username': 'someuser',
      'html_url': 'http://gitlab.com/someuser',
      'avatar_url': 'http://some/avatar/url',
    }

  assertSchema('gitlab_webhook_tag_commit_issue', expected, gl_webhook, lookup_user=lookup_user)


def test_gitlab_webhook_payload_known_issue():
  expected = {
    'commit': u'770830e7ca132856991e6db4f7fc0f4dbe20bd5f',
    'ref': u'refs/tags/fourthtag',
    'git_url': u'git@gitlab.com:someuser/some-test-project.git',
    'commit_info': {
      'url': u'https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f',
      'date': u'2019-10-17T18:07:48Z',
      'message': u'Update Dockerfile',
    },
  }

  def lookup_commit(repo_id, commit_sha):
    if commit_sha == '770830e7ca132856991e6db4f7fc0f4dbe20bd5f':
      return {
        "added": [],
        "author": {
          "name": "Some User",
          "email": "someuser@somedomain.com"
        },
        "url": "https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f",
        "message": "Update Dockerfile",
        "removed": [],
        "modified": [
          "Dockerfile"
        ],
        "id": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f"
      }

    return None

  assertSchema('gitlab_webhook_known_issue', expected, gl_webhook, lookup_commit=lookup_commit)


def test_gitlab_webhook_for_other():
  assertSkipped('gitlab_webhook_other', gl_webhook)


def test_gitlab_webhook_payload_with_lookup():
  expected = {
    'commit': u'fb88379ee45de28a0a4590fddcbd8eff8b36026e',
    'ref': u'refs/heads/master',
    'git_url': u'git@gitlab.com:jsmith/somerepo.git',
    'commit_info': {
      'url': u'https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e',
      'date': u'2015-08-13T19:33:18+00:00',
      'message': u'Fix link\n',
      'author': {
        'username': 'jsmith',
        'url': 'http://gitlab.com/jsmith',
        'avatar_url': 'http://some/avatar/url',
      },
    },
  }

  def lookup_user(_):
    return {
      'username': 'jsmith',
      'html_url': 'http://gitlab.com/jsmith',
      'avatar_url': 'http://some/avatar/url',
    }

  assertSchema('gitlab_webhook', expected, gl_webhook, lookup_user=lookup_user)


def test_github_webhook_payload_deleted_commit():
  expected = {
    'commit': u'456806b662cb903a0febbaed8344f3ed42f27bab',
    'commit_info': {
      'author': {
        'username': u'jsmith'
      },
      'committer': {
        'username': u'jsmith'
      },
      'date': u'2015-12-08T18:07:03-05:00',
      'message': (u'Merge pull request #1044 from jsmith/errerror\n\n' +
                  'Assign the exception to a variable to log it'),
      'url': u'https://github.com/jsmith/somerepo/commit/456806b662cb903a0febbaed8344f3ed42f27bab'
    },
    'git_url': u'git@github.com:jsmith/somerepo.git',
    'ref': u'refs/heads/master',
    'default_branch': u'master',
  }

  def lookup_user(_):
    return None

  assertSchema('github_webhook_deletedcommit', expected, gh_webhook, lookup_user=lookup_user)


def test_github_webhook_known_issue():
  def lookup_user(_):
    return None

  assertSkipped('github_webhook_knownissue', gh_webhook, lookup_user=lookup_user)


def test_bitbucket_webhook_known_issue():
  assertSkipped('bitbucket_knownissue', bb_webhook)

@@ -0,0 +1,25 @@
import re

import pytest

from buildtrigger.triggerutil import matches_ref

@pytest.mark.parametrize('ref, filt, matches', [
  ('ref/heads/master', '.+', True),
  ('ref/heads/master', 'heads/.+', True),
  ('ref/heads/master', 'heads/master', True),
  ('ref/heads/slash/branch', 'heads/slash/branch', True),
  ('ref/heads/slash/branch', 'heads/.+', True),

  ('ref/heads/foobar', 'heads/master', False),
  ('ref/heads/master', 'tags/master', False),

  ('ref/heads/master', '(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)', True),
  ('ref/heads/alpha', '(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)', True),
  ('ref/heads/beta', '(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)', True),
  ('ref/heads/gamma', '(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)', True),

  ('ref/heads/delta', '(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)', False),
])
def test_matches_ref(ref, filt, matches):
  assert matches_ref(ref, re.compile(filt)) == matches

@@ -9,9 +9,9 @@
      "file": "Readme"
    }
  ],
  "raw_author": "Mary Anthony <manthony@172-28-13-105.staff.sf.atlassian.com>",
  "raw_author": "Mark Anthony <manthony@example.com>",
  "utctimestamp": "2012-07-23 22:26:36+00:00",
  "author": "Mary Anthony",
  "author": "Mark Anthony",
  "timestamp": "2012-07-24 00:26:36",
  "node": "abdeaf1b2b4a6b9ddf742c1e1754236380435a62",
  "parents": [

@@ -1,55 +1,43 @@
{
  "actor": {
    "username": "LightSide_CodeShip",
    "account_id": "SomeCoolLabs_CodeShip",
    "links": {
      "self": {
        "href": "https://api.bitbucket.org/2.0/users/LightSide_CodeShip"
        "href": "https://api.bitbucket.org/2.0/users/SomeCoolLabs_CodeShip"
      },
      "avatar": {
        "href": "https://bitbucket.org/account/LightSide_CodeShip/avatar/32/"
      },
      "html": {
        "href": "https://bitbucket.org/LightSide_CodeShip/"
        "href": "https://bitbucket.org/account/SomeCoolLabs_CodeShip/avatar/32/"
      }
    },
    "uuid": "{d009ab20-b8b8-4840-9491-bfe72fbf666e}",
    "type": "user",
    "display_name": "CodeShip Tagging"
  },
  "repository": {
    "full_name": "lightsidelabs/svc-identity",
    "full_name": "somecoollabs/svc-identity",
    "name": "svc-identity",
    "scm": "git",
    "type": "repository",
    "links": {
      "self": {
        "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity"
        "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity"
      },
      "avatar": {
        "href": "https://bitbucket.org/lightsidelabs/svc-identity/avatar/16/"
      },
      "html": {
        "href": "https://bitbucket.org/lightsidelabs/svc-identity"
        "href": "https://bitbucket.org/somecoollabs/svc-identity/avatar/16/"
      }
    },
    "is_private": true,
    "uuid": "{3400bed9-5cde-45b9-8d86-c1dac5d5e610}",
    "owner": {
      "username": "lightsidelabs",
      "account_id": "somecoollabs",
      "links": {
        "self": {
          "href": "https://api.bitbucket.org/2.0/teams/lightsidelabs"
          "href": "https://api.bitbucket.org/2.0/teams/somecoollabs"
        },
        "avatar": {
          "href": "https://bitbucket.org/account/lightsidelabs/avatar/32/"
        },
        "html": {
          "href": "https://bitbucket.org/lightsidelabs/"
          "href": "https://bitbucket.org/account/somecoollabs/avatar/32/"
        }
      },
      "uuid": "{456c5f28-7338-4d89-9506-c7b889ba2d11}",
      "type": "team",
      "display_name": "LightSIDE Labs"
      "display_name": "Some Cool Labs"
    }
  },
  "push": {
@@ -60,14 +48,14 @@
        "hash": "a0ec139843b2bb281ab21a433266ddc498e605dc",
        "links": {
          "self": {
            "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/a0ec139843b2bb281ab21a433266ddc498e605dc"
            "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/a0ec139843b2bb281ab21a433266ddc498e605dc"
          },
          "html": {
            "href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc"
            "href": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc"
          }
        },
        "author": {
          "raw": "scripts/autotag_version.py <utilitybelt@lightside>"
          "raw": "scripts/autotag_version.py <utilitybelt@somecoollabs.com>"
        },
        "type": "commit",
        "message": "Update version.py to 0.1.2 [skip ci]\n\n(by utilitybelt/scripts/autotag_version.py)\n"
@@ -82,10 +70,10 @@
        "hash": "bd749165b0c50c65c15fc4df526b8e9df26eff10",
        "links": {
          "self": {
            "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/bd749165b0c50c65c15fc4df526b8e9df26eff10"
            "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/bd749165b0c50c65c15fc4df526b8e9df26eff10"
          },
          "html": {
            "href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/bd749165b0c50c65c15fc4df526b8e9df26eff10"
            "href": "https://bitbucket.org/somecoollabs/svc-identity/commits/bd749165b0c50c65c15fc4df526b8e9df26eff10"
          }
        },
        "type": "commit"
@@ -94,10 +82,10 @@
        "hash": "910b5624b74190dfaa51938d851563a4c5254926",
        "links": {
          "self": {
            "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/910b5624b74190dfaa51938d851563a4c5254926"
            "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/910b5624b74190dfaa51938d851563a4c5254926"
          },
          "html": {
            "href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/910b5624b74190dfaa51938d851563a4c5254926"
            "href": "https://bitbucket.org/somecoollabs/svc-identity/commits/910b5624b74190dfaa51938d851563a4c5254926"
          }
        },
        "type": "commit"
@@ -109,42 +97,38 @@
        "hash": "263736ecc250113fad56a93f83b712093554ad42",
        "links": {
          "self": {
            "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/263736ecc250113fad56a93f83b712093554ad42"
            "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/263736ecc250113fad56a93f83b712093554ad42"
          },
          "html": {
            "href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/263736ecc250113fad56a93f83b712093554ad42"
            "href": "https://bitbucket.org/somecoollabs/svc-identity/commits/263736ecc250113fad56a93f83b712093554ad42"
          }
        },
        "author": {
          "raw": "Chris Winters <chris@cwinters.com>",
          "raw": "John Smith <j@smith.com>",
          "user": {
            "username": "cwinters",
            "account_id": "jsmith",
            "links": {
              "self": {
                "href": "https://api.bitbucket.org/2.0/users/cwinters"
                "href": "https://api.bitbucket.org/2.0/users/jsmith"
              },
              "avatar": {
                "href": "https://bitbucket.org/account/cwinters/avatar/32/"
              },
              "html": {
                "href": "https://bitbucket.org/cwinters/"
                "href": "https://bitbucket.org/account/jsmith/avatar/32/"
              }
            },
            "uuid": "{a6209615-6d75-4294-8181-dbf96d40fc6b}",
            "type": "user",
            "display_name": "Chris Winters"
            "display_name": "John Smith"
          }
        }
      },
      "links": {
        "self": {
          "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/refs/branches/master"
          "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/refs/branches/master"
        },
        "commits": {
          "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commits/master"
          "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commits/master"
        },
        "html": {
          "href": "https://bitbucket.org/lightsidelabs/svc-identity/branch/master"
          "href": "https://bitbucket.org/somecoollabs/svc-identity/branch/master"
        }
      },
      "name": "master",
@@ -152,13 +136,13 @@
      },
      "links": {
        "diff": {
          "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/diff/a0ec139843b2bb281ab21a433266ddc498e605dc..263736ecc250113fad56a93f83b712093554ad42"
          "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/diff/a0ec139843b2bb281ab21a433266ddc498e605dc..263736ecc250113fad56a93f83b712093554ad42"
        },
        "commits": {
          "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commits?include=a0ec139843b2bb281ab21a433266ddc498e605dc&exclude=263736ecc250113fad56a93f83b712093554ad42"
          "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commits?include=a0ec139843b2bb281ab21a433266ddc498e605dc&exclude=263736ecc250113fad56a93f83b712093554ad42"
        },
        "html": {
          "href": "https://bitbucket.org/lightsidelabs/svc-identity/branches/compare/a0ec139843b2bb281ab21a433266ddc498e605dc..263736ecc250113fad56a93f83b712093554ad42"
          "href": "https://bitbucket.org/somecoollabs/svc-identity/branches/compare/a0ec139843b2bb281ab21a433266ddc498e605dc..263736ecc250113fad56a93f83b712093554ad42"
        }
      },
      "new": {
@@ -168,10 +152,10 @@
        "hash": "263736ecc250113fad56a93f83b712093554ad42",
        "links": {
          "self": {
            "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/263736ecc250113fad56a93f83b712093554ad42"
            "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/263736ecc250113fad56a93f83b712093554ad42"
          },
          "html": {
            "href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/263736ecc250113fad56a93f83b712093554ad42"
            "href": "https://bitbucket.org/somecoollabs/svc-identity/commits/263736ecc250113fad56a93f83b712093554ad42"
          }
        },
        "type": "commit"
@@ -183,25 +167,25 @@
        "hash": "a0ec139843b2bb281ab21a433266ddc498e605dc",
        "links": {
          "self": {
            "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/a0ec139843b2bb281ab21a433266ddc498e605dc"
            "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/a0ec139843b2bb281ab21a433266ddc498e605dc"
          },
          "html": {
            "href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc"
            "href": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc"
          }
        },
        "author": {
          "raw": "scripts/autotag_version.py <utilitybelt@lightside>"
          "raw": "scripts/autotag_version.py <utilitybelt@somecoollabs.com>"
        }
      },
      "links": {
        "self": {
          "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/refs/branches/master"
          "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/refs/branches/master"
        },
        "commits": {
          "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commits/master"
          "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commits/master"
        },
        "html": {
          "href": "https://bitbucket.org/lightsidelabs/svc-identity/branch/master"
          "href": "https://bitbucket.org/somecoollabs/svc-identity/branch/master"
        }
      },
      "name": "master",

@@ -4,7 +4,7 @@
    {
      "links": {
        "commits": {
          "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commits?include=a0ec139843b2bb281ab21a433266ddc498e605dc"
          "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commits?include=a0ec139843b2bb281ab21a433266ddc498e605dc"
        }
      },
      "closed": false,
@@ -13,10 +13,10 @@
        "date": "2015-09-25T00:55:08+00:00",
        "links": {
          "html": {
            "href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc"
            "href": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc"
          },
          "self": {
            "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/a0ec139843b2bb281ab21a433266ddc498e605dc"
            "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/a0ec139843b2bb281ab21a433266ddc498e605dc"
          }
        },
        "message": "Update version.py to 0.1.2 [skip ci]\n\n(by utilitybelt/scripts/autotag_version.py)\n",
@@ -25,10 +25,10 @@
          {
            "links": {
              "html": {
                "href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/263736ecc250113fad56a93f83b712093554ad42"
                "href": "https://bitbucket.org/somecoollabs/svc-identity/commits/263736ecc250113fad56a93f83b712093554ad42"
              },
              "self": {
                "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/263736ecc250113fad56a93f83b712093554ad42"
                "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/263736ecc250113fad56a93f83b712093554ad42"
              }
            },
            "hash": "263736ecc250113fad56a93f83b712093554ad42",
@@ -37,19 +37,19 @@
        ],
        "hash": "a0ec139843b2bb281ab21a433266ddc498e605dc",
        "author": {
          "raw": "scripts/autotag_version.py <utilitybelt@lightside>"
          "raw": "scripts/autotag_version.py <utilitybelt@somecoollabs.com>"
        }
      },
      "name": "0.1.2",
      "links": {
        "html": {
          "href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/tag/0.1.2"
          "href": "https://bitbucket.org/somecoollabs/svc-identity/commits/tag/0.1.2"
        },
        "self": {
          "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/refs/tags/0.1.2"
          "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/refs/tags/0.1.2"
        },
        "commits": {
          "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commits/0.1.2"
          "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commits/0.1.2"
        }
      },
      "type": "tag"
@@ -65,53 +65,44 @@
    "name": "svc-identity",
    "links": {
      "html": {
        "href": "https://bitbucket.org/lightsidelabs/svc-identity"
        "href": "https://bitbucket.org/somecoollabs/svc-identity"
      },
      "self": {
        "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity"
        "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity"
      },
      "avatar": {
        "href": "https://bitbucket.org/lightsidelabs/svc-identity/avatar/16/"
        "href": "https://bitbucket.org/somecoollabs/svc-identity/avatar/16/"
      }
    },
    "is_private": true,
    "type": "repository",
    "scm": "git",
    "owner": {
      "username": "lightsidelabs",
      "account_id": "somecoollabs",
      "links": {
        "html": {
          "href": "https://bitbucket.org/lightsidelabs/"
        },
        "self": {
          "href": "https://api.bitbucket.org/2.0/teams/lightsidelabs"
          "href": "https://api.bitbucket.org/2.0/teams/somecoollabs"
        },
        "avatar": {
          "href": "https://bitbucket.org/account/lightsidelabs/avatar/32/"
          "href": "https://bitbucket.org/account/somecoollabs/avatar/32/"
        }
      },
      "display_name": "LightSIDE Labs",
      "uuid": "{456c5f28-7338-4d89-9506-c7b889ba2d11}",
      "display_name": "Some Cool Labs",
      "type": "team"
    },
    "full_name": "lightsidelabs/svc-identity",
    "uuid": "{3400bed9-5cde-45b9-8d86-c1dac5d5e610}"
    "full_name": "somecoollabs/svc-identity"
  },
  "actor": {
    "username": "LightSide_CodeShip",
    "account_id": "SomeCoolLabs_CodeShip",
    "links": {
      "html": {
        "href": "https://bitbucket.org/LightSide_CodeShip/"
      },
      "self": {
        "href": "https://api.bitbucket.org/2.0/users/LightSide_CodeShip"
        "href": "https://api.bitbucket.org/2.0/users/SomeCoolLabs_CodeShip"
      },
      "avatar": {
        "href": "https://bitbucket.org/account/LightSide_CodeShip/avatar/32/"
        "href": "https://bitbucket.org/account/SomeCoolLabs_CodeShip/avatar/32/"
      }
    },
    "display_name": "CodeShip Tagging",
    "uuid": "{d009ab20-b8b8-4840-9491-bfe72fbf666e}",
    "type": "user"
  }
}

@@ -0,0 +1,68 @@
{
  "push": {
    "changes": [

    ]
  },
  "actor": {
    "account_id": "jsmith",
    "display_name": "John Smith",
    "type": "user",
    "links": {
      "self": {
        "href": "https:\/\/api.bitbucket.org\/2.0\/users\/jsmith"
      },
      "avatar": {
        "href": "https:\/\/bitbucket.org\/account\/jsmith\/avatar\/32\/"
      }
    }
  },
  "repository": {
    "website": "",
    "scm": "git",
    "name": "slip-api",
    "links": {
      "self": {
        "href": "https:\/\/api.bitbucket.org\/2.0\/repositories\/goldcuff\/slip-api"
      },
      "html": {
        "href": "https:\/\/bitbucket.org\/goldcuff\/slip-api"
      },
      "avatar": {
        "href": "https:\/\/bitbucket.org\/goldcuff\/slip-api\/avatar\/32\/"
      }
    },
    "project": {
      "links": {
        "self": {
          "href": "https:\/\/api.bitbucket.org\/2.0\/teams\/goldcuff\/projects\/SLIP"
        },
        "html": {
          "href": "https:\/\/bitbucket.org\/account\/user\/goldcuff\/projects\/SLIP"
        },
        "avatar": {
          "href": "https:\/\/bitbucket.org\/account\/user\/goldcuff\/projects\/SLIP\/avatar\/32"
        }
      },
      "type": "project",
      "name": "SLIP",
      "key": "SLIP"
    },
    "full_name": "goldcuff\/slip-api",
    "owner": {
      "account_id": "goldcuff",
      "display_name": "Goldcuff",
      "type": "team",
      "links": {
        "self": {
          "href": "https:\/\/api.bitbucket.org\/2.0\/teams\/goldcuff"
        },
        "avatar": {
          "href": "https:\/\/bitbucket.org\/account\/goldcuff\/avatar\/32\/"
        }
      }
    },
    "type": "repository",
    "is_private": true
  }
}

@@ -4,67 +4,66 @@
    {
      "links": {
        "commits": {
          "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commits?include=af64ae7188685f8424040b4735ad12941b980d75&exclude=1784139225279a587e0afb151bed1f9ba3dd509e"
          "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commits?include=af64ae7188685f8424040b4735ad12941b980d75&exclude=1784139225279a587e0afb151bed1f9ba3dd509e"
        },
        "diff": {
          "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/diff/af64ae7188685f8424040b4735ad12941b980d75..1784139225279a587e0afb151bed1f9ba3dd509e"
          "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/diff/af64ae7188685f8424040b4735ad12941b980d75..1784139225279a587e0afb151bed1f9ba3dd509e"
        },
        "html": {
          "href": "https://bitbucket.org/jscoreos/another-repo/branches/compare/af64ae7188685f8424040b4735ad12941b980d75..1784139225279a587e0afb151bed1f9ba3dd509e"
          "href": "https://bitbucket.org/jsmith/another-repo/branches/compare/af64ae7188685f8424040b4735ad12941b980d75..1784139225279a587e0afb151bed1f9ba3dd509e"
        }
      },
      "old": {
        "name": "master",
        "links": {
          "commits": {
            "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commits/master"
            "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commits/master"
          },
          "html": {
            "href": "https://bitbucket.org/jscoreos/another-repo/branch/master"
            "href": "https://bitbucket.org/jsmith/another-repo/branch/master"
          },
          "self": {
            "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/refs/branches/master"
            "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/refs/branches/master"
          }
        },
        "type": "branch",
        "target": {
          "links": {
            "html": {
              "href": "https://bitbucket.org/jscoreos/another-repo/commits/1784139225279a587e0afb151bed1f9ba3dd509e"
              "href": "https://bitbucket.org/jsmith/another-repo/commits/1784139225279a587e0afb151bed1f9ba3dd509e"
            },
            "self": {
              "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commit/1784139225279a587e0afb151bed1f9ba3dd509e"
              "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commit/1784139225279a587e0afb151bed1f9ba3dd509e"
            }
          },
          "author": {
            "user": {
              "links": {
                "avatar": {
                  "href": "https://bitbucket.org/account/jscoreos/avatar/32/"
                  "href": "https://bitbucket.org/account/jsmith/avatar/32/"
                },
                "html": {
                  "href": "https://bitbucket.org/jscoreos/"
                  "href": "https://bitbucket.org/jsmith/"
                },
                "self": {
                  "href": "https://api.bitbucket.org/2.0/users/jscoreos"
                  "href": "https://api.bitbucket.org/2.0/users/jsmith"
                }
              },
              "uuid": "{2fa27577-f361-45bb-999a-f4450c546b73}",
              "type": "user",
              "display_name": "Joseph Schorr",
              "username": "jscoreos"
              "display_name": "John Smith",
              "account_id": "jsmith"
            },
            "raw": "Joseph Schorr <joseph.schorr@coreos.com>"
            "raw": "John Smith <j@smith.com>"
          },
          "date": "2015-09-10T20:37:54+00:00",
          "parents": [
            {
              "links": {
                "html": {
                  "href": "https://bitbucket.org/jscoreos/another-repo/commits/5329daa0961ec968de9ef36f30024bfa0da73103"
                  "href": "https://bitbucket.org/jsmith/another-repo/commits/5329daa0961ec968de9ef36f30024bfa0da73103"
                },
                "self": {
                  "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commit/5329daa0961ec968de9ef36f30024bfa0da73103"
                  "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commit/5329daa0961ec968de9ef36f30024bfa0da73103"
                }
              },
              "type": "commit",
@@ -84,28 +83,24 @@
            "user": {
              "links": {
                "avatar": {
                  "href": "https://bitbucket.org/account/jscoreos/avatar/32/"
                },
                "html": {
                  "href": "https://bitbucket.org/jscoreos/"
                  "href": "https://bitbucket.org/account/jsmith/avatar/32/"
                },
                "self": {
                  "href": "https://api.bitbucket.org/2.0/users/jscoreos"
                  "href": "https://api.bitbucket.org/2.0/users/jsmith"
                }
              },
              "uuid": "{2fa27577-f361-45bb-999a-f4450c546b73}",
              "type": "user",
              "display_name": "Joseph Schorr",
              "username": "jscoreos"
              "display_name": "John Smith",
              "account_id": "jsmith"
            },
            "raw": "Joseph Schorr <joseph.schorr@coreos.com>"
            "raw": "John Smith <j@smith.com>"
          },
          "links": {
            "html": {
              "href": "https://bitbucket.org/jscoreos/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75"
              "href": "https://bitbucket.org/jsmith/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75"
            },
            "self": {
              "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commit/af64ae7188685f8424040b4735ad12941b980d75"
              "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commit/af64ae7188685f8424040b4735ad12941b980d75"
            }
          },
          "message": "Dockerfile edited online with Bitbucket",
@@ -117,54 +112,50 @@
        "name": "master",
        "links": {
          "commits": {
            "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commits/master"
            "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commits/master"
          },
          "html": {
            "href": "https://bitbucket.org/jscoreos/another-repo/branch/master"
            "href": "https://bitbucket.org/jsmith/another-repo/branch/master"
          },
          "self": {
            "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/refs/branches/master"
            "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/refs/branches/master"
          }
        },
        "type": "branch",
        "target": {
          "links": {
            "html": {
              "href": "https://bitbucket.org/jscoreos/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75"
              "href": "https://bitbucket.org/jsmith/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75"
            },
            "self": {
              "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commit/af64ae7188685f8424040b4735ad12941b980d75"
              "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commit/af64ae7188685f8424040b4735ad12941b980d75"
            }
          },
          "author": {
            "user": {
              "links": {
                "avatar": {
                  "href": "https://bitbucket.org/account/jscoreos/avatar/32/"
                },
                "html": {
                  "href": "https://bitbucket.org/jscoreos/"
                  "href": "https://bitbucket.org/account/jsmith/avatar/32/"
                },
                "self": {
                  "href": "https://api.bitbucket.org/2.0/users/jscoreos"
                  "href": "https://api.bitbucket.org/2.0/users/jsmith"
                }
              },
              "uuid": "{2fa27577-f361-45bb-999a-f4450c546b73}",
              "type": "user",
              "display_name": "Joseph Schorr",
              "username": "jscoreos"
              "display_name": "John Smith",
              "account_id": "jsmith"
            },
            "raw": "Joseph Schorr <joseph.schorr@coreos.com>"
            "raw": "John Smith <j@smith.com>"
          },
          "date": "2015-09-10T20:40:54+00:00",
          "parents": [
            {
              "links": {
                "html": {
                  "href": "https://bitbucket.org/jscoreos/another-repo/commits/1784139225279a587e0afb151bed1f9ba3dd509e"
                  "href": "https://bitbucket.org/jsmith/another-repo/commits/1784139225279a587e0afb151bed1f9ba3dd509e"
                },
                "self": {
                  "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commit/1784139225279a587e0afb151bed1f9ba3dd509e"
                  "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commit/1784139225279a587e0afb151bed1f9ba3dd509e"
                }
              },
              "type": "commit",
@@ -184,54 +175,45 @@
  "repository": {
    "links": {
      "avatar": {
        "href": "https://bitbucket.org/jscoreos/another-repo/avatar/16/"
        "href": "https://bitbucket.org/jsmith/another-repo/avatar/16/"
      },
      "html": {
        "href": "https://bitbucket.org/jscoreos/another-repo"
        "href": "https://bitbucket.org/jsmith/another-repo"
      },
      "self": {
        "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo"
        "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo"
      }
    },
    "full_name": "jscoreos/another-repo",
    "uuid": "{b3459203-3e58-497b-8059-ad087b6b01de}",
    "full_name": "jsmith/another-repo",
    "type": "repository",
    "is_private": true,
    "name": "Another Repo",
    "owner": {
      "links": {
        "avatar": {
          "href": "https://bitbucket.org/account/jscoreos/avatar/32/"
        },
        "html": {
          "href": "https://bitbucket.org/jscoreos/"
          "href": "https://bitbucket.org/account/jsmith/avatar/32/"
        },
        "self": {
          "href": "https://api.bitbucket.org/2.0/users/jscoreos"
          "href": "https://api.bitbucket.org/2.0/users/jsmith"
        }
      },
      "uuid": "{2fa27577-f361-45bb-999a-f4450c546b73}",
      "type": "user",
      "display_name": "Joseph Schorr",
      "username": "jscoreos"
      "display_name": "John Smith",
      "account_id": "jsmith"
    },
    "scm": "git"
  },
  "actor": {
    "links": {
      "avatar": {
        "href": "https://bitbucket.org/account/jscoreos/avatar/32/"
      },
      "html": {
        "href": "https://bitbucket.org/jscoreos/"
        "href": "https://bitbucket.org/account/jsmith/avatar/32/"
      },
      "self": {
        "href": "https://api.bitbucket.org/2.0/users/jscoreos"
        "href": "https://api.bitbucket.org/2.0/users/jsmith"
      }
    },
    "uuid": "{2fa27577-f361-45bb-999a-f4450c546b73}",
    "type": "user",
    "display_name": "Joseph Schorr",
    "username": "jscoreos"
    "display_name": "John Smith",
    "account_id": "jsmith"
  }
}

@ -0,0 +1,153 @@
{
  "ref": "refs/heads/master",
  "before": "9ea43cab474709d4a61afb7e3340de1ffc405b41",
  "after": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
  "created": false,
  "deleted": false,
  "forced": false,
  "base_ref": null,
  "compare": "https://github.com/jsmith/anothertest/compare/9ea43cab4747...410f4cdf8ff0",
  "commits": [
    {
      "id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
      "distinct": true,
      "message": "Update Dockerfile",
      "timestamp": "2015-09-11T14:26:16-04:00",
      "url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
      "author": {
        "name": "jsmith",
        "email": "jsmith@users.noreply.github.com",
        "username": "jsmith"
      },
      "committer": {
        "name": "jsmith",
        "email": "jsmith@users.noreply.github.com",
        "username": "jsmith"
      },
      "added": [],
      "removed": [],
      "modified": [
        "Dockerfile"
      ]
    }
  ],
  "head_commit": {
    "id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
    "distinct": true,
    "message": "Update Dockerfile",
    "timestamp": "2015-09-11T14:26:16-04:00",
    "url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
    "author": {
      "name": "jsmith",
      "email": "jsmith@users.noreply.github.com",
      "username": "jsmith"
    },
    "committer": {
      "name": "jsmith",
      "email": "jsmith@users.noreply.github.com",
      "username": "jsmith"
    },
    "added": [],
    "removed": [],
    "modified": [
      "Dockerfile"
    ]
  },
  "repository": {
    "id": 1234567,
    "name": "anothertest",
    "full_name": "jsmith/anothertest",
    "owner": {
      "name": "jsmith",
      "email": "jsmith@users.noreply.github.com"
    },
    "private": false,
    "html_url": "https://github.com/jsmith/anothertest",
    "description": "",
    "fork": false,
    "url": "https://github.com/jsmith/anothertest",
    "forks_url": "https://api.github.com/repos/jsmith/anothertest/forks",
    "keys_url": "https://api.github.com/repos/jsmith/anothertest/keys{/key_id}",
    "collaborators_url": "https://api.github.com/repos/jsmith/anothertest/collaborators{/collaborator}",
    "teams_url": "https://api.github.com/repos/jsmith/anothertest/teams",
    "hooks_url": "https://api.github.com/repos/jsmith/anothertest/hooks",
    "issue_events_url": "https://api.github.com/repos/jsmith/anothertest/issues/events{/number}",
    "events_url": "https://api.github.com/repos/jsmith/anothertest/events",
    "assignees_url": "https://api.github.com/repos/jsmith/anothertest/assignees{/user}",
    "branches_url": "https://api.github.com/repos/jsmith/anothertest/branches{/branch}",
    "tags_url": "https://api.github.com/repos/jsmith/anothertest/tags",
    "blobs_url": "https://api.github.com/repos/jsmith/anothertest/git/blobs{/sha}",
    "git_tags_url": "https://api.github.com/repos/jsmith/anothertest/git/tags{/sha}",
    "git_refs_url": "https://api.github.com/repos/jsmith/anothertest/git/refs{/sha}",
    "trees_url": "https://api.github.com/repos/jsmith/anothertest/git/trees{/sha}",
    "statuses_url": "https://api.github.com/repos/jsmith/anothertest/statuses/{sha}",
    "languages_url": "https://api.github.com/repos/jsmith/anothertest/languages",
    "stargazers_url": "https://api.github.com/repos/jsmith/anothertest/stargazers",
    "contributors_url": "https://api.github.com/repos/jsmith/anothertest/contributors",
    "subscribers_url": "https://api.github.com/repos/jsmith/anothertest/subscribers",
    "subscription_url": "https://api.github.com/repos/jsmith/anothertest/subscription",
    "commits_url": "https://api.github.com/repos/jsmith/anothertest/commits{/sha}",
    "git_commits_url": "https://api.github.com/repos/jsmith/anothertest/git/commits{/sha}",
    "comments_url": "https://api.github.com/repos/jsmith/anothertest/comments{/number}",
    "issue_comment_url": "https://api.github.com/repos/jsmith/anothertest/issues/comments{/number}",
    "contents_url": "https://api.github.com/repos/jsmith/anothertest/contents/{+path}",
    "compare_url": "https://api.github.com/repos/jsmith/anothertest/compare/{base}...{head}",
    "merges_url": "https://api.github.com/repos/jsmith/anothertest/merges",
    "archive_url": "https://api.github.com/repos/jsmith/anothertest/{archive_format}{/ref}",
    "downloads_url": "https://api.github.com/repos/jsmith/anothertest/downloads",
    "issues_url": "https://api.github.com/repos/jsmith/anothertest/issues{/number}",
    "pulls_url": "https://api.github.com/repos/jsmith/anothertest/pulls{/number}",
    "milestones_url": "https://api.github.com/repos/jsmith/anothertest/milestones{/number}",
    "notifications_url": "https://api.github.com/repos/jsmith/anothertest/notifications{?since,all,participating}",
    "labels_url": "https://api.github.com/repos/jsmith/anothertest/labels{/name}",
    "releases_url": "https://api.github.com/repos/jsmith/anothertest/releases{/id}",
    "created_at": 1430426945,
    "updated_at": "2015-04-30T20:49:05Z",
    "pushed_at": 1441995976,
    "git_url": "git://github.com/jsmith/anothertest.git",
    "ssh_url": "git@github.com:jsmith/anothertest.git",
    "clone_url": "https://github.com/jsmith/anothertest.git",
    "svn_url": "https://github.com/jsmith/anothertest",
    "homepage": null,
    "size": 144,
    "stargazers_count": 0,
    "watchers_count": 0,
    "language": null,
    "has_issues": true,
    "has_downloads": true,
    "has_wiki": true,
    "has_pages": false,
    "forks_count": 0,
    "mirror_url": null,
    "open_issues_count": 0,
    "forks": 0,
    "open_issues": 0,
    "watchers": 0,
    "default_branch": "master",
    "stargazers": 0,
    "master_branch": "master"
  },
  "pusher": {
    "name": "jsmith",
    "email": "jsmith@users.noreply.github.com"
  },
  "sender": {
    "login": "jsmith",
    "id": 1234567,
    "avatar_url": "https://avatars.githubusercontent.com/u/1234567?v=3",
    "gravatar_id": "",
    "url": "https://api.github.com/users/jsmith",
    "html_url": "https://github.com/jsmith",
    "followers_url": "https://api.github.com/users/jsmith/followers",
    "following_url": "https://api.github.com/users/jsmith/following{/other_user}",
    "gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}",
    "starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}",
    "subscriptions_url": "https://api.github.com/users/jsmith/subscriptions",
    "organizations_url": "https://api.github.com/users/jsmith/orgs",
    "repos_url": "https://api.github.com/users/jsmith/repos",
    "events_url": "https://api.github.com/users/jsmith/events{/privacy}",
    "received_events_url": "https://api.github.com/users/jsmith/received_events",
    "type": "User",
    "site_admin": false
  }
}

@ -0,0 +1,199 @@
{
  "ref": "refs/heads/master",
  "before": "c7fa613b99d509c0d4fcbf946f0415b5f024150b",
  "after": "456806b662cb903a0febbaed8344f3ed42f27bab",
  "created": false,
  "deleted": false,
  "forced": false,
  "base_ref": null,
  "compare": "https://github.com/jsmith/somerepo/compare/c7fa613b99d5...456806b662cb",
  "commits": [
    {
      "id": "e00365b225ad7f454982e9198756cc1ab5dc4428",
      "distinct": true,
      "message": "Assign the exception to a variable to log it",
      "timestamp": "2015-12-08T18:03:48-05:00",
      "url": "https://github.com/jsmith/somerepo/commit/e00365b225ad7f454982e9198756cc1ab5dc4428",
      "author": {
        "name": "John Smith",
        "email": "j@smith.com",
        "username": "jsmith"
      },
      "committer": {
        "name": "John Smith",
        "email": "j@smith.com",
        "username": "jsmith"
      },
      "added": [

      ],
      "removed": [

      ],
      "modified": [
        "storage/basestorage.py"
      ]
    },
    {
      "id": "456806b662cb903a0febbaed8344f3ed42f27bab",
      "distinct": true,
      "message": "Merge pull request #1044 from jsmith/errerror\n\nAssign the exception to a variable to log it",
      "timestamp": "2015-12-08T18:07:03-05:00",
      "url": "https://github.com/jsmith/somerepo/commit/456806b662cb903a0febbaed8344f3ed42f27bab",
      "author": {
        "name": "John Smith",
        "email": "j@smith.com",
        "username": "jsmith"
      },
      "committer": {
        "name": "John Smith",
        "email": "j@smith.com",
        "username": "jsmith"
      },
      "added": [

      ],
      "removed": [

      ],
      "modified": [
        "storage/basestorage.py"
      ]
    }
  ],
  "head_commit": {
    "id": "456806b662cb903a0febbaed8344f3ed42f27bab",
    "distinct": true,
    "message": "Merge pull request #1044 from jsmith/errerror\n\nAssign the exception to a variable to log it",
    "timestamp": "2015-12-08T18:07:03-05:00",
    "url": "https://github.com/jsmith/somerepo/commit/456806b662cb903a0febbaed8344f3ed42f27bab",
    "author": {
      "name": "John Smith",
      "email": "j@smith.com",
      "username": "jsmith"
    },
    "committer": {
      "name": "John Smith",
      "email": "j@smith.com",
      "username": "jsmith"
    },
    "added": [

    ],
    "removed": [

    ],
    "modified": [
      "storage/basestorage.py"
    ]
  },
  "repository": {
    "id": 12345678,
    "name": "somerepo",
    "full_name": "jsmith/somerepo",
    "owner": {
      "name": "jsmith",
      "email": null
    },
    "private": true,
    "html_url": "https://github.com/jsmith/somerepo",
    "description": "Some Cool Repo",
    "fork": false,
    "url": "https://github.com/jsmith/somerepo",
    "forks_url": "https://api.github.com/repos/jsmith/somerepo/forks",
    "keys_url": "https://api.github.com/repos/jsmith/somerepo/keys{/key_id}",
    "collaborators_url": "https://api.github.com/repos/jsmith/somerepo/collaborators{/collaborator}",
    "teams_url": "https://api.github.com/repos/jsmith/somerepo/teams",
    "hooks_url": "https://api.github.com/repos/jsmith/somerepo/hooks",
    "issue_events_url": "https://api.github.com/repos/jsmith/somerepo/issues/events{/number}",
    "events_url": "https://api.github.com/repos/jsmith/somerepo/events",
    "assignees_url": "https://api.github.com/repos/jsmith/somerepo/assignees{/user}",
    "branches_url": "https://api.github.com/repos/jsmith/somerepo/branches{/branch}",
    "tags_url": "https://api.github.com/repos/jsmith/somerepo/tags",
    "blobs_url": "https://api.github.com/repos/jsmith/somerepo/git/blobs{/sha}",
    "git_tags_url": "https://api.github.com/repos/jsmith/somerepo/git/tags{/sha}",
    "git_refs_url": "https://api.github.com/repos/jsmith/somerepo/git/refs{/sha}",
    "trees_url": "https://api.github.com/repos/jsmith/somerepo/git/trees{/sha}",
    "statuses_url": "https://api.github.com/repos/jsmith/somerepo/statuses/{sha}",
    "languages_url": "https://api.github.com/repos/jsmith/somerepo/languages",
    "stargazers_url": "https://api.github.com/repos/jsmith/somerepo/stargazers",
    "contributors_url": "https://api.github.com/repos/jsmith/somerepo/contributors",
    "subscribers_url": "https://api.github.com/repos/jsmith/somerepo/subscribers",
    "subscription_url": "https://api.github.com/repos/jsmith/somerepo/subscription",
    "commits_url": "https://api.github.com/repos/jsmith/somerepo/commits{/sha}",
    "git_commits_url": "https://api.github.com/repos/jsmith/somerepo/git/commits{/sha}",
    "comments_url": "https://api.github.com/repos/jsmith/somerepo/comments{/number}",
    "issue_comment_url": "https://api.github.com/repos/jsmith/somerepo/issues/comments{/number}",
    "contents_url": "https://api.github.com/repos/jsmith/somerepo/contents/{+path}",
    "compare_url": "https://api.github.com/repos/jsmith/somerepo/compare/{base}...{head}",
    "merges_url": "https://api.github.com/repos/jsmith/somerepo/merges",
    "archive_url": "https://api.github.com/repos/jsmith/somerepo/{archive_format}{/ref}",
    "downloads_url": "https://api.github.com/repos/jsmith/somerepo/downloads",
    "issues_url": "https://api.github.com/repos/jsmith/somerepo/issues{/number}",
    "pulls_url": "https://api.github.com/repos/jsmith/somerepo/pulls{/number}",
    "milestones_url": "https://api.github.com/repos/jsmith/somerepo/milestones{/number}",
    "notifications_url": "https://api.github.com/repos/jsmith/somerepo/notifications{?since,all,participating}",
    "labels_url": "https://api.github.com/repos/jsmith/somerepo/labels{/name}",
    "releases_url": "https://api.github.com/repos/jsmith/somerepo/releases{/id}",
    "created_at": 1415056063,
    "updated_at": "2015-11-12T05:16:51Z",
    "pushed_at": 1449616023,
    "git_url": "git://github.com/jsmith/somerepo.git",
    "ssh_url": "git@github.com:jsmith/somerepo.git",
    "clone_url": "https://github.com/jsmith/somerepo.git",
    "svn_url": "https://github.com/jsmith/somerepo",
    "homepage": "",
    "size": 183677,
    "stargazers_count": 3,
    "watchers_count": 3,
    "language": "Python",
    "has_issues": true,
    "has_downloads": true,
    "has_wiki": false,
    "has_pages": false,
    "forks_count": 8,
    "mirror_url": null,
    "open_issues_count": 188,
    "forks": 8,
    "open_issues": 188,
    "watchers": 3,
    "default_branch": "master",
    "stargazers": 3,
    "master_branch": "master",
    "organization": "jsmith"
  },
  "pusher": {
    "name": "jsmith",
    "email": "j@smith.com"
  },
  "organization": {
    "login": "jsmith",
    "id": 9876543,
    "url": "https://api.github.com/orgs/jsmith",
    "repos_url": "https://api.github.com/orgs/jsmith/repos",
    "events_url": "https://api.github.com/orgs/jsmith/events",
    "members_url": "https://api.github.com/orgs/jsmith/members{/member}",
    "public_members_url": "https://api.github.com/orgs/jsmith/public_members{/member}",
    "avatar_url": "https://avatars.githubusercontent.com/u/5504624?v=3",
    "description": null
  },
  "sender": {
    "login": "jsmith",
    "id": 1234567,
    "avatar_url": "https://avatars.githubusercontent.com/u/000000?v=3",
    "gravatar_id": "",
    "url": "https://api.github.com/users/jsmith",
    "html_url": "https://github.com/jsmith",
    "followers_url": "https://api.github.com/users/jsmith/followers",
    "following_url": "https://api.github.com/users/jsmith/following{/other_user}",
    "gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}",
    "starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}",
    "subscriptions_url": "https://api.github.com/users/jsmith/subscriptions",
    "organizations_url": "https://api.github.com/users/jsmith/orgs",
    "repos_url": "https://api.github.com/users/jsmith/repos",
    "events_url": "https://api.github.com/users/jsmith/events{/privacy}",
    "received_events_url": "https://api.github.com/users/jsmith/received_events",
    "type": "User",
    "site_admin": false
  }
}

@ -0,0 +1,126 @@
{
  "ref": "refs/heads/1.2.6",
  "before": "76a309ed96c72986eddffc02d2f4dda3fe689f10",
  "after": "0000000000000000000000000000000000000000",
  "created": false,
  "deleted": true,
  "forced": false,
  "base_ref": null,
  "compare": "https://github.com/jsmith/somerepo/compare/76a309ed96c7...000000000000",
  "commits": [

  ],
  "head_commit": null,
  "repository": {
    "id": 12345678,
    "name": "somerepo",
    "full_name": "jsmith/somerepo",
    "owner": {
      "name": "jsmith",
      "email": "j@smith.com"
    },
    "private": true,
    "html_url": "https://github.com/jsmith/somerepo",
    "description": "Dockerfile for some repo",
    "fork": false,
    "url": "https://github.com/jsmith/somerepo",
    "forks_url": "https://api.github.com/repos/jsmith/somerepo/forks",
    "keys_url": "https://api.github.com/repos/jsmith/somerepo/keys{/key_id}",
    "collaborators_url": "https://api.github.com/repos/jsmith/somerepo/collaborators{/collaborator}",
    "teams_url": "https://api.github.com/repos/jsmith/somerepo/teams",
    "hooks_url": "https://api.github.com/repos/jsmith/somerepo/hooks",
    "issue_events_url": "https://api.github.com/repos/jsmith/somerepo/issues/events{/number}",
    "events_url": "https://api.github.com/repos/jsmith/somerepo/events",
    "assignees_url": "https://api.github.com/repos/jsmith/somerepo/assignees{/user}",
    "branches_url": "https://api.github.com/repos/jsmith/somerepo/branches{/branch}",
    "tags_url": "https://api.github.com/repos/jsmith/somerepo/tags",
    "blobs_url": "https://api.github.com/repos/jsmith/somerepo/git/blobs{/sha}",
    "git_tags_url": "https://api.github.com/repos/jsmith/somerepo/git/tags{/sha}",
    "git_refs_url": "https://api.github.com/repos/jsmith/somerepo/git/refs{/sha}",
    "trees_url": "https://api.github.com/repos/jsmith/somerepo/git/trees{/sha}",
    "statuses_url": "https://api.github.com/repos/jsmith/somerepo/statuses/{sha}",
    "languages_url": "https://api.github.com/repos/jsmith/somerepo/languages",
    "stargazers_url": "https://api.github.com/repos/jsmith/somerepo/stargazers",
    "contributors_url": "https://api.github.com/repos/jsmith/somerepo/contributors",
    "subscribers_url": "https://api.github.com/repos/jsmith/somerepo/subscribers",
    "subscription_url": "https://api.github.com/repos/jsmith/somerepo/subscription",
    "commits_url": "https://api.github.com/repos/jsmith/somerepo/commits{/sha}",
    "git_commits_url": "https://api.github.com/repos/jsmith/somerepo/git/commits{/sha}",
    "comments_url": "https://api.github.com/repos/jsmith/somerepo/comments{/number}",
    "issue_comment_url": "https://api.github.com/repos/jsmith/somerepo/issues/comments{/number}",
    "contents_url": "https://api.github.com/repos/jsmith/somerepo/contents/{+path}",
    "compare_url": "https://api.github.com/repos/jsmith/somerepo/compare/{base}...{head}",
    "merges_url": "https://api.github.com/repos/jsmith/somerepo/merges",
    "archive_url": "https://api.github.com/repos/jsmith/somerepo/{archive_format}{/ref}",
    "downloads_url": "https://api.github.com/repos/jsmith/somerepo/downloads",
    "issues_url": "https://api.github.com/repos/jsmith/somerepo/issues{/number}",
    "pulls_url": "https://api.github.com/repos/jsmith/somerepo/pulls{/number}",
    "milestones_url": "https://api.github.com/repos/jsmith/somerepo/milestones{/number}",
    "notifications_url": "https://api.github.com/repos/jsmith/somerepo/notifications{?since,all,participating}",
    "labels_url": "https://api.github.com/repos/jsmith/somerepo/labels{/name}",
    "releases_url": "https://api.github.com/repos/jsmith/somerepo/releases{/id}",
    "deployments_url": "https://api.github.com/repos/jsmith/somerepo/deployments",
    "created_at": 1461165926,
    "updated_at": "2016-11-03T18:20:01Z",
    "pushed_at": 1479313569,
    "git_url": "git://github.com/jsmith/somerepo.git",
    "ssh_url": "git@github.com:jsmith/somerepo.git",
    "clone_url": "https://github.com/jsmith/somerepo.git",
    "svn_url": "https://github.com/jsmith/somerepo",
    "homepage": "",
    "size": 3114,
    "stargazers_count": 0,
    "watchers_count": 0,
    "language": "Shell",
    "has_issues": true,
    "has_downloads": true,
    "has_wiki": true,
    "has_pages": false,
    "forks_count": 0,
    "mirror_url": null,
    "open_issues_count": 0,
    "forks": 0,
    "open_issues": 0,
    "watchers": 0,
    "default_branch": "master",
    "stargazers": 0,
    "master_branch": "master",
    "organization": "jsmith"
  },
  "pusher": {
    "name": "jsmith",
    "email": "j@smith.com"
  },
  "organization": {
    "login": "jsmith",
    "id": 9876543,
    "url": "https://api.github.com/orgs/jsmith",
    "repos_url": "https://api.github.com/orgs/jsmith/repos",
    "events_url": "https://api.github.com/orgs/jsmith/events",
    "hooks_url": "https://api.github.com/orgs/jsmith/hooks",
    "issues_url": "https://api.github.com/orgs/jsmith/issues",
    "members_url": "https://api.github.com/orgs/jsmith/members{/member}",
    "public_members_url": "https://api.github.com/orgs/jsmith/public_members{/member}",
    "avatar_url": "https://avatars.githubusercontent.com/u/1234567?v=3",
    "description": "Open Source Projects for Linux Containers"
  },
  "sender": {
    "login": "jsmith",
    "id": 12345678,
    "avatar_url": "https://avatars.githubusercontent.com/u/1234567?v=3",
    "gravatar_id": "",
    "url": "https://api.github.com/users/jsmith",
    "html_url": "https://github.com/jsmith",
    "followers_url": "https://api.github.com/users/jsmith/followers",
    "following_url": "https://api.github.com/users/jsmith/following{/other_user}",
    "gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}",
    "starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}",
    "subscriptions_url": "https://api.github.com/users/jsmith/subscriptions",
    "organizations_url": "https://api.github.com/users/jsmith/orgs",
    "repos_url": "https://api.github.com/users/jsmith/repos",
    "events_url": "https://api.github.com/users/jsmith/events{/privacy}",
    "received_events_url": "https://api.github.com/users/jsmith/received_events",
    "type": "User",
    "site_admin": false
  }
}

@ -0,0 +1,133 @@
{
  "ref": "refs/heads/master",
  "before": "9ea43cab474709d4a61afb7e3340de1ffc405b41",
  "after": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
  "created": false,
  "deleted": false,
  "forced": false,
  "base_ref": null,
  "compare": "https://github.com/jsmith/anothertest/compare/9ea43cab4747...410f4cdf8ff0",
  "commits": [
    {
      "id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
      "distinct": true,
      "message": "Update Dockerfile",
      "timestamp": "2015-09-11T14:26:16-04:00",
      "url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
      "added": [],
      "removed": [],
      "modified": [
        "Dockerfile"
      ]
    }
  ],
  "head_commit": {
    "id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
    "distinct": true,
    "message": "Update Dockerfile",
    "timestamp": "2015-09-11T14:26:16-04:00",
    "url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
    "added": [],
    "removed": [],
    "modified": [
      "Dockerfile"
    ]
  },
  "repository": {
    "id": 12345678,
    "name": "anothertest",
    "full_name": "jsmith/anothertest",
    "owner": {
      "name": "jsmith",
      "email": "jsmith@users.noreply.github.com"
    },
    "private": false,
    "html_url": "https://github.com/jsmith/anothertest",
    "description": "",
    "fork": false,
    "url": "https://github.com/jsmith/anothertest",
    "forks_url": "https://api.github.com/repos/jsmith/anothertest/forks",
    "keys_url": "https://api.github.com/repos/jsmith/anothertest/keys{/key_id}",
    "collaborators_url": "https://api.github.com/repos/jsmith/anothertest/collaborators{/collaborator}",
    "teams_url": "https://api.github.com/repos/jsmith/anothertest/teams",
    "hooks_url": "https://api.github.com/repos/jsmith/anothertest/hooks",
    "issue_events_url": "https://api.github.com/repos/jsmith/anothertest/issues/events{/number}",
    "events_url": "https://api.github.com/repos/jsmith/anothertest/events",
    "assignees_url": "https://api.github.com/repos/jsmith/anothertest/assignees{/user}",
    "branches_url": "https://api.github.com/repos/jsmith/anothertest/branches{/branch}",
    "tags_url": "https://api.github.com/repos/jsmith/anothertest/tags",
    "blobs_url": "https://api.github.com/repos/jsmith/anothertest/git/blobs{/sha}",
    "git_tags_url": "https://api.github.com/repos/jsmith/anothertest/git/tags{/sha}",
    "git_refs_url": "https://api.github.com/repos/jsmith/anothertest/git/refs{/sha}",
    "trees_url": "https://api.github.com/repos/jsmith/anothertest/git/trees{/sha}",
    "statuses_url": "https://api.github.com/repos/jsmith/anothertest/statuses/{sha}",
    "languages_url": "https://api.github.com/repos/jsmith/anothertest/languages",
    "stargazers_url": "https://api.github.com/repos/jsmith/anothertest/stargazers",
    "contributors_url": "https://api.github.com/repos/jsmith/anothertest/contributors",
    "subscribers_url": "https://api.github.com/repos/jsmith/anothertest/subscribers",
    "subscription_url": "https://api.github.com/repos/jsmith/anothertest/subscription",
    "commits_url": "https://api.github.com/repos/jsmith/anothertest/commits{/sha}",
    "git_commits_url": "https://api.github.com/repos/jsmith/anothertest/git/commits{/sha}",
    "comments_url": "https://api.github.com/repos/jsmith/anothertest/comments{/number}",
    "issue_comment_url": "https://api.github.com/repos/jsmith/anothertest/issues/comments{/number}",
    "contents_url": "https://api.github.com/repos/jsmith/anothertest/contents/{+path}",
    "compare_url": "https://api.github.com/repos/jsmith/anothertest/compare/{base}...{head}",
    "merges_url": "https://api.github.com/repos/jsmith/anothertest/merges",
    "archive_url": "https://api.github.com/repos/jsmith/anothertest/{archive_format}{/ref}",
    "downloads_url": "https://api.github.com/repos/jsmith/anothertest/downloads",
    "issues_url": "https://api.github.com/repos/jsmith/anothertest/issues{/number}",
    "pulls_url": "https://api.github.com/repos/jsmith/anothertest/pulls{/number}",
    "milestones_url": "https://api.github.com/repos/jsmith/anothertest/milestones{/number}",
    "notifications_url": "https://api.github.com/repos/jsmith/anothertest/notifications{?since,all,participating}",
    "labels_url": "https://api.github.com/repos/jsmith/anothertest/labels{/name}",
    "releases_url": "https://api.github.com/repos/jsmith/anothertest/releases{/id}",
    "created_at": 1430426945,
    "updated_at": "2015-04-30T20:49:05Z",
    "pushed_at": 1441995976,
    "git_url": "git://github.com/jsmith/anothertest.git",
    "ssh_url": "git@github.com:jsmith/anothertest.git",
    "clone_url": "https://github.com/jsmith/anothertest.git",
    "svn_url": "https://github.com/jsmith/anothertest",
    "homepage": null,
    "size": 144,
    "stargazers_count": 0,
    "watchers_count": 0,
    "language": null,
    "has_issues": true,
    "has_downloads": true,
    "has_wiki": true,
    "has_pages": false,
    "forks_count": 0,
    "mirror_url": null,
    "open_issues_count": 0,
    "forks": 0,
    "open_issues": 0,
    "watchers": 0,
    "default_branch": "master",
    "stargazers": 0,
    "master_branch": "master"
  },
  "pusher": {
    "name": "jsmith",
    "email": "jsmith@users.noreply.github.com"
  },
  "sender": {
    "login": "jsmith",
    "id": 1234567,
    "avatar_url": "https://avatars.githubusercontent.com/u/4073002?v=3",
    "gravatar_id": "",
    "url": "https://api.github.com/users/jsmith",
    "html_url": "https://github.com/jsmith",
    "followers_url": "https://api.github.com/users/jsmith/followers",
    "following_url": "https://api.github.com/users/jsmith/following{/other_user}",
    "gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}",
    "starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}",
    "subscriptions_url": "https://api.github.com/users/jsmith/subscriptions",
    "organizations_url": "https://api.github.com/users/jsmith/orgs",
    "repos_url": "https://api.github.com/users/jsmith/repos",
    "events_url": "https://api.github.com/users/jsmith/events{/privacy}",
    "received_events_url": "https://api.github.com/users/jsmith/received_events",
    "type": "User",
    "site_admin": false
  }
}

@ -0,0 +1,149 @@
{
  "ref": "refs/heads/master",
  "before": "9716b516939221dc754a056e0f9ddf599e71d4b8",
  "after": "118b07121695d9f2e40a5ff264fdcc2917680870",
  "created": false,
  "deleted": false,
  "forced": false,
  "base_ref": null,
  "compare": "https://github.com/jsmith/docker-test/compare/9716b5169392...118b07121695",
  "commits": [
    {
      "id": "118b07121695d9f2e40a5ff264fdcc2917680870",
      "distinct": true,
      "message": "Fail",
      "timestamp": "2015-09-25T14:55:11-04:00",
      "url": "https://github.com/jsmith/docker-test/commit/118b07121695d9f2e40a5ff264fdcc2917680870",
      "author": {
        "name": "John Smith",
        "email": "j@smith.com"
      },
      "committer": {
        "name": "John Smith",
        "email": "j@smith.com"
      },
      "added": [],
      "removed": [],
      "modified": [
        "README.md"
      ]
    }
  ],
  "head_commit": {
    "id": "118b07121695d9f2e40a5ff264fdcc2917680870",
    "distinct": true,
    "message": "Fail",
    "timestamp": "2015-09-25T14:55:11-04:00",
    "url": "https://github.com/jsmith/docker-test/commit/118b07121695d9f2e40a5ff264fdcc2917680870",
    "author": {
      "name": "John Smith",
      "email": "j@smith.com"
    },
    "committer": {
      "name": "John Smith",
      "email": "j@smith.com"
    },
    "added": [],
    "removed": [],
    "modified": [
      "README.md"
    ]
  },
  "repository": {
    "id": 1234567,
    "name": "docker-test",
    "full_name": "jsmith/docker-test",
    "owner": {
      "name": "jsmith",
      "email": "j@smith.com"
    },
    "private": false,
    "html_url": "https://github.com/jsmith/docker-test",
    "description": "",
    "fork": false,
    "url": "https://github.com/jsmith/docker-test",
    "forks_url": "https://api.github.com/repos/jsmith/docker-test/forks",
    "keys_url": "https://api.github.com/repos/jsmith/docker-test/keys{/key_id}",
    "collaborators_url": "https://api.github.com/repos/jsmith/docker-test/collaborators{/collaborator}",
    "teams_url": "https://api.github.com/repos/jsmith/docker-test/teams",
    "hooks_url": "https://api.github.com/repos/jsmith/docker-test/hooks",
    "issue_events_url": "https://api.github.com/repos/jsmith/docker-test/issues/events{/number}",
    "events_url": "https://api.github.com/repos/jsmith/docker-test/events",
    "assignees_url": "https://api.github.com/repos/jsmith/docker-test/assignees{/user}",
    "branches_url": "https://api.github.com/repos/jsmith/docker-test/branches{/branch}",
    "tags_url": "https://api.github.com/repos/jsmith/docker-test/tags",
    "blobs_url": "https://api.github.com/repos/jsmith/docker-test/git/blobs{/sha}",
    "git_tags_url": "https://api.github.com/repos/jsmith/docker-test/git/tags{/sha}",
    "git_refs_url": "https://api.github.com/repos/jsmith/docker-test/git/refs{/sha}",
    "trees_url": "https://api.github.com/repos/jsmith/docker-test/git/trees{/sha}",
    "statuses_url": "https://api.github.com/repos/jsmith/docker-test/statuses/{sha}",
    "languages_url": "https://api.github.com/repos/jsmith/docker-test/languages",
    "stargazers_url": "https://api.github.com/repos/jsmith/docker-test/stargazers",
    "contributors_url": "https://api.github.com/repos/jsmith/docker-test/contributors",
    "subscribers_url": "https://api.github.com/repos/jsmith/docker-test/subscribers",
    "subscription_url": "https://api.github.com/repos/jsmith/docker-test/subscription",
    "commits_url": "https://api.github.com/repos/jsmith/docker-test/commits{/sha}",
    "git_commits_url": "https://api.github.com/repos/jsmith/docker-test/git/commits{/sha}",
    "comments_url": "https://api.github.com/repos/jsmith/docker-test/comments{/number}",
    "issue_comment_url": "https://api.github.com/repos/jsmith/docker-test/issues/comments{/number}",
    "contents_url": "https://api.github.com/repos/jsmith/docker-test/contents/{+path}",
    "compare_url": "https://api.github.com/repos/jsmith/docker-test/compare/{base}...{head}",
    "merges_url": "https://api.github.com/repos/jsmith/docker-test/merges",
    "archive_url": "https://api.github.com/repos/jsmith/docker-test/{archive_format}{/ref}",
    "downloads_url": "https://api.github.com/repos/jsmith/docker-test/downloads",
    "issues_url": "https://api.github.com/repos/jsmith/docker-test/issues{/number}",
    "pulls_url": "https://api.github.com/repos/jsmith/docker-test/pulls{/number}",
    "milestones_url": "https://api.github.com/repos/jsmith/docker-test/milestones{/number}",
    "notifications_url": "https://api.github.com/repos/jsmith/docker-test/notifications{?since,all,participating}",
    "labels_url": "https://api.github.com/repos/jsmith/docker-test/labels{/name}",
    "releases_url": "https://api.github.com/repos/jsmith/docker-test/releases{/id}",
    "created_at": 1442254053,
    "updated_at": "2015-09-14T18:07:33Z",
    "pushed_at": 1443207315,
    "git_url": "git://github.com/jsmith/docker-test.git",
    "ssh_url": "git@github.com:jsmith/docker-test.git",
    "clone_url": "https://github.com/jsmith/docker-test.git",
    "svn_url": "https://github.com/jsmith/docker-test",
    "homepage": null,
    "size": 108,
    "stargazers_count": 0,
    "watchers_count": 0,
    "language": null,
    "has_issues": true,
    "has_downloads": true,
    "has_wiki": true,
    "has_pages": false,
    "forks_count": 0,
    "mirror_url": null,
    "open_issues_count": 0,
    "forks": 0,
    "open_issues": 0,
    "watchers": 0,
    "default_branch": "master",
    "stargazers": 0,
    "master_branch": "master"
  },
  "pusher": {
    "name": "jsmith",
    "email": "j@smith.com"
  },
  "sender": {
    "login": "jsmith",
    "id": 1234567,
    "avatar_url": "https://avatars.githubusercontent.com/u/1234567?v=3",
    "gravatar_id": "",
    "url": "https://api.github.com/users/jsmith",
    "html_url": "https://github.com/jsmith",
    "followers_url": "https://api.github.com/users/jsmith/followers",
    "following_url": "https://api.github.com/users/jsmith/following{/other_user}",
    "gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}",
    "starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}",
    "subscriptions_url": "https://api.github.com/users/jsmith/subscriptions",
    "organizations_url": "https://api.github.com/users/jsmith/orgs",
    "repos_url": "https://api.github.com/users/jsmith/repos",
    "events_url": "https://api.github.com/users/jsmith/events{/privacy}",
    "received_events_url": "https://api.github.com/users/jsmith/received_events",
    "type": "User",
    "site_admin": false
  }
}

@ -0,0 +1,153 @@
{
  "ref": "refs/heads/slash/branch",
  "before": "9ea43cab474709d4a61afb7e3340de1ffc405b41",
  "after": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
  "created": false,
  "deleted": false,
  "forced": false,
  "base_ref": null,
  "compare": "https://github.com/jsmith/anothertest/compare/9ea43cab4747...410f4cdf8ff0",
  "commits": [
    {
      "id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
      "distinct": true,
      "message": "Update Dockerfile",
      "timestamp": "2015-09-11T14:26:16-04:00",
      "url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
      "author": {
        "name": "jsmith",
        "email": "jsmith@users.noreply.github.com",
        "username": "jsmith"
      },
      "committer": {
        "name": "jsmith",
        "email": "jsmith@users.noreply.github.com",
        "username": "jsmith"
      },
      "added": [],
      "removed": [],
      "modified": [
        "Dockerfile"
      ]
    }
  ],
  "head_commit": {
    "id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
    "distinct": true,
    "message": "Update Dockerfile",
    "timestamp": "2015-09-11T14:26:16-04:00",
    "url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
    "author": {
      "name": "jsmith",
      "email": "jsmith@users.noreply.github.com",
      "username": "jsmith"
    },
    "committer": {
      "name": "jsmith",
      "email": "jsmith@users.noreply.github.com",
      "username": "jsmith"
    },
    "added": [],
    "removed": [],
    "modified": [
      "Dockerfile"
    ]
  },
  "repository": {
    "id": 1234567,
    "name": "anothertest",
    "full_name": "jsmith/anothertest",
    "owner": {
      "name": "jsmith",
      "email": "jsmith@users.noreply.github.com"
    },
    "private": false,
    "html_url": "https://github.com/jsmith/anothertest",
    "description": "",
    "fork": false,
    "url": "https://github.com/jsmith/anothertest",
    "forks_url": "https://api.github.com/repos/jsmith/anothertest/forks",
    "keys_url": "https://api.github.com/repos/jsmith/anothertest/keys{/key_id}",
    "collaborators_url": "https://api.github.com/repos/jsmith/anothertest/collaborators{/collaborator}",
    "teams_url": "https://api.github.com/repos/jsmith/anothertest/teams",
    "hooks_url": "https://api.github.com/repos/jsmith/anothertest/hooks",
    "issue_events_url": "https://api.github.com/repos/jsmith/anothertest/issues/events{/number}",
    "events_url": "https://api.github.com/repos/jsmith/anothertest/events",
    "assignees_url": "https://api.github.com/repos/jsmith/anothertest/assignees{/user}",
    "branches_url": "https://api.github.com/repos/jsmith/anothertest/branches{/branch}",
    "tags_url": "https://api.github.com/repos/jsmith/anothertest/tags",
    "blobs_url": "https://api.github.com/repos/jsmith/anothertest/git/blobs{/sha}",
    "git_tags_url": "https://api.github.com/repos/jsmith/anothertest/git/tags{/sha}",
    "git_refs_url": "https://api.github.com/repos/jsmith/anothertest/git/refs{/sha}",
    "trees_url": "https://api.github.com/repos/jsmith/anothertest/git/trees{/sha}",
    "statuses_url": "https://api.github.com/repos/jsmith/anothertest/statuses/{sha}",
    "languages_url": "https://api.github.com/repos/jsmith/anothertest/languages",
    "stargazers_url": "https://api.github.com/repos/jsmith/anothertest/stargazers",
    "contributors_url": "https://api.github.com/repos/jsmith/anothertest/contributors",
    "subscribers_url": "https://api.github.com/repos/jsmith/anothertest/subscribers",
    "subscription_url": "https://api.github.com/repos/jsmith/anothertest/subscription",
    "commits_url": "https://api.github.com/repos/jsmith/anothertest/commits{/sha}",
    "git_commits_url": "https://api.github.com/repos/jsmith/anothertest/git/commits{/sha}",
    "comments_url": "https://api.github.com/repos/jsmith/anothertest/comments{/number}",
    "issue_comment_url": "https://api.github.com/repos/jsmith/anothertest/issues/comments{/number}",
    "contents_url": "https://api.github.com/repos/jsmith/anothertest/contents/{+path}",
    "compare_url": "https://api.github.com/repos/jsmith/anothertest/compare/{base}...{head}",
    "merges_url": "https://api.github.com/repos/jsmith/anothertest/merges",
    "archive_url": "https://api.github.com/repos/jsmith/anothertest/{archive_format}{/ref}",
    "downloads_url": "https://api.github.com/repos/jsmith/anothertest/downloads",
    "issues_url": "https://api.github.com/repos/jsmith/anothertest/issues{/number}",
    "pulls_url": "https://api.github.com/repos/jsmith/anothertest/pulls{/number}",
    "milestones_url": "https://api.github.com/repos/jsmith/anothertest/milestones{/number}",
    "notifications_url": "https://api.github.com/repos/jsmith/anothertest/notifications{?since,all,participating}",
    "labels_url": "https://api.github.com/repos/jsmith/anothertest/labels{/name}",
    "releases_url": "https://api.github.com/repos/jsmith/anothertest/releases{/id}",
    "created_at": 1430426945,
    "updated_at": "2015-04-30T20:49:05Z",
    "pushed_at": 1441995976,
    "git_url": "git://github.com/jsmith/anothertest.git",
    "ssh_url": "git@github.com:jsmith/anothertest.git",
    "clone_url": "https://github.com/jsmith/anothertest.git",
    "svn_url": "https://github.com/jsmith/anothertest",
    "homepage": null,
    "size": 144,
    "stargazers_count": 0,
    "watchers_count": 0,
    "language": null,
    "has_issues": true,
    "has_downloads": true,
    "has_wiki": true,
    "has_pages": false,
    "forks_count": 0,
    "mirror_url": null,
    "open_issues_count": 0,
    "forks": 0,
    "open_issues": 0,
    "watchers": 0,
    "default_branch": "master",
    "stargazers": 0,
    "master_branch": "master"
  },
  "pusher": {
    "name": "jsmith",
    "email": "jsmith@users.noreply.github.com"
  },
  "sender": {
    "login": "jsmith",
    "id": 1234567,
    "avatar_url": "https://avatars.githubusercontent.com/u/1234567?v=3",
    "gravatar_id": "",
    "url": "https://api.github.com/users/jsmith",
    "html_url": "https://github.com/jsmith",
    "followers_url": "https://api.github.com/users/jsmith/followers",
    "following_url": "https://api.github.com/users/jsmith/following{/other_user}",
    "gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}",
    "starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}",
    "subscriptions_url": "https://api.github.com/users/jsmith/subscriptions",
    "organizations_url": "https://api.github.com/users/jsmith/orgs",
    "repos_url": "https://api.github.com/users/jsmith/repos",
    "events_url": "https://api.github.com/users/jsmith/events{/privacy}",
    "received_events_url": "https://api.github.com/users/jsmith/received_events",
    "type": "User",
    "site_admin": false
  }
}

@ -0,0 +1,54 @@
{
  "object_kind": "push",
  "before": "11fcaca195e8b17ca7e3dc47d9608d5b6b892f45",
  "after": "fb88379ee45de28a0a4590fddcbd8eff8b36026e",
  "ref": "refs/heads/master",
  "checkout_sha": "fb88379ee45de28a0a4590fddcbd8eff8b36026e",
  "message": null,
  "user_id": 98765,
  "user_name": "John Smith",
  "user_email": "j@smith.com",
  "project_id": 12344567,
  "repository": {
    "name": "somerepo",
    "url": "git@gitlab.com:jsmith/somerepo.git",
    "description": "",
    "homepage": "https://gitlab.com/jsmith/somerepo",
    "git_http_url": "https://gitlab.com/jsmith/somerepo.git",
    "git_ssh_url": "git@gitlab.com:jsmith/somerepo.git",
    "visibility_level": 20
  },
  "commits": [
    {
      "id": "fb88379ee45de28a0a4590fddcbd8eff8b36026e",
      "message": "Fix link\n",
      "timestamp": "2015-08-13T19:33:18+00:00",
      "url": "https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e",
      "author": {
        "name": "Jane Smith",
        "email": "jane@smith.com"
      }
    },
    {
      "id": "4ca166bc0b511f21fa331873f260f1a7cb38d723",
      "message": "Do Some Cool Thing",
      "timestamp": "2015-08-13T15:52:15+00:00",
      "url": "https://gitlab.com/jsmith/somerepo/commit/4ca166bc0b511f21fa331873f260f1a7cb38d723",
      "author": {
        "name": "Jane Smith",
        "email": "jane@smith.com"
      }
    },
    {
      "id": "11fcaca195e8b17ca7e3dc47d9608d5b6b892f45",
      "message": "Merge another cool thing",
      "timestamp": "2015-08-13T09:31:47+00:00",
      "url": "https://gitlab.com/jsmith/somerepo/commit/11fcaca195e8b17ca7e3dc47d9608d5b6b892f45",
      "author": {
        "name": "Kate Smith",
        "email": "kate@smith.com"
      }
    }
  ],
  "total_commits_count": 3
}

@ -0,0 +1,61 @@
{
  "ref": "refs/tags/fourthtag",
  "user_id": 4797254,
  "object_kind": "tag_push",
  "repository": {
    "git_ssh_url": "git@gitlab.com:someuser/some-test-project.git",
    "name": "Some test project",
    "url": "git@gitlab.com:someuser/some-test-project.git",
    "git_http_url": "https://gitlab.com/someuser/some-test-project.git",
    "visibility_level": 0,
    "homepage": "https://gitlab.com/someuser/some-test-project",
    "description": "Some test project"
  },
  "event_name": "tag_push",
  "commits": [
    {
      "added": [],
      "author": {
        "name": "Some User",
        "email": "someuser@somedomain.com"
      },
      "url": "https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f",
      "timestamp": "2019-10-17T18:07:48Z",
      "message": "Update Dockerfile",
      "removed": [],
      "modified": [
        "Dockerfile"
      ],
      "id": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f"
    }
  ],
  "after": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f",
  "project": {
    "git_ssh_url": "git@gitlab.com:someuser/some-test-project.git",
    "ci_config_path": null,
    "web_url": "https://gitlab.com/someuser/some-test-project",
    "description": "Some test project",
    "url": "git@gitlab.com:someuser/some-test-project.git",
    "namespace": "Some User",
    "default_branch": "master",
    "homepage": "https://gitlab.com/someuser/some-test-project",
    "git_http_url": "https://gitlab.com/someuser/some-test-project.git",
    "avatar_url": null,
    "ssh_url": "git@gitlab.com:someuser/some-test-project.git",
    "http_url": "https://gitlab.com/someuser/some-test-project.git",
    "path_with_namespace": "someuser/some-test-project",
    "visibility_level": 0,
    "id": 14838571,
    "name": "Some test project"
  },
  "user_username": "someuser",
  "checkout_sha": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f",
  "total_commits_count": 1,
  "before": "0000000000000000000000000000000000000000",
  "user_avatar": "https://secure.gravatar.com/avatar/0ea05bdf5c3f2cb8aac782a4a2ac3177?s=80&d=identicon",
  "message": "",
  "project_id": 14838571,
  "user_name": "Some User",
  "user_email": "",
  "push_options": {}
}

@ -0,0 +1,100 @@
{
  "object_kind": "push",
  "event_name": "push",
  "before": "0da5b5ebb397f0a8569c97f28e266c718607e8da",
  "after": "9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53",
  "ref": "refs\/heads\/master",
  "checkout_sha": "9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53",
  "message": null,
  "user_id": 750047,
  "user_name": "John Smith",
  "user_email": "j@smith.com",
  "user_avatar": "https:\/\/secure.gravatar.com\/avatar\/32784623495678234678234?s=80&d=identicon",
  "project_id": 1756744,
  "project": {
    "name": "some-test-project",
    "description": "",
    "web_url": "https:\/\/gitlab.com\/jsmith\/some-test-project",
    "avatar_url": null,
    "git_ssh_url": "git@gitlab.com:jsmith\/some-test-project.git",
    "git_http_url": "https:\/\/gitlab.com\/jsmith\/some-test-project.git",
    "namespace": "jsmith",
    "visibility_level": 0,
    "path_with_namespace": "jsmith\/some-test-project",
    "default_branch": "master",
    "homepage": "https:\/\/gitlab.com\/jsmith\/some-test-project",
    "url": "git@gitlab.com:jsmith\/some-test-project.git",
    "ssh_url": "git@gitlab.com:jsmith\/some-test-project.git",
    "http_url": "https:\/\/gitlab.com\/jsmith\/some-test-project.git"
  },
  "commits": [
    {
      "id": "f00a0a6a71118721ac1f586bf79650170042609f",
      "message": "Add changelog",
      "timestamp": "2016-09-29T14:59:23+00:00",
      "url": "https:\/\/gitlab.com\/jsmith\/some-test-project\/commit\/f00a0a6a71118721ac1f586bf79650170042609f",
      "author": {
        "name": "John Smith",
        "email": "j@smith.com"
      },
      "added": [
        "CHANGELOG"
      ],
      "modified": [

      ],
      "removed": [

      ]
    },
    {
      "id": "cc66287314cb154c986665a6c29377ef42edee60",
      "message": "Add new file",
      "timestamp": "2016-09-29T15:02:01+00:00",
      "url": "https:\/\/gitlab.com\/jsmith\/some-test-project\/commit\/cc66287314cb154c986665a6c29377ef42edee60",
      "author": {
        "name": "John Smith",
        "email": "j@smith.com"
      },
      "added": [
        "YetAnotherFIle"
      ],
      "modified": [

      ],
      "removed": [

      ]
    },
    {
      "id": "9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53",
      "message": "Merge branch 'foobar' into 'master'\r\n\r\nAdd changelog\r\n\r\nSome merge thing\r\n\r\nSee merge request !1",
      "timestamp": "2016-09-29T15:02:41+00:00",
      "url": "https:\/\/gitlab.com\/jsmith\/some-test-project\/commit\/9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53",
      "author": {
        "name": "John Smith",
        "email": "j@smith.com"
      },
      "added": [
        "CHANGELOG",
        "YetAnotherFIle"
      ],
      "modified": [

      ],
      "removed": [

      ]
    }
  ],
  "total_commits_count": 3,
  "repository": {
    "name": "some-test-project",
    "url": "git@gitlab.com:jsmith\/some-test-project.git",
    "description": "",
    "homepage": "https:\/\/gitlab.com\/jsmith\/some-test-project",
    "git_http_url": "https:\/\/gitlab.com\/jsmith\/some-test-project.git",
    "git_ssh_url": "git@gitlab.com:jsmith\/some-test-project.git",
    "visibility_level": 0
  }
}

@ -0,0 +1,44 @@
{
  "object_kind": "push",
  "event_name": "push",
  "before": "cc66287314cb154c986665a6c29377ef42edee60",
  "after": "0000000000000000000000000000000000000000",
  "ref": "refs\/heads\/foobar",
  "checkout_sha": null,
  "message": null,
  "user_id": 750047,
  "user_name": "John Smith",
  "user_email": "j@smith.com",
  "user_avatar": "https:\/\/secure.gravatar.com\/avatar\/2348972348972348973?s=80&d=identicon",
  "project_id": 1756744,
  "project": {
    "name": "some-test-project",
    "description": "",
    "web_url": "https:\/\/gitlab.com\/jsmith\/some-test-project",
    "avatar_url": null,
    "git_ssh_url": "git@gitlab.com:jsmith\/some-test-project.git",
    "git_http_url": "https:\/\/gitlab.com\/jsmith\/some-test-project.git",
    "namespace": "jsmith",
    "visibility_level": 0,
    "path_with_namespace": "jsmith\/some-test-project",
    "default_branch": "master",
    "homepage": "https:\/\/gitlab.com\/jsmith\/some-test-project",
    "url": "git@gitlab.com:jsmith\/some-test-project.git",
    "ssh_url": "git@gitlab.com:jsmith\/some-test-project.git",
    "http_url": "https:\/\/gitlab.com\/jsmith\/some-test-project.git"
  },
  "commits": [

  ],
  "total_commits_count": 0,
  "repository": {
    "name": "some-test-project",
    "url": "git@gitlab.com:jsmith\/some-test-project.git",
    "description": "",
    "homepage": "https:\/\/gitlab.com\/jsmith\/some-test-project",
    "git_http_url": "https:\/\/gitlab.com\/jsmith\/some-test-project.git",
    "git_ssh_url": "git@gitlab.com:jsmith\/some-test-project.git",
    "visibility_level": 0
  }
}

@ -0,0 +1,61 @@
{
  "after": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f",
  "before": "0000000000000000000000000000000000000000",
  "checkout_sha": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f",
  "commits": [
    {
      "added": [],
      "author": {
        "name": "Some User",
        "email": "some.user@someplace.com"
      },
      "id": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f",
      "message": "Update Dockerfile",
      "modified": [
        "Dockerfile"
      ],
      "removed": [],
      "timestamp": "2019-10-17T18:07:48Z",
      "url": "https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f"
    }
  ],
  "event_name": "tag_push",
  "message": "",
  "object_kind": "tag_push",
  "project": {
    "avatar_url": null,
    "ci_config_path": null,
    "default_branch": "master",
    "description": "Some test project",
    "git_http_url": "https://gitlab.com/someuser/some-test-project.git",
    "git_ssh_url": "git@gitlab.com:someuser/some-test-project.git",
    "homepage": "https://gitlab.com/someuser/some-test-project",
    "http_url": "https://gitlab.com/someuser/some-test-project.git",
    "id": 14838571,
    "name": "Some test project",
    "namespace": "Joey Schorr",
    "path_with_namespace": "someuser/some-test-project",
    "ssh_url": "git@gitlab.com:someuser/some-test-project.git",
    "url": "git@gitlab.com:someuser/some-test-project.git",
    "visibility_level": 0,
    "web_url": "https://gitlab.com/someuser/some-test-project"
  },
  "project_id": 14838571,
  "push_options": {},
  "ref": "refs/tags/thirdtag",
  "repository": {
    "description": "Some test project",
    "git_http_url": "https://gitlab.com/someuser/some-test-project.git",
    "git_ssh_url": "git@gitlab.com:someuser/some-test-project.git",
    "homepage": "https://gitlab.com/someuser/some-test-project",
    "name": "Some test project",
    "url": "git@gitlab.com:someuser/some-test-project.git",
    "visibility_level": 0
  },
  "total_commits_count": 1,
  "user_avatar": "https://secure.gravatar.com/avatar/someavatar?s=80&d=identicon",
  "user_email": "",
  "user_id": 4797254,
  "user_name": "Some User",
  "user_username": "someuser"
}

@ -0,0 +1,38 @@
{
  "object_kind": "tag_push",
  "before": "0000000000000000000000000000000000000000",
  "after": "82b3d5ae55f7080f1e6022629cdb57bfae7cccc7",
  "ref": "refs/tags/v1.0.0",
  "checkout_sha": null,
  "user_id": 1,
  "user_name": "John Smith",
  "user_avatar": "https://s.gravatar.com/avatar/d4c74594d841139328695756648b6bd6?s=8://s.gravatar.com/avatar/d4c74594d841139328695756648b6bd6?s=80",
  "project_id": 1,
  "project":{
    "name":"Example",
    "description":"",
    "web_url":"http://example.com/jsmith/example",
    "avatar_url":null,
    "git_ssh_url":"git@example.com:jsmith/example.git",
    "git_http_url":"http://example.com/jsmith/example.git",
    "namespace":"Jsmith",
    "visibility_level":0,
    "path_with_namespace":"jsmith/example",
    "default_branch":"master",
    "homepage":"http://example.com/jsmith/example",
    "url":"git@example.com:jsmith/example.git",
    "ssh_url":"git@example.com:jsmith/example.git",
    "http_url":"http://example.com/jsmith/example.git"
  },
  "repository":{
    "name": "Example",
    "url": "ssh://git@example.com/jsmith/example.git",
    "description": "",
    "homepage": "http://example.com/jsmith/example",
    "git_http_url":"http://example.com/jsmith/example.git",
    "git_ssh_url":"git@example.com:jsmith/example.git",
    "visibility_level":0
  },
  "commits": [],
  "total_commits_count": 0
}

@@ -3,37 +3,43 @@ import io
import logging
import re

-class InvalidPayloadException(Exception):
+class TriggerException(Exception):
  pass

-class BuildArchiveException(Exception):
+class TriggerAuthException(TriggerException):
  pass

-class InvalidServiceException(Exception):
+class InvalidPayloadException(TriggerException):
  pass

-class TriggerActivationException(Exception):
+class BuildArchiveException(TriggerException):
  pass

-class TriggerDeactivationException(Exception):
+class InvalidServiceException(TriggerException):
  pass

-class TriggerStartException(Exception):
+class TriggerActivationException(TriggerException):
  pass

-class ValidationRequestException(Exception):
+class TriggerDeactivationException(TriggerException):
  pass

-class SkipRequestException(Exception):
+class TriggerStartException(TriggerException):
  pass

-class EmptyRepositoryException(Exception):
+class ValidationRequestException(TriggerException):
  pass

-class RepositoryReadException(Exception):
+class SkipRequestException(TriggerException):
  pass

-class TriggerProviderException(Exception):
+class EmptyRepositoryException(TriggerException):
  pass

+class RepositoryReadException(TriggerException):
+  pass
+
+class TriggerProviderException(TriggerException):
+  pass

logger = logging.getLogger(__name__)
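The hunk above reroots every build-trigger exception under a shared TriggerException base and introduces TriggerAuthException, so a single except clause can now cover any trigger failure. A minimal sketch of the pattern this enables (handle_event and its body are illustrative assumptions, not code from this repository):

import logging

logger = logging.getLogger(__name__)

class TriggerException(Exception):
  pass

class InvalidPayloadException(TriggerException):
  pass

# Illustrative caller: any exception in the hierarchy is caught in one place.
def handle_event(payload):
  if "ref" not in payload:
    raise InvalidPayloadException("payload is missing 'ref'")
  return payload["ref"]

def safe_handle(payload):
  try:
    return handle_event(payload)
  except TriggerException as te:
    # One handler now covers every trigger error, old and newly added.
    logger.warning("trigger failed: %s", te)
    return None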
@@ -1,209 +0,0 @@
resource_types:
- name: pull-request
  type: docker-image
  source:
    repository: quay.io/quay/pr-resource
    username: {{quay-robot-username}}
    password: {{quay-robot-password}}

resources:
- name: build-image
  type: docker-image
  source:
    repository: quay.io/quay/quay-build-image
    username: {{quay-robot-username}}
    password: {{quay-robot-password}}

- name: quay-pull-request
  type: pull-request
  source:
    access_token: {{quay-github-token}}
    private_key: {{quay-git-private-key}}
    repo: coreos-inc/quay
    uri: git@github.com:coreos-inc/quay.git
    base: master
    disable_forks: false

jobs:
- name: style
  max_in_flight: 4
  plan:
  - get: quay-pull-request
    trigger: true
    version: every

  - get: build-image

  - put: quay-pull-request
    params:
      path: quay-pull-request
      context: style
      status: pending

  - task: style
    image: build-image
    file: quay-pull-request/ci/tasks/style.yaml
    on_success:
      put: quay-pull-request
      params:
        path: quay-pull-request
        context: style
        status: success
    on_failure:
      put: quay-pull-request
      params:
        path: quay-pull-request
        context: style
        status: failure

- name: karma
  max_in_flight: 4
  plan:
  - get: quay-pull-request
    trigger: true
    version: every

  - get: build-image

  - put: quay-pull-request
    params:
      path: quay-pull-request
      context: karma
      status: pending

  - task: karma
    image: build-image
    file: quay-pull-request/ci/tasks/karma.yaml
    on_success:
      put: quay-pull-request
      params:
        path: quay-pull-request
        context: karma
        status: success
    on_failure:
      put: quay-pull-request
      params:
        path: quay-pull-request
        context: karma
        status: failure

- name: unit
  max_in_flight: 4
  plan:
  - get: quay-pull-request
    trigger: true
    version: every

  - get: build-image

  - put: quay-pull-request
    params:
      path: quay-pull-request
      context: unit
      status: pending

  - task: unit
    image: build-image
    file: quay-pull-request/ci/tasks/unit.yaml
    on_success:
      put: quay-pull-request
      params:
        path: quay-pull-request
        context: unit
        status: success
    on_failure:
      put: quay-pull-request
      params:
        path: quay-pull-request
        context: unit
        status: failure

- name: registry
  plan:
  - get: quay-pull-request
    trigger: true
    version: every

  - get: build-image

  - put: quay-pull-request
    params:
      path: quay-pull-request
      context: registry
      status: pending

  - task: registry
    image: build-image
    file: quay-pull-request/ci/tasks/registry.yaml
    on_success:
      put: quay-pull-request
      params:
        path: quay-pull-request
        context: registry
        status: success
    on_failure:
      put: quay-pull-request
      params:
        path: quay-pull-request
        context: registry
        status: failure

- name: mysql
  plan:
  - get: quay-pull-request
    trigger: true
    version: every

  - get: build-image

  - put: quay-pull-request
    params:
      path: quay-pull-request
      context: mysql
      status: pending

  - task: mysql
    image: build-image
    file: quay-pull-request/ci/tasks/mysql.yaml
    on_success:
      put: quay-pull-request
      params:
        path: quay-pull-request
        context: mysql
        status: success
    on_failure:
      put: quay-pull-request
      params:
        path: quay-pull-request
        context: mysql
        status: failure

- name: postgres
  plan:
  - get: quay-pull-request
    trigger: true
    version: every

  - get: build-image

  - put: quay-pull-request
    params:
      path: quay-pull-request
      context: postgres
      status: pending

  - task: postgres
    image: build-image
    file: quay-pull-request/ci/tasks/postgres.yaml
    on_success:
      put: quay-pull-request
      params:
        path: quay-pull-request
        context: postgres
        status: success
    on_failure:
      put: quay-pull-request
      params:
        path: quay-pull-request
        context: postgres
        status: failure
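Each job in the deleted pipeline repeats the same shape: report a pending status on the pull request, run a single task file, then report success or failure. A hedged sketch of generating those blocks instead of hand-writing them (generate_job and its output layout are illustrative assumptions, not part of this repository):

# Illustrative only: builds the repeated pending -> task -> success/failure
# job structure as plain dicts, mirroring the YAML above.
def generate_job(context, max_in_flight=None):
  def status_put(status):
    return {
      "put": "quay-pull-request",
      "params": {"path": "quay-pull-request", "context": context, "status": status},
    }
  job = {
    "name": context,
    "plan": [
      {"get": "quay-pull-request", "trigger": True, "version": "every"},
      {"get": "build-image"},
      status_put("pending"),
      {
        "task": context,
        "image": "build-image",
        "file": "quay-pull-request/ci/tasks/%s.yaml" % context,
        "on_success": status_put("success"),
        "on_failure": status_put("failure"),
      },
    ],
  }
  if max_in_flight is not None:
    job["max_in_flight"] = max_in_flight
  return job

jobs = ([generate_job(c, max_in_flight=4) for c in ("style", "karma", "unit")]
        + [generate_job(c) for c in ("registry", "mysql", "postgres")])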
@@ -1,13 +0,0 @@
---
platform: linux
inputs:
- name: quay-pull-request
run:
  path: /bin/sh
  args:
  - -c
  - |
    set -eux
    cd quay-pull-request
    yarn install --ignore-engines
    yarn test
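For local debugging, a task file like this one can usually be run one-off with Concourse's fly CLI; the target name and input path below are placeholders:

fly -t <target> execute -c ci/tasks/karma.yaml -i quay-pull-request=.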