Compare commits

...

No commits in common. "old_history" and "master" have entirely different histories.

666 changed files with 29021 additions and 9549 deletions


@@ -1,4 +0,0 @@
FROM nginx:alpine
MAINTAINER Antoine Legrand <2t.antoine@gmail.com>
COPY . /usr/share/nginx/html
EXPOSE 80


@@ -32,3 +32,4 @@ static/build/**
.gitlab-ci.*
docker-compose.yaml
test/dockerclients/**
node_modules

.gitignore (vendored, 1 line changed)

@@ -22,7 +22,6 @@ htmlcov
.cache
.npm-debug.log
Dockerfile-e
build/
.vscode
*.iml
.DS_Store


@@ -1,5 +1,6 @@
---
language: python
python: 2.7
sudo: required
@@ -29,7 +30,6 @@ cache:
stages:
- build
- test
- database
- clean
# We should label the steps if Travis ever supports it:
@@ -37,27 +37,61 @@ stages:
jobs:
include:
- stage: build
name: Build
script: scripts/ci build
# To further shard, change the script to shard_X_of_XS and add new steps
- stage: test
script: scripts/ci unit
name: Unit tests (shard 1)
script: scripts/ci unit shard_1_of_2
- stage: test
name: Unit tests (shard 2)
script: scripts/ci unit shard_2_of_2
- stage: test
script: scripts/ci registry
name: Registry tests (shard 1)
script: scripts/ci registry shard_1_of_5
- stage: test
name: Registry tests (shard 2)
script: scripts/ci registry shard_2_of_5
- stage: test
name: Registry tests (shard 3)
script: scripts/ci registry shard_3_of_5
- stage: test
name: Registry tests (shard 4)
script: scripts/ci registry shard_4_of_5
- stage: test
name: Registry tests (shard 5)
script: scripts/ci registry shard_5_of_5
- stage: test
name: Legacy registry tests
script: scripts/ci registry_old
- stage: test
name: Custom TLS certs test
script: scripts/ci certs_test
- stage: database
script: scripts/ci mysql
- stage: test
name: Gunicorn worker test
script: scripts/ci gunicorn_test
- stage: database
script: scripts/ci postgres
- stage: test
name: MySQL unit tests (shard 1)
script: scripts/ci mysql shard_1_of_2
- stage: test
name: MySQL unit tests (shard 2)
script: scripts/ci mysql shard_2_of_2
- stage: test
name: Postgres unit tests (shard 1)
script: scripts/ci postgres shard_1_of_2
- stage: test
name: Postgres unit tests (shard 2)
script: scripts/ci postgres shard_2_of_2
- stage: clean
name: Cleanup
script: scripts/ci clean
notifications:


@@ -1,3 +1,88 @@
### v3.1.2
- Fixed: Repository mirroring properly updates status
- Fixed: Application repositories in public namespaces shown in UI
- Fixed: Description of log operations in UI
- Fixed: Quay V3 upgrade fails with "id field missing from v1Compatibility JSON"
- Fixed: Security token for storage proxy properly URL encoded
### v3.1.1
- Fixed: Quoting of username/password for repository mirror
- Fixed: Changing next sync date in repository mirror UI
- Fixed: Enable cancel button in repository mirror UI
### v3.1.0
- Added: New Repository Mirror functionality to continuously synchronize repositories from external source registries into Quay
- Added: New Repository Mode setting (Normal, Mirrored, Read-Only) to indicate how a repository is updated
- Added: New Quay Setup Operator (Dev Preview) to automate configuring Quay on OpenShift
- Added: Support for using Red Hat OpenShift Container Storage 3 as a Quay storage backend
- Added: Support for using the Crunchy Data Operator to deploy Postgresql as Quay database
- Added: Ability to use build ARGS as first line in Dockerfiles in Quay builds
- Added: New Red Hat color scheme in Quay web UI
- Fixed: Display of repo_verb logs in logs panel
- Fixed: Ensure robot accounts being granted access actually belong to the same namespace
- Fixed: Numerous documentation improvements
### v3.0.5
- Fixed: LDAP config error when user search results exceed 1000 objects ([#1736](https://jira.coreos.com/browse/QUAY-1736))
- Fixed: Remove obsolete 01_copy_syslog_config.sh ([#1768](https://jira.coreos.com/browse/QUAY-1768))
- Fixed: Config tool fails to set up database when password string contains "$" ([#1510](https://jira.coreos.com/browse/QUAY-1510))
- Added: Config flag to disable TLSv1.0 support ([#1726](https://jira.coreos.com/browse/QUAY-1726))
### v3.0.4
- Fixed: Package vulnerability notifications now shown in UI
- Fixed: Error deleting manifest after pushing new tag
- Fixed: Manifest now shown in UI for all types
- Fixed: CSRF rotation corrected
- Fixed: nginx access and error logs now to stdout
### v3.0.3
- Fixed: Security scan notifications endpoint not working (part #2) (#3472)
- Fixed: Exception raised during parallel pushes of same manifest on Postgres (#3478)
- Fixed: Connection pooling was ignoring environment variable (#3480)
- Fixed: Exception when in OAuth approval flow (#3491)
### v3.0.2
- Fixed: Configuration tool now operates in disconnected environments (#3468)
- Fixed: Security scan notifications endpoint not working (#3472)
### v3.0.1
- Fixed: Instance health endpoint (`/health/instance`) (#3467)
### v3.0.0
**IMPORTANT NOTE:** This release is a **major** release and has special upgrade instructions. Please see the upgrade instructions documentation.
- Added: Full support for Docker Manifest Version 2, Schema 2, including support for manifest lists and Windows images
- Added: New, distinct configuration tool for Quay that can be run outside of Quay itself and perform in-place configuration changes
- Added: Disabling of V1 push support by default and support for whitelist-enabling specific namespaces for this legacy protocol (#3398)
- Added: Full support for blob mounting via the Docker protocol (#3057)
- Added: Have all registry operations be disabled if a namespace is disabled (#3091)
- Added: Allow syncing of team members from LDAP/Keystone groups, even if user creation is disabled (#3089)
- Added: Add a feature flag to allow username confirmation to be disabled (#3099)
- Added: New indexes which should result in significant database performance when accessing lists of tags
- Added: Add support for POST on OIDC endpoints, to support those providers that POST back (#3246)
- Added: Add support for configuration of the claims required for OIDC authentication (#3246)
- Added: Have the instance health check verify the disk space available to ensure it doesn't run out and cause problems for nginx (#3241)
- Added: Support for basic auth on security scanner API endpoints (#3255)
- Added: Support for geo-blocking pulls in a namespace from a country (#3300)
- Fixed: Ensure that starred public repositories appear in the starred repositories list (#3098)
- Fixed: Add rate limiting to the catalog endpoint (#3106)
- Fixed: Have the catalog endpoint return empty for a namespace if it is disabled (#3106)
- Fixed: Have user logs start writing to a new LogEntry3 table, which has a BigInteger ID column, to ensure no overflow
- Fixed: Improve loading of action logs to be less jumpy (#3299)
- Fixed: Ensure that all upload segments are deleted in Swift storage engine once no longer necessary (#3260)
- Fixed: Handling of unicode in manifests (#3325)
- Fixed: Unauthorized request handling under podman for public repositories when anonymous access is disabled (#3365)
### v2.9.2
**IMPORTANT NOTE:** This release fixes a bug in which the deletion of namespaces did not result in the deletion of robot accounts under that namespace. While this is not a security issue (no permissions or credentials are leaked), it can appear unusual to users, so an upgrade is highly recommended. This change also includes a migration that cleans up the aforementioned robot accounts, so the migration step can take **several minutes**. Please plan accordingly.


@@ -1,9 +1,14 @@
# vim:ft=dockerfile
FROM centos:7
LABEL maintainer "thomasmckay@redhat.com"
FROM phusion/baseimage:0.10.0
ENV PYTHON_VERSION=2.7 \
PATH=$HOME/.local/bin/:$PATH \
PYTHONUNBUFFERED=1 \
PYTHONIOENCODING=UTF-8 \
LC_ALL=en_US.UTF-8 \
LANG=en_US.UTF-8 \
PIP_NO_CACHE_DIR=off
ENV DEBIAN_FRONTEND noninteractive
ENV HOME /root
ENV QUAYDIR /quay-registry
ENV QUAYCONF /quay-registry/conf
ENV QUAYPATH "."
@@ -11,143 +16,113 @@ ENV QUAYPATH "."
RUN mkdir $QUAYDIR
WORKDIR $QUAYDIR
# This is so we don't break http golang/go#17066
# When Ubuntu has nginx >= 1.11.0 we can switch back.
ENV NGINX_GPGKEY 573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62
RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 \
--keyserver-options timeout=10 --recv-keys "${NGINX_GPGKEY}"
RUN add-apt-repository --enable-source \
"deb http://nginx.org/packages/ubuntu/ xenial nginx"
# Add Yarn repository until it is officially added to Ubuntu
RUN curl -fsSL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
&& add-apt-repository "deb https://dl.yarnpkg.com/debian/ stable main"
RUN curl -fsSL https://deb.nodesource.com/setup_8.x | bash -
# Install system packages
RUN apt-get update && apt-get upgrade -y \
&& apt-get install -y \
dnsmasq \
g++ \
gdb \
gdebi-core \
git \
jpegoptim \
libevent-2.0.5 \
libevent-dev \
libffi-dev \
libfreetype6-dev \
libgpgme11 \
libgpgme11-dev \
libjpeg62 \
libjpeg62-dev \
libjpeg8 \
libldap-2.4-2 \
libldap2-dev \
libmagic1 \
libpq-dev \
libpq5 \
libsasl2-dev \
libsasl2-modules \
memcached \
nginx \
nodejs \
optipng \
openssl \
python-dbg \
python-dev \
python-pip \
python-virtualenv \
yarn=0.22.0-1 \
w3m # 13JUL2018
# Install cfssl
RUN curl -fsSL -o /bin/cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 \
&& curl -fsSL -o /bin/cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 \
&& chmod 0755 /bin/cfssl /bin/cfssljson
# Install jwtproxy
RUN curl -fsSL -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.1/jwtproxy-linux-x64 \
&& chmod +x /usr/local/bin/jwtproxy
# Install prometheus-aggregator
RUN curl -fsSL -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator \
&& chmod +x /usr/local/bin/prometheus-aggregator
# Install python dependencies
# docutils is a setup dependency of botocore required by s3transfer. It's under
# GPLv3, and so should be removed.
COPY requirements.txt requirements-tests.txt ./
RUN virtualenv --distribute venv \
&& venv/bin/pip install -r requirements.txt \
&& venv/bin/pip install -r requirements-tests.txt \
&& (venv/bin/pip uninstall -y docutils || echo "docutils is not installed") \
&& venv/bin/pip freeze
# Check python dependencies for the GPL
# Due to the following bug, pip results must be piped to a file before grepping:
# https://github.com/pypa/pip/pull/3304
RUN venv/bin/pip freeze | grep -v "^-e" | awk -F'==' '{print $1}' | xargs venv/bin/pip --disable-pip-version-check show > pipinfo.txt && \
test -z "$(cat pipinfo.txt | grep GPL | grep -v LGPL)" && \
rm pipinfo.txt
# Install front-end dependencies
COPY static/ package.json tsconfig.json webpack.config.js tslint.json yarn.lock ./
RUN yarn install --ignore-engines
RUN mkdir -p /etc/my_init.d /etc/syslog-ng /usr/local/bin $QUAYDIR/static/fonts $QUAYDIR/static/ldn /usr/local/nginx/logs/
RUN rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /root/.cache
VOLUME ["/var/log", "/datastorage", "/tmp"]
RUN adduser memcached --disabled-login --system
WORKDIR $QUAYDIR
# JS compile
COPY bill-of-materials.json bill-of-materials.json
COPY static static
RUN yarn build \
&& jpegoptim static/img/**/*.jpg \
&& optipng -clobber -quiet static/img/**/*.png
# Config app js compile
COPY config_app/ config_app/
RUN yarn build-config-app
RUN INSTALL_PKGS="\
python27 \
python27-python-pip \
rh-nginx112 rh-nginx112-nginx \
openldap \
scl-utils \
gcc-c++ git \
openldap-devel \
gpgme-devel \
dnsmasq \
memcached \
openssl \
skopeo \
" && \
yum install -y yum-utils && \
yum install -y epel-release centos-release-scl && \
yum -y --setopt=tsflags=nodocs --setopt=skip_missing_names_on_install=False install $INSTALL_PKGS && \
yum -y update && \
yum -y clean all
COPY . .
RUN PYTHONPATH=$QUAYPATH venv/bin/python -m external_libraries
RUN scl enable python27 "\
pip install --upgrade setuptools pip && \
pip install -r requirements.txt --no-cache && \
pip install -r requirements-tests.txt --no-cache && \
pip freeze && \
mkdir -p $QUAYDIR/static/webfonts && \
mkdir -p $QUAYDIR/static/fonts && \
mkdir -p $QUAYDIR/static/ldn && \
PYTHONPATH=$QUAYPATH python -m external_libraries \
"
RUN cp -r $QUAYDIR/static/ldn $QUAYDIR/config_app/static/ldn && \
cp -r $QUAYDIR/static/fonts $QUAYDIR/config_app/static/fonts && \
cp -r $QUAYDIR/static/webfonts $QUAYDIR/config_app/static/webfonts
# Check python dependencies for GPL
# Due to the following bug, pip results must be piped to a file before grepping:
# https://github.com/pypa/pip/pull/3304
# 'docutils' is a setup dependency of botocore required by s3transfer. It's under
# GPLv3, and so is manually removed.
RUN rm -Rf /opt/rh/python27/root/usr/lib/python2.7/site-packages/docutils && \
scl enable python27 "pip freeze" | grep -v '^-e' | awk -F == '{print $1}' | grep -v docutils > piplist.txt && \
scl enable python27 "xargs -a piplist.txt pip --disable-pip-version-check show" > pipinfo.txt && \
test -z "$(cat pipinfo.txt | grep GPL | grep -v LGPL)" && \
rm -f piplist.txt pipinfo.txt
# # Front-end
RUN curl --silent --location https://rpm.nodesource.com/setup_8.x | bash - && \
yum install -y nodejs && \
curl --silent --location https://dl.yarnpkg.com/rpm/yarn.repo | tee /etc/yum.repos.d/yarn.repo && \
rpm --import https://dl.yarnpkg.com/rpm/pubkey.gpg && \
yum install -y yarn && \
yarn install --ignore-engines && \
yarn build && \
yarn build-config-app
# TODO: Build jwtproxy in dist-git
# https://jira.coreos.com/browse/QUAY-1315
RUN curl -fsSL -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.3/jwtproxy-linux-x64 && \
chmod +x /usr/local/bin/jwtproxy
# TODO: Build prometheus-aggregator in dist-git
# https://jira.coreos.com/browse/QUAY-1324
RUN curl -fsSL -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator &&\
chmod +x /usr/local/bin/prometheus-aggregator
# Update local copy of AWS IP Ranges.
RUN curl -fsSL https://ip-ranges.amazonaws.com/ip-ranges.json -o util/ipresolver/aws-ip-ranges.json
# Set up the init system
RUN mkdir -p /etc/my_init.d /etc/syslog-ng /usr/local/bin static/fonts static/ldn /usr/local/nginx/logs/ \
&& cp $QUAYCONF/init/syslog-ng.conf /etc/syslog-ng/ \
&& cp $QUAYCONF/init/logrotate.conf /etc/logrotate.conf \
&& cp .git/HEAD GIT_HEAD \
&& rm -rf /etc/service/syslog-forwarder
RUN ln -s $QUAYCONF /conf && \
mkdir /var/log/nginx && \
ln -sf /dev/stdout /var/log/nginx/access.log && \
ln -sf /dev/stdout /var/log/nginx/error.log && \
chmod -R a+rwx /var/log/nginx
RUN ln -s $QUAYCONF /conf
# Cleanup
RUN UNINSTALL_PKGS="\
gcc-c++ \
openldap-devel \
gpgme-devel \
optipng \
kernel-headers \
" && \
yum remove -y $UNINSTALL_PKGS && \
yum clean all && \
rm -rf /var/cache/yum /tmp/* /var/tmp/* /root/.cache
# Cleanup any NPM-related stuff.
# RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev libgpgme11-dev nodejs jpegoptim optipng w3m \
# && apt-get autoremove -y \
# && apt-get clean
# && rm -rf /root/.npm /.npm /usr/local/lib/node_modules /usr/share/yarn/node_modules \
# /root/node_modules /node_modules /grunt
RUN PYTHONPATH=$QUAYPATH venv/bin/alembic heads | grep -E '^[0-9a-f]+ \(head\)$' > ALEMBIC_HEAD
EXPOSE 8080 8443 7443
RUN ./scripts/detect-config.sh
RUN chgrp -R 0 $QUAYDIR && \
chmod -R g=u $QUAYDIR
EXPOSE 443 8443 80
RUN mkdir /datastorage && chgrp 0 /datastorage && chmod g=u /datastorage && \
mkdir -p /var/log/nginx && chgrp 0 /var/log/nginx && chmod g=u /var/log/nginx && \
mkdir -p /conf/stack && chgrp 0 /conf/stack && chmod g=u /conf/stack && \
mkdir -p /tmp && chgrp 0 /tmp && chmod g=u /tmp && \
chmod g=u /etc/passwd
RUN chgrp 0 /var/opt/rh/rh-nginx112/log/nginx && chmod g=u /var/opt/rh/rh-nginx112/log/nginx
VOLUME ["/var/log", "/datastorage", "/tmp", "/conf/stack"]
ENTRYPOINT ["/quay-registry/quay-entrypoint.sh"]
CMD ["registry"]
# root required to create and install certs
# https://jira.coreos.com/browse/QUAY-1468
# USER 1001

Dockerfile.cirun (new file, 8 lines)

@@ -0,0 +1,8 @@
FROM quay-ci-base
RUN mkdir -p conf/stack
RUN rm -rf test/data/test.db
ENV ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE remove-old-fields
ADD cirun.config.yaml conf/stack/config.yaml
RUN /usr/bin/scl enable python27 rh-nginx112 "LOGGING_LEVEL=INFO python initdb.py"
ENTRYPOINT ["/quay-registry/quay-entrypoint.sh"]
CMD ["registry"]

Dockerfile.osbs (new file, 142 lines)

@@ -0,0 +1,142 @@
FROM registry.redhat.io/rhel7:7.7
LABEL maintainer "thomasmckay@redhat.com"
ENV PYTHON_VERSION=2.7 \
PATH=$HOME/.local/bin/:$PATH \
PYTHONUNBUFFERED=1 \
PYTHONIOENCODING=UTF-8 \
LC_ALL=en_US.UTF-8 \
LANG=en_US.UTF-8 \
PIP_NO_CACHE_DIR=off
ENV QUAYDIR /quay-registry
ENV QUAYCONF /quay-registry/conf
ENV QUAYPATH "."
RUN mkdir $QUAYDIR
WORKDIR $QUAYDIR
RUN INSTALL_PKGS="\
python27 \
python27-python-pip \
rh-nginx112 rh-nginx112-nginx \
openldap \
scl-utils \
gcc-c++ git \
openldap-devel \
gpgme-devel \
dnsmasq \
memcached \
openssl \
skopeo \
" && \
yum install -y yum-utils && \
yum-config-manager --quiet --disable "*" >/dev/null && \
yum-config-manager --quiet --enable \
rhel-7-server-rpms \
rhel-server-rhscl-7-rpms \
rhel-7-server-optional-rpms \
rhel-7-server-extras-rpms \
--save >/dev/null && \
yum -y --setopt=tsflags=nodocs --setopt=skip_missing_names_on_install=False install $INSTALL_PKGS && \
yum -y update && \
yum -y clean all
COPY . .
RUN scl enable python27 "\
pip install --upgrade setuptools pip && \
pip install -r requirements.txt --no-cache && \
pip freeze && \
mkdir -p $QUAYDIR/static/webfonts && \
mkdir -p $QUAYDIR/static/fonts && \
mkdir -p $QUAYDIR/static/ldn && \
PYTHONPATH=$QUAYPATH python -m external_libraries \
"
RUN cp -r $QUAYDIR/static/ldn $QUAYDIR/config_app/static/ldn && \
cp -r $QUAYDIR/static/fonts $QUAYDIR/config_app/static/fonts && \
cp -r $QUAYDIR/static/webfonts $QUAYDIR/config_app/static/webfonts
# Check python dependencies for GPL
# Due to the following bug, pip results must be piped to a file before grepping:
# https://github.com/pypa/pip/pull/3304
# 'docutils' is a setup dependency of botocore required by s3transfer. It's under
# GPLv3, and so is manually removed.
RUN rm -Rf /opt/rh/python27/root/usr/lib/python2.7/site-packages/docutils && \
scl enable python27 "pip freeze" | grep -v '^-e' | awk -F == '{print $1}' | grep -v docutils > piplist.txt && \
scl enable python27 "xargs -a piplist.txt pip --disable-pip-version-check show" > pipinfo.txt && \
test -z "$(cat pipinfo.txt | grep GPL | grep -v LGPL)" && \
rm -f piplist.txt pipinfo.txt
# Front-end
RUN curl --silent --location https://rpm.nodesource.com/setup_8.x | bash - && \
yum install -y nodejs && \
curl --silent --location https://dl.yarnpkg.com/rpm/yarn.repo | tee /etc/yum.repos.d/yarn.repo && \
rpm --import https://dl.yarnpkg.com/rpm/pubkey.gpg && \
yum install -y yarn && \
yarn install --ignore-engines && \
yarn build && \
yarn build-config-app
# TODO: Build jwtproxy in dist-git
# https://jira.coreos.com/browse/QUAY-1315
RUN curl -fsSL -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.3/jwtproxy-linux-x64 && \
chmod +x /usr/local/bin/jwtproxy
# TODO: Build prometheus-aggregator in dist-git
# https://jira.coreos.com/browse/QUAY-1324
RUN curl -fsSL -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator &&\
chmod +x /usr/local/bin/prometheus-aggregator
# Update local copy of AWS IP Ranges.
RUN curl -fsSL https://ip-ranges.amazonaws.com/ip-ranges.json -o util/ipresolver/aws-ip-ranges.json
RUN ln -s $QUAYCONF /conf && \
mkdir /var/log/nginx && \
ln -sf /dev/stdout /var/log/nginx/access.log && \
ln -sf /dev/stdout /var/log/nginx/error.log && \
chmod -R a+rwx /var/log/nginx
# Cleanup
RUN UNINSTALL_PKGS="\
gcc-c++ git \
openldap-devel \
gpgme-devel \
optipng \
kernel-headers \
" && \
yum remove -y $UNINSTALL_PKGS && \
yum clean all && \
rm -rf /var/cache/yum /tmp/* /var/tmp/* /root/.cache
EXPOSE 8080 8443 7443
RUN chgrp -R 0 $QUAYDIR && \
chmod -R g=u $QUAYDIR
RUN mkdir /datastorage && chgrp 0 /datastorage && chmod g=u /datastorage && \
mkdir -p /var/log/nginx && chgrp 0 /var/log/nginx && chmod g=u /var/log/nginx && \
mkdir -p /conf/stack && chgrp 0 /conf/stack && chmod g=u /conf/stack && \
mkdir -p /tmp && chgrp 0 /tmp && chmod g=u /tmp && \
chmod g=u /etc/passwd
RUN chgrp 0 /var/opt/rh/rh-nginx112/log/nginx && chmod g=u /var/opt/rh/rh-nginx112/log/nginx
# Allow TLS certs to be created and installed as non-root user
RUN chgrp -R 0 /etc/pki/ca-trust/extracted && \
chmod -R g=u /etc/pki/ca-trust/extracted && \
chgrp -R 0 /etc/pki/ca-trust/source/anchors && \
chmod -R g=u /etc/pki/ca-trust/source/anchors && \
chgrp -R 0 /opt/rh/python27/root/usr/lib/python2.7/site-packages/requests && \
chmod -R g=u /opt/rh/python27/root/usr/lib/python2.7/site-packages/requests && \
chgrp -R 0 /opt/rh/python27/root/usr/lib/python2.7/site-packages/certifi && \
chmod -R g=u /opt/rh/python27/root/usr/lib/python2.7/site-packages/certifi
VOLUME ["/var/log", "/datastorage", "/tmp", "/conf/stack"]
USER 1001
ENTRYPOINT ["/quay-registry/quay-entrypoint.sh"]
CMD ["registry"]

Dockerfile.rhel7 (new file, 133 lines)

@@ -0,0 +1,133 @@
FROM registry.redhat.io/rhel7:7.7
LABEL maintainer "thomasmckay@redhat.com"
ENV PYTHON_VERSION=2.7 \
PATH=$HOME/.local/bin/:$PATH \
PYTHONUNBUFFERED=1 \
PYTHONIOENCODING=UTF-8 \
LC_ALL=en_US.UTF-8 \
LANG=en_US.UTF-8 \
PIP_NO_CACHE_DIR=off
ENV QUAYDIR /quay-registry
ENV QUAYCONF /quay-registry/conf
ENV QUAYPATH "."
RUN mkdir $QUAYDIR
WORKDIR $QUAYDIR
RUN INSTALL_PKGS="\
python27 \
python27-python-pip \
rh-nginx112 rh-nginx112-nginx \
openldap \
scl-utils \
gcc-c++ git \
openldap-devel \
gpgme-devel \
dnsmasq \
memcached \
openssl \
skopeo \
" && \
yum install -y yum-utils && \
yum-config-manager --quiet --disable "*" >/dev/null && \
yum-config-manager --quiet --enable \
rhel-7-server-rpms \
rhel-server-rhscl-7-rpms \
rhel-7-server-optional-rpms \
rhel-7-server-extras-rpms \
--save >/dev/null && \
yum -y --setopt=tsflags=nodocs --setopt=skip_missing_names_on_install=False install $INSTALL_PKGS && \
yum -y update && \
yum -y clean all
COPY . .
RUN scl enable python27 "\
pip install --upgrade setuptools pip && \
pip install -r requirements.txt --no-cache && \
pip freeze && \
mkdir -p $QUAYDIR/static/webfonts && \
mkdir -p $QUAYDIR/static/fonts && \
mkdir -p $QUAYDIR/static/ldn && \
PYTHONPATH=$QUAYPATH python -m external_libraries \
"
RUN cp -r $QUAYDIR/static/ldn $QUAYDIR/config_app/static/ldn && \
cp -r $QUAYDIR/static/fonts $QUAYDIR/config_app/static/fonts && \
cp -r $QUAYDIR/static/webfonts $QUAYDIR/config_app/static/webfonts
# Check python dependencies for GPL
# Due to the following bug, pip results must be piped to a file before grepping:
# https://github.com/pypa/pip/pull/3304
# 'docutils' is a setup dependency of botocore required by s3transfer. It's under
# GPLv3, and so is manually removed.
RUN rm -Rf /opt/rh/python27/root/usr/lib/python2.7/site-packages/docutils && \
scl enable python27 "pip freeze" | grep -v '^-e' | awk -F == '{print $1}' | grep -v docutils > piplist.txt && \
scl enable python27 "xargs -a piplist.txt pip --disable-pip-version-check show" > pipinfo.txt && \
test -z "$(cat pipinfo.txt | grep GPL | grep -v LGPL)" && \
rm -f piplist.txt pipinfo.txt
# Front-end
RUN curl --silent --location https://rpm.nodesource.com/setup_8.x | bash - && \
yum install -y nodejs && \
curl --silent --location https://dl.yarnpkg.com/rpm/yarn.repo | tee /etc/yum.repos.d/yarn.repo && \
rpm --import https://dl.yarnpkg.com/rpm/pubkey.gpg && \
yum install -y yarn && \
yarn install --ignore-engines && \
yarn build && \
yarn build-config-app
# TODO: Build jwtproxy in dist-git
# https://jira.coreos.com/browse/QUAY-1315
RUN curl -fsSL -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.3/jwtproxy-linux-x64 && \
chmod +x /usr/local/bin/jwtproxy
# TODO: Build prometheus-aggregator in dist-git
# https://jira.coreos.com/browse/QUAY-1324
RUN curl -fsSL -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator &&\
chmod +x /usr/local/bin/prometheus-aggregator
# Update local copy of AWS IP Ranges.
RUN curl -fsSL https://ip-ranges.amazonaws.com/ip-ranges.json -o util/ipresolver/aws-ip-ranges.json
RUN ln -s $QUAYCONF /conf && \
mkdir /var/log/nginx && \
ln -sf /dev/stdout /var/log/nginx/access.log && \
ln -sf /dev/stdout /var/log/nginx/error.log && \
chmod -R a+rwx /var/log/nginx
# Cleanup
RUN UNINSTALL_PKGS="\
gcc-c++ git \
openldap-devel \
gpgme-devel \
optipng \
kernel-headers \
" && \
yum remove -y $UNINSTALL_PKGS && \
yum clean all && \
rm -rf /var/cache/yum /tmp/* /var/tmp/* /root/.cache
EXPOSE 8080 8443 7443
RUN chgrp -R 0 $QUAYDIR && \
chmod -R g=u $QUAYDIR
RUN mkdir /datastorage && chgrp 0 /datastorage && chmod g=u /datastorage && \
mkdir -p /var/log/nginx && chgrp 0 /var/log/nginx && chmod g=u /var/log/nginx && \
mkdir -p /conf/stack && chgrp 0 /conf/stack && chmod g=u /conf/stack && \
mkdir -p /tmp && chgrp 0 /tmp && chmod g=u /tmp && \
chmod g=u /etc/passwd
RUN chgrp 0 /var/opt/rh/rh-nginx112/log/nginx && chmod g=u /var/opt/rh/rh-nginx112/log/nginx
VOLUME ["/var/log", "/datastorage", "/tmp", "/conf/stack"]
ENTRYPOINT ["/quay-registry/quay-entrypoint.sh"]
CMD ["registry"]
# root required to create and install certs
# https://jira.coreos.com/browse/QUAY-1468
# USER 1001

GOVERNANCE.md (new file, 66 lines)

@@ -0,0 +1,66 @@
# Project Quay Governance
Project Quay is run according to the guidelines specified below. This is a living document and is expected to evolve along with Project Quay itself.
## Principles
Project Quay strives to follow these principles at all times:
* Openness - Quay evolves and improves out in the open, with transparent work and decision making that is clear and well understood.
* Respectfulness - Quay is a project for a diverse community where different points of view are welcomed. Healthy and respectful discussions help us meet our goals and deliver a better end product.
* Meritocracy - In the Quay community all ideas are heard but only the best ideas help drive the project forward. As an open, respectful community we will judge all ideas on their technical merit and alignment with Quay's design principles.
* Accountability - The Quay community is accountable
* to our users to deliver the best software possible
* to the project to ensure each Contributor and Maintainer carries out their duties to the best of their abilities
* to itself to ensure that Quay remains a project where individuals can be passionate about contributing their time and energy
## Maintainers
Maintainers play a special role to ensure that contributions align with the expected quality, consistency and long-term vision for Project Quay. Each Maintainer is vital to the success of Project Quay and has decided to make the commitment to that cause. Being a Maintainer is difficult work and not for everyone. Therefore Project Quay will have a small group of Maintainers, as many as are deemed necessary to handle the pipeline of contributions being made to the project.
### Becoming a Maintainer
Each Maintainer must also be a Contributor. Candidates for the Maintainer role are individuals who have made recent, substantial and recurring contributions to the project. The existing Maintainers will periodically identify Contributors and make recommendations to the community that those individuals become Maintainers. The Maintainers will then vote on the candidate and if so agreed the candidate will be invited to raise a PR to add their name into the MAINTAINERS.md file. Approval of that PR signals the Contributor is now a Maintainer.
### Responsibilities of a Maintainer
Project Quay's success depends on how well Maintainers perform their duties. Maintainers are responsible for monitoring Slack and e-mail lists, helping to triage issues on the Project Quay JIRA board, reviewing PRs, ensuring responses are provided to Contributors, and assisting with regular Project Quay releases. If Contributors are the lifeblood of an open source community, the Maintainers act as the heart, hands, eyes and ears, helping to keep the project moving and viable.
### Stepping Down as a Maintainer
A Maintainer may decide they are no longer interested in or able to carry out the role. In such a situation the Maintainer should notify the other Maintainers of their intentions to step down and help identify a replacement from existing Contributors. Ideally the outgoing Maintainer will ensure that any outstanding work has been transitioned to another Maintainer. To carry out the actual removal the outgoing Maintainer raises a PR against MAINTAINERS.md file to remove their name.
## Contributors
Anyone can be a Contributor to Project Quay. No special approval is required; simply go through our Getting Started guide, fork one of our repositories and submit a PR. All types of contributions are welcome, whether through bug reports via JIRA, code, or documentation.
## Sub-Projects
Project Quay will be primarily focused on the delivery of Quay itself but also contains various sub-projects such as Clair and Quay-Builders. Each sub-project must have its own dedicated repository containing a MAINTAINERS.md file. Each sub-project will abide by this Governance model.
Requests for new sub-projects under Project Quay should be raised to the Maintainers.
## Code of Conduct
Project Quay abides by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
## How Decisions Are Made
Most of the decision making for Project Quay will happen through the regular PR approval process. We stand by the notion that what exists in the Project Quay repositories is the end result of countless community-driven decisions.
When a more complex decision is required, for example a technical issue related to a PR, it is expected that involved parties will resolve the dispute in a respectful and efficient manner. If the dispute cannot be resolved between the involved parties then the Maintainers will review the dispute and come to an agreement via majority vote amongst themselves. All decision making should be tracked via a JIRA issue and performed transparently via the Project Quay communications channels.
## Project Quay Releases
On a regular basis, Project Quay will issue a release. The release cadence will not be strictly defined but should happen approximately every 3 months. Maintainers will be part of a rotating "Release Nanny" role whereby each Maintainer shares the responsibility of creating a Quay release.
Release duties include:
* Creating the Release Notes
* Verifying the automated tests have passed
* Building the necessary Quay, Clair-JWT, and Quay-Builder container images
* Publishing the container images to quay.io
* Updating the GitHub release pages
* Notifying the community of the new release
## DCO and Licenses
Project Quay uses the [Apache 2.0](https://opensource.org/licenses/Apache-2.0) license.

LICENSE (new file, 201 lines)

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -1,3 +0,0 @@
Jake Moshenko <jake.moshenko@coreos.com> (@jakedt)
Jimmy Zelinskie <jimmy.zelinskie@coreos.com> (@jzelinskie)
Joseph Schorr <joseph.schorr@coreos.com> (@josephschorr)


@@ -13,7 +13,7 @@ MODIFIED_FILES = $(shell git diff --name-only $(GIT_MERGE_BASED) | grep -E .+\.p
show-modified:
echo $(MODIFIED_FILES)
.PHONY: all unit-test registry-test registry-test-old test pkgs build run clean
.PHONY: all unit-test registry-test registry-test-old buildman-test test pkgs build run clean
all: clean pkgs test build
@@ -43,33 +43,44 @@ conf/stack/license: $(QUAY_CONFIG)/local/license
ln -s $(QUAY_CONFIG)/local/license conf/stack/license
unit-test:
TEST=true PYTHONPATH="." py.test \
ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields TEST=true PYTHONPATH="." py.test \
--cov="." --cov-report=html --cov-report=term-missing \
--timeout=3600 --verbose -x \
./
registry-test:
TEST=true PYTHONPATH="." py.test \
TEST=true ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields PYTHONPATH="." py.test \
--cov="." --cov-report=html --cov-report=term-missing \
--timeout=3600 --verbose --show-count -x \
test/registry/registry_tests.py
registry-test-old:
TEST=true PYTHONPATH="." py.test \
TEST=true PYTHONPATH="." ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields py.test \
--cov="." --cov-report=html --cov-report=term-missing \
--timeout=3600 --verbose --show-count -x \
./test/registry_tests.py
buildman-test:
TEST=true PYTHONPATH="." ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields py.test \
--cov="." --cov-report=html --cov-report=term-missing \
--timeout=3600 --verbose --show-count -x \
./buildman/
certs-test:
./test/test_certs_install.sh
full-db-test: ensure-test-db
TEST=true PYTHONPATH=. alembic upgrade head
TEST=true PYTHONPATH=. SKIP_DB_SCHEMA=true py.test --timeout=7200 \
TEST=true PYTHONPATH=. QUAY_OVERRIDE_CONFIG='{"DATABASE_SECRET_KEY": "anothercrazykey!"}' \
ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields alembic upgrade head
TEST=true PYTHONPATH=. ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields \
SKIP_DB_SCHEMA=true py.test --timeout=7200 \
--verbose --show-count -x --ignore=endpoints/appr/test/ \
./
test: unit-test registry-test
clients-test:
cd test/clients; python clients_test.py
test: unit-test registry-test registry-test-old certs-test
ensure-test-db:
@if [ -z $(TEST_DATABASE_URI) ]; then \
@@ -129,6 +140,15 @@ docker-build: pkgs build
git checkout $(NAME)
echo $(TAG)
app-sre-docker-build:
# get named head (ex: branch, tag, etc..)
export NAME=$(shell git rev-parse --abbrev-ref HEAD)
# checkout commit so .git/HEAD points to full sha (used in Dockerfile)
echo "$(SHA)"
git checkout $(SHA)
$(BUILD_CMD) -t ${IMG} .
git checkout $(NAME)
run: license
goreman start

Makefile.ci (new file, 69 lines)

@@ -0,0 +1,69 @@
SHELL := /bin/bash
PYTEST_MARK ?= shard_1_of_1
export PATH := ./venv/bin:$(PATH)
.PHONY: all unit-test registry-test registry-test-old test
all: test
unit-test:
TEST=true PYTHONPATH="." ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields py.test \
-m $(PYTEST_MARK) \
--cov="." --cov-report=html --cov-report=term-missing \
--timeout=3600 --verbose -x \
./
registry-test:
TEST=true PYTHONPATH="." ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields py.test \
-m $(PYTEST_MARK) \
--cov="." --cov-report=html --cov-report=term-missing \
--timeout=3600 --verbose --show-count -x \
test/registry/registry_tests.py
registry-test-old:
TEST=true PYTHONPATH="." ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields py.test \
--cov="." --cov-report=html --cov-report=term-missing \
--timeout=3600 --verbose --show-count -x \
./test/registry_tests.py
certs-test:
./test/test_certs_install.sh
gunicorn-tests:
./test/test_gunicorn_running.sh
full-db-test: ensure-test-db
TEST=true PYTHONPATH=. QUAY_OVERRIDE_CONFIG='{"DATABASE_SECRET_KEY": "anothercrazykey!"}' \
ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields alembic upgrade head
TEST=true PYTHONPATH=. ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields \
SKIP_DB_SCHEMA=true py.test --timeout=7200 \
-m $(PYTEST_MARK) \
--verbose --show-count -x --ignore=endpoints/appr/test/ \
./
test: unit-test registry-test
ensure-test-db:
@if [ -z $(TEST_DATABASE_URI) ]; then \
echo "TEST_DATABASE_URI is undefined"; \
exit 1; \
fi
PG_PASSWORD := quay
PG_USER := quay
PG_HOST := postgresql://$(PG_USER):$(PG_PASSWORD)@localhost/quay
test_postgres : TEST_ENV := SKIP_DB_SCHEMA=true TEST=true \
TEST_DATABASE_URI=$(PG_HOST) PYTHONPATH=.
test_postgres:
docker rm -f postgres-testrunner-postgres || true
docker run --name postgres-testrunner-postgres \
-e POSTGRES_PASSWORD=$(PG_PASSWORD) -e POSTGRES_USER=${PG_USER} \
-p 5432:5432 -d postgres:9.2
until pg_isready -d $(PG_HOST); do sleep 1; echo "Waiting for postgres"; done
$(TEST_ENV) alembic upgrade head
$(TEST_ENV) py.test --timeout=7200 --verbose --show-count ./ --color=no \
--ignore=endpoints/appr/test/ -x
docker rm -f postgres-testrunner-postgres || true


@@ -1,3 +1,4 @@
app: gunicorn -c conf/gunicorn_local.py application:application
webpack: npm run watch
builder: python -m buildman.builder

README.md (405 lines changed)

@@ -1,36 +1,36 @@
# quay
# Project Quay
[![Build Status](https://travis-ci.com/quay/quay.svg?token=pWvEz2TeyDsVn69Hkiwq&branch=master)](https://travis-ci.com/quay/quay)
![Docker Repository on Quay](https://quay.io/repository/quay/quay/status?token=7bffbc13-8bb0-4fb4-8a70-684a0cf485d3 "Docker Repository on Quay")
:warning: The `master` branch may be in an *unstable or even broken state* during development.
Please use [releases] instead of the `master` branch in order to get stable binaries.
Please use [releases] instead of the `master` branch in order to get stable software.
![Quay Logo](static/img/quay_preview.png)
[releases]: https://github.com/quay/quay/releases
Quay is a project to build, store, and distribute container images.
![Project Quay Logo](project_quay_logo.png)
Project Quay builds, stores, and distributes your container images.
High-level features include:
- Docker Registry Protocol [v1], [v2]
- Docker Manifest Schema [v2.1]
- Docker Registry Protocol [v2]
- Docker Manifest Schema [v2.1], [v2.2]
- [AppC Image Discovery] via on-demand transcoding
- Image Squashing via on-demand transcoding
- Authentication provided by [LDAP], [Keystone], [Dex], [Google], [GitHub]
- Authentication provided by [LDAP], [Keystone], [OIDC], [Google], and [GitHub]
- ACLs, team management, and auditability logs
- Geo-replicated storage provided by local filesystems, [S3], [GCS], [Swift], [Ceph]
- Geo-replicated storage provided by local filesystems, [S3], [GCS], [Swift], and [Ceph]
- Continuous Integration integrated with [GitHub], [Bitbucket], [GitLab], and [git]
- Security Vulnerability Analysis via [Clair]
- [Swagger]-compliant HTTP API
[releases]: https://github.com/coreos-inc/quay/releases
[v1]: https://docs.docker.com/v1.6/reference/api/registry_api/
[v2]: https://docs.docker.com/v1.6/registry/
[v2]: https://docs.docker.com/registry/spec/api/
[v2.1]: https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md
[v2.2]: https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md
[AppC Image Discovery]: https://github.com/appc/spec/blob/master/spec/discovery.md
[LDAP]: https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol
[Keystone]: http://docs.openstack.org/developer/keystone
[Dex]: https://github.com/coreos/dex
[OIDC]: https://en.wikipedia.org/wiki/OpenID_Connect
[Google]: https://developers.google.com/identity/sign-in/web/sign-in
[GitHub]: https://developer.github.com/v3/oauth
[S3]: https://aws.amazon.com/s3
@@ -41,371 +41,34 @@ High-level features include:
[Bitbucket]: https://bitbucket.com
[GitLab]: https://gitlab.com
[git]: https://git-scm.com
[Clair]: https://github.com/coreos/clair
[Clair]: https://github.com/quay/clair
[Swagger]: http://swagger.io
## Table of Contents
1. **[Getting Started](#getting-started)**
1. [macOS](#macos)
3. [Linux](#linux)
2. **[Development](#development)**
1. [PivotalTracker Integration](#pivotaltracker-integration)
3. **[Running and Testing](#running-and-testing)**
1. [Test Data](#test-data)
2. [Local Scripts](#local-scripts)
3. [Development inside Docker](#development-inside-docker)
4. [Adding a Python Dependency](#adding-a-python-dependency)
5. [Adding a Yarn Dependency](#adding-a-yarn-dependency)
6. [Running the Build System](#running-the-build-system)
7. [To run individual tests](#to-run-individual-tests)
1. [Pytest](#pytest)
2. [Tox](#tox)
8. [Running Migrations](#running-migrations)
9. [How to run a build with tests for a push or merge](#how-to-run-a-build-with-tests-for-a-push-or-merge)
4. **[Documentation](#documentation)**
1. [Architecture at a Glance](#architecture-at-a-glance)
2. [Terminology](#terminology)
1. [Organizations](#organizations)
2. [Concepts](#concepts)
3. [Software](#software)
## Getting Started
### macOS
* Explore a live instance of Project Quay hosted at [Quay.io]
* Watch [talks] given about Project Quay
* Review the [documentation] for Red Hat Quay
* Get up and running with a containerized [development environment]
macOS developers will need:
[Quay.io]: https://quay.io
[talks]: /docs/talks.md
[documentation]: https://access.redhat.com/documentation/en-us/red_hat_quay
[development environment]: /docs/development-container.md
* [command line tools] or [xcode]
* [brew]
## Community
[command line tools]: https://developer.apple.com/downloads
[xcode]: https://developer.apple.com/downloads
[brew]: https://github.com/Homebrew/brew
[docker]: https://www.docker.com/docker-mac
* Mailing List: [quay-dev@googlegroups.com]
* IRC: #quay on [freenode.net]
* Bug tracking: [JBoss JIRA]
* Security Issues: [security@redhat.com]
```sh
# Download the code
git clone git@github.com:coreos-inc/quay.git && cd quay
[quay-dev@googlegroups.com]: https://groups.google.com/forum/#!forum/quay-dev
[freenode.net]: https://webchat.freenode.net
[JBoss JIRA]: https://issues.jboss.org/projects/PROJQUAY
[security@redhat.com]: mailto:security@redhat.com
# Install the system dependencies
brew install libevent libmagic postgresql gpgme pyenv pyenv-virtualenv node yarn
## License
# Create a default virtualmachine for docker
docker-machine create -d virtualbox default
# Add these to ~/.bashrc or ~/.zshrc
eval "$(pyenv virtualenv-init -)"
eval "$(pyenv init -)"
eval $(/usr/local/bin/docker-machine env default)
export PYTHONPATH="."
# Some installs don't have /usr/include, required for finding SASL header files
# This command might fail because the rootfs is read-only. Refer to the following:
# http://apple.stackexchange.com/questions/196224/unix-ln-s-command-not-permitted-in-osx-el-capitan-beta3
if [ ! -e /usr/include ]; then sudo ln -s `xcrun --show-sdk-path`/usr/include /usr/include; fi
# Install the Python dependencies
pyenv install 2.7.12
pyenv virtualenv 2.7.12 quay
pyenv activate quay
pyenv local quay
# Some packages may fail to build with clang (which now defaults to C11).
# If you're getting errors, try running again with CFLAGS='-std=c99'.
pip install -r requirements.txt
pip install -r requirements-dev.txt
pip install -r requirements-test.txt
# Setup a local config
git clone git@github.com:quay/quay-config-local.git ../quay-config
ln -s ../../quay-config/local conf/stack
# Install Node Dependencies
yarn install
# Link Typescript
yarn link typescript
# Download external libraries
mkdir static/fonts static/ldn
python external_libraries.py
```
#### Third Party Docs
* [docker](https://beta.docker.com/docs/mac/getting-started)
* [docker-machine](https://docs.docker.com/machine/install-machine)
* [pyenv](https://github.com/yyuu/pyenv)
* [pyenv-virtualenv](https://github.com/yyuu/pyenv-virtualenv)
### Docker Compose
You'll need Docker and [Docker Compose](https://docs.docker.com/compose) installed.
If you're on macOS, [Docker for Mac](https://www.docker.com/docker-mac) should include
both tools. Otherwise, follow the docs for your platform.
You'll also need Node.js and NPM if you want to interact with the
frontend code outside a container.
Finally, you'll need a recent [Go](https://golang.org) version for the
builder.
To start Quay locally:
```sh
# Clone the various repos you'll need:
git clone https://github.com/coreos-inc/quay.git
git clone https://github.com/coreos-inc/quay-config-local.git
git clone https://github.com/coreos-inc/quay-builder.git
# Build the builder:
cd quay-builder
make build GOOS=linux
# Install NPM modules:
cd ../quay
npm install
# Build or pull images and start all Quay components:
docker-compose up
```
#### Third Party Docs
* [Docker Compose](https://docs.docker.com/compose)
* [Docker for Mac](https://www.docker.com/docker-mac)
### Linux
Do you use Linux? Send us a PR! Or use docker-compose!
## Development
### JIRA
The Quay backlog can be found in JIRA: https://jira.coreos.com/projects/QUAY
## Running and Testing
### Test Data
A SQLite database full of test data is committed to this git repository at [test/data/test.db](quay/test/data/test.db).
This database is generated by executing `python initdb.py`.
The username and password of the admin test account are `devtable` and `password`, respectively.
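For example, the test database can be rebuilt locally (a minimal sketch; it assumes the Python requirements are already installed and that the commands are run from the repository root):
```sh
# Remove the checked-in SQLite test database and regenerate it from scratch
rm test/data/test.db
python initdb.py

# Log in to a local instance with the admin test account: devtable / password
```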
### Local Scripts
Running the web server locally requires [goreman](https://github.com/mattn/goreman):
```sh
go get github.com/mattn/goreman
```
* `local-run` runs the web server for testing
* `local-test` runs the unit test suite
* `yarn run build` builds front-end dependencies
* `yarn run watch` builds and watches front-end dependencies
* `yarn test` runs front-end test suite
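A typical local loop with these scripts might look like the following (a sketch; `./local-test.sh` is referenced elsewhere in this README, while the `./local-run.sh` filename is assumed here):
```sh
# Build the front-end assets, then start the web server for testing
yarn run build
./local-run.sh

# In a second terminal, run the backend unit tests and the front-end tests
./local-test.sh
yarn test
```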
### Development inside Docker
To build and run a development container, pass one argument to [local-docker.sh](quay/local-docker.sh) (see the example after this list):
- `buildman`: run the buildmanager
- `dev`: run web server on port 5000
- `initdb`: clear and initialize the test database
- `notifications`: run the notification worker
- `test`: run the unit test suite
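For example (a sketch; it assumes the script is run from the repository root):
```sh
# Clear and initialize the test database, then run the dev web server on port 5000
./local-docker.sh initdb
./local-docker.sh dev

# Or run the unit test suite inside the container
./local-docker.sh test
```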
### Adding a Python Dependency
```sh
# Create a new virtualenv and activate it
pyenv virtualenv 2.7.12 quay-deps
pyenv activate quay-deps
# Install unversioned dependencies with your changes
pip install -r requirements-nover.txt
# Run the unit test suite
./local-test.sh
# Freeze the versions of all of the dependencies
pip freeze > requirements.txt
# Delete the virtualenv
pyenv uninstall quay-deps
```
### Adding a Yarn Dependency
We use [Yarn](https://yarnpkg.com/) for frontend dependency management. The `yarn.lock` file ensures
that we get consistent version installs using the `yarn install` command. However, new dependencies
should be added using `yarn add <npm package>`. This will add an entry to `package.json` and `yarn.lock`.
Occasionally there will be merge conflicts with `yarn.lock`. To resolve them, use the following (taken
from [here](https://github.com/yarnpkg/yarn/issues/1776#issuecomment-269539948)).
```sh
git rebase origin/master
git checkout origin/master -- yarn.lock
yarn install
git add yarn.lock
git rebase --continue
```
### Running the Build System
TODO
```sh
# Run an instance of redis
docker run -d -p 6379:6379 quay.io/quay/redis
```
### To run individual tests
```sh
# To run a specific suite
TEST=true python -m test.test_api_usage -f
# To run a specific test in a suite
TEST=true python -m test.test_api_usage -f SuiteName
```
#### Pytest
```sh
# To run all tests
TEST=true PYTHONPATH="." py.test --verbose
# To run a specific test module
TEST=true PYTHONPATH="." py.test --verbose test/registry/registry_tests.py
# To run a single specific test
TEST=true PYTHONPATH="." py.test --verbose test/test_api_usage.py::TestDeleteNamespace
# To retry only last failed (--lf):
TEST=true PYTHONPATH="." py.test --verbose --lf
# To start pdb on failure:
TEST=true PYTHONPATH="." py.test --verbose --pdb
# To run a coverage report (html pages in ./htmlcov):
TEST=true PYTHONPATH="." py.test --cov="." --cov-report=html --cov-report=term-missing --cov-config=.coverage.ini --verbose
# Don't capture stdout (-s)
TEST=true PYTHONPATH="." py.test --verbose -s
```
#### Tox
Tox creates virtualenvs in which to run the tests.
It allows testing the code against multiple environments, such as python2.x and python3.x, or against different library versions.
```sh
# Test all tox env:
tox
# Add extra parameters to the pytest command:
# tox -- [pytest ARGS]
tox -- -x
# build a single env with -e:
tox -e py27-api
```
### Running migrations
```sh
# To create a new migration with the given description.
# Note: you might see errors about a unique id being too long;
# that's okay as long as the migration file is created.
./data/migrations/migration.sh "Description goes here"
# To test the up and down of the migration
./data/migrations/migration.sh # without params
# Migrations get run when you create a docker image or you can run them
# manually with the following command.
PYTHONPATH=. alembic upgrade head
# Once you have a migration, rebuild your local sqlite test database from initdb.py
# and check in the changes to share your migration with others.
rm test/data/test.db
python initdb.py
```
### Running tests for migrations
Use AWS RDS to create a test database instance.
To create a new database from a snapshot to test against see
[this](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_RestoreFromSnapshot.html).
Then point the migrations at the new instance by editing
`quay-config/local/config.yaml`.
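For example, the database entry in `quay-config/local/config.yaml` might look like the following (hostname and credentials here are hypothetical):
```yaml
DB_URI: mysql+pymysql://quay:quaypass@my-restored-snapshot.abc123.us-east-1.rds.amazonaws.com:3306/quay
```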
Remember to run this from the root of the quay directory and to set your
python environment first.
```sh
PYTHONPATH=. alembic upgrade head
```
### How to run a build with tests for a push or merge
```sh
# Inside the quay directory.
export QUAY_TAG=quay.io/quay/quay:localtest
docker build -t $QUAY_TAG --build-arg RUN_TESTS=true .
```
## Documentation
* [Red Hat Quay Documentation](https://tectonic.com/quay-enterprise/docs/latest)
* [Quay.io Documentation](https://docs.quay.io)
### Architecture at a Glance
<img width="640px" src="static/img/quay-arch.jpg">
### Terminology
#### Organizations
- **AppC**: a standards body responsible for a _Runtime_ and _Image Format_, since superseded by the _Open Container Initiative_
- **Open Container Initiative**: a standards body responsible for a _Runtime_ specification and an _Image Format_
- **Docker**: a company that builds a platform that has its own _Image Formats_, _Build System_, _Container Runtime_, and _Container Orchestration_
#### Concepts
- **Image**: an archive containing all of the contents necessary to execute a container
- **Image Format**: a specification for the structure of an _Image_
- **Image Layer**: an _Image_ that may depend on being applied to other _Images_ to generate a final _Image_
- **Image Squashing**: the process of compressing an _Image_ into a single _Layer_
- **Manifest**: a text file containing metadata for a particular _Image_
- **Tag**: a human-friendly, named, mutable pointer to a particular set of _Images_
- **Build System**: a program used to generate _Images_
- **Registry**: a program that speaks one or more standard protocols to store and receive _Images_
- **Repository**: a collection of related _Tags_ organized by a _Registry_
- **Push**: the act of uploading an _Image_ to a _Registry_
- **Pull**: the act of downloading an _Image_ from a _Registry_
- **Container**: an _Image_ and its execution environment
- **Container Runtime**: a program that can transform an _Image_ into a _Container_ by executing it
- **Container Orchestration**: a program or set of programs that provides a framework for deploying _Containers_
#### Software
- **Quay.io**: CoreOS's hosted _Registry_
- **Quay**: CoreOS's enterprise-grade _Registry_ product
- **quayctl**: an open source program that implements alternative methods for _pulling_ _Images_ from _Quay_
- **Clair**: an open source static analysis tool used to detect vulnerabilities in _Images_
- **Quay Security Scanning**: the integration between _Clair_ and _Quay_
- **Kubernetes**: an open source program implementing _Container Orchestration_
- **Docker Hub**: Docker's hosted _Registry_
- **Docker Trusted Registry**: Docker's enterprise-grade _Registry_ product
- **Notary**: an open source implementation of the TUF protocol used in _Docker Content Trust_
- **Docker Content Trust**: the integration between _Notary_ and _Docker Trusted Registry_
- **Docker Engine**: a program used to interact with all aspects of the Docker platform
- **Swarm**: a program implementing _Container Orchestration_ for the Docker platform
Project Quay is under the Apache 2.0 license.
See the LICENSE file for details.

49
TESTING.md Normal file
View File

@ -0,0 +1,49 @@
# Testing quay
## Unit tests (run in CI automatically)
Basic unit tests for testing all the functionality of Quay:
```sh
make unit-test
```
## Registry tests (run in CI automatically)
Quay has two sets of registry tests (current and legacy), which simulate Docker clients by executing
REST operations against a spawned Quay.
```sh
make registry-test
make registry-test-old
```
## Certs tests (run in CI automatically)
Ensures that custom TLS certificates are correctly loaded into the Quay container on startup.
```sh
make certs-test
```
## Full database tests (run in CI automatically)
The full database tests run the entire suite of Quay unit tests against a real running database
instance.
NOTE: The database *must be running* on the local machine before this test can be run.
```sh
TEST_DATABASE_URI=database-connection-string make full-db-test
```
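For example, against a throwaway MySQL started in a container (a sketch; the image tag, credentials and database name are arbitrary):
```sh
docker run -d -p 3306:3306 -e MYSQL_ROOT_PASSWORD=password -e MYSQL_DATABASE=quay_test mysql:5.7
TEST_DATABASE_URI=mysql+pymysql://root:password@127.0.0.1:3306/quay_test make full-db-test
```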
## Clients tests (must be manually run)
The clients test spawns CoreOS virtual machines via Vagrant and VirtualBox and runs real Docker/podman
commands against a *running Quay*.
NOTE: A Quay *must be running* on the local machine before this test can be run.
```sh
make clients-test 10.0.2.2:5000 # IP+Port of the Quay on the host machine.
```

View File

@ -10,9 +10,11 @@ CONF_DIR = os.getenv("QUAYCONF", os.path.join(ROOT_DIR, "conf/"))
STATIC_DIR = os.path.join(ROOT_DIR, 'static/')
STATIC_LDN_DIR = os.path.join(STATIC_DIR, 'ldn/')
STATIC_FONTS_DIR = os.path.join(STATIC_DIR, 'fonts/')
STATIC_WEBFONTS_DIR = os.path.join(STATIC_DIR, 'webfonts/')
TEMPLATE_DIR = os.path.join(ROOT_DIR, 'templates/')
IS_TESTING = 'TEST' in os.environ
IS_BUILDING = 'BUILDING' in os.environ
IS_KUBERNETES = 'KUBERNETES_SERVICE_HOST' in os.environ
OVERRIDE_CONFIG_DIRECTORY = os.path.join(CONF_DIR, 'stack/')
@ -36,7 +38,7 @@ def _get_git_sha():
else:
try:
return subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()[0:8]
except (OSError, subprocess.CalledProcessError):
except (OSError, subprocess.CalledProcessError, Exception):
pass
return "unknown"

22
active_migration.py Normal file
View File

@ -0,0 +1,22 @@
from enum import Enum, unique
from data.migrationutil import DefinedDataMigration, MigrationPhase
@unique
class ERTMigrationFlags(Enum):
""" Flags for the encrypted robot token migration. """
READ_OLD_FIELDS = 'read-old'
WRITE_OLD_FIELDS = 'write-old'
ActiveDataMigration = DefinedDataMigration(
'encrypted_robot_tokens',
'ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE',
[
MigrationPhase('add-new-fields', 'c13c8052f7a6', [ERTMigrationFlags.READ_OLD_FIELDS,
ERTMigrationFlags.WRITE_OLD_FIELDS]),
MigrationPhase('backfill-then-read-only-new',
'703298a825c2', [ERTMigrationFlags.WRITE_OLD_FIELDS]),
MigrationPhase('stop-writing-both', '703298a825c2', []),
MigrationPhase('remove-old-fields', 'c059b952ed76', []),
]
)

41
app.py
View File

@ -11,11 +11,13 @@ from flask_login import LoginManager
from flask_mail import Mail
from flask_principal import Principal
from jwkest.jwk import RSAKey
from werkzeug.contrib.fixers import ProxyFix
from werkzeug.exceptions import HTTPException
import features
from _init import config_provider, CONF_DIR, IS_KUBERNETES, IS_TESTING, OVERRIDE_CONFIG_DIRECTORY
from _init import (config_provider, CONF_DIR, IS_KUBERNETES, IS_TESTING, OVERRIDE_CONFIG_DIRECTORY,
IS_BUILDING)
from auth.auth_context import get_authenticated_user
from avatars.avatars import Avatar
@ -54,6 +56,7 @@ from util.metrics.metricqueue import MetricQueue
from util.metrics.prometheus import PrometheusPlugin
from util.saas.cloudwatch import start_cloudwatch_sender
from util.secscan.api import SecurityScannerAPI
from util.repomirror.api import RepoMirrorAPI
from util.tufmetadata.api import TUFMetadataAPI
from util.security.instancekeys import InstanceKeys
from util.security.signing import Signer
@ -73,6 +76,7 @@ logger = logging.getLogger(__name__)
# Instantiate the configuration.
is_testing = IS_TESTING
is_kubernetes = IS_KUBERNETES
is_building = IS_BUILDING
if is_testing:
from test.testconfig import TestConfig
@ -91,11 +95,30 @@ config_provider.update_app_config(app.config)
environ_config = json.loads(os.environ.get(OVERRIDE_CONFIG_KEY, '{}'))
app.config.update(environ_config)
# Fix remote address handling for Flask.
if app.config.get('PROXY_COUNT', 1):
app.wsgi_app = ProxyFix(app.wsgi_app, num_proxies=app.config.get('PROXY_COUNT', 1))
# Ensure the V3 upgrade key is specified correctly. If not, simply fail.
# TODO: Remove for V3.1.
if not is_testing and not is_building and app.config.get('SETUP_COMPLETE', False):
v3_upgrade_mode = app.config.get('V3_UPGRADE_MODE')
if v3_upgrade_mode is None:
raise Exception('Configuration flag `V3_UPGRADE_MODE` must be set. Please check the upgrade docs')
if (v3_upgrade_mode != 'background'
and v3_upgrade_mode != 'complete'
and v3_upgrade_mode != 'production-transition'
and v3_upgrade_mode != 'post-oci-rollout'
and v3_upgrade_mode != 'post-oci-roll-back-compat'):
raise Exception('Invalid value for config `V3_UPGRADE_MODE`. Please check the upgrade docs')
# Split the registry model based on config.
# TODO(jschorr): Remove once we are fully on the OCI data model.
# TODO: Remove once we are fully on the OCI data model.
registry_model.setup_split(app.config.get('OCI_NAMESPACE_PROPORTION') or 0,
app.config.get('OCI_NAMESPACE_WHITELIST') or set(),
app.config.get('V22_NAMESPACE_WHITELIST') or set())
app.config.get('V22_NAMESPACE_WHITELIST') or set(),
app.config.get('V3_UPGRADE_MODE'))
# Allow user to define a custom storage preference for the local instance.
_distributed_storage_preference = os.environ.get('QUAY_DISTRIBUTED_STORAGE_PREFERENCE', '').split()
@ -131,6 +154,11 @@ class RequestWithId(Request):
@app.before_request
def _request_start():
if os.getenv('PYDEV_DEBUG', None):
import pydevd
host, port = os.getenv('PYDEV_DEBUG').split(':')
pydevd.settrace(host, port=int(port), stdoutToServer=True, stderrToServer=True, suspend=False)
logger.debug('Starting request: %s (%s)', request.request_id, request.path,
extra={"request_id": request.request_id})
@ -252,6 +280,9 @@ secscan_api = SecurityScannerAPI(app.config, storage, app.config['SERVER_HOSTNAM
uri_creator=get_blob_download_uri_getter(app.test_request_context('/'), url_scheme_and_hostname),
instance_keys=instance_keys)
repo_mirror_api = RepoMirrorAPI(app.config, app.config['SERVER_HOSTNAME'], app.config['HTTPCLIENT'],
instance_keys=instance_keys)
tuf_metadata_api = TUFMetadataAPI(app, app.config)
# Check for a key in config. If none found, generate a new signing key for Docker V2 manifests.
@ -261,8 +292,12 @@ if os.path.exists(_v2_key_path):
else:
docker_v2_signing_key = RSAKey(key=RSA.generate(2048))
# Configure the database.
if app.config.get('DATABASE_SECRET_KEY') is None and app.config.get('SETUP_COMPLETE', False):
raise Exception('Missing DATABASE_SECRET_KEY in config; did you perhaps forget to add it?')
database.configure(app.config)
model.config.app_config = app.config
model.config.store = storage
model.config.register_image_cleanup_callback(secscan_api.cleanup_layers)

View File

@ -1,3 +1,7 @@
# NOTE: Must be before we import or call anything that may be synchronous.
from gevent import monkey
monkey.patch_all()
import os
import logging
import logging.config

View File

@ -1 +0,0 @@
Jake Moshenko <jake.moshenko@coreos.com> (@jakedt)

View File

@ -1,7 +1,7 @@
import logging
from abc import ABCMeta, abstractmethod
from cachetools import lru_cache
from cachetools.func import lru_cache
from six import add_metaclass
from app import app
@ -180,7 +180,7 @@ class ValidatedAuthContext(AuthContext):
return QuayDeferredPermissionUser.for_user(self.authed_user)
if self.token:
return Identity(self.token.code, 'token')
return Identity(self.token.get_code(), 'token')
if self.signed_data:
identity = Identity(None, 'signed_grant')
@ -244,7 +244,7 @@ class ValidatedAuthContext(AuthContext):
})
# Add legacy information.
# TODO(jschorr): Remove this all once the new code is fully deployed.
# TODO: Remove this all once the new code is fully deployed.
if self.token:
dict_data.update({
'kind': 'token',
@ -305,7 +305,7 @@ class SignedAuthContext(AuthContext):
return SignedAuthContext(entity_kind, dict_data, v1_dict_format)
# Legacy handling.
# TODO(jschorr): Remove this all once the new code is fully deployed.
# TODO: Remove this all once the new code is fully deployed.
kind_string = dict_data.get('kind', 'anonymous')
if kind_string == 'oauth':
kind_string = 'oauthtoken'
@ -333,7 +333,7 @@ class SignedAuthContext(AuthContext):
return ValidatedAuthContext(**{self.kind.value: entity_reference})
# Legacy handling.
# TODO(jschorr): Remove this all once the new code is fully deployed.
# TODO: Remove this all once the new code is fully deployed.
kind_string = self.signed_data.get('kind', 'anonymous')
if kind_string == 'oauth':
kind_string = 'oauthtoken'

View File

@ -28,6 +28,7 @@ def validate_basic_auth(auth_header):
logger.debug('Attempt to process basic auth header')
# Parse the basic auth header.
assert isinstance(auth_header, basestring)
credentials, err = _parse_basic_auth_header(auth_header)
if err is not None:
logger.debug('Got invalid basic auth header: %s', auth_header)

View File

@ -117,7 +117,7 @@ class TokenEntityHandler(ContextEntityHandler):
return ACCESS_TOKEN_USERNAME
def get_serialized_entity_reference(self, entity_reference):
return entity_reference.code
return entity_reference.get_code()
def deserialize_entity_reference(self, serialized_entity_reference):
return model.token.load_token_data(serialized_entity_reference)

View File

@ -71,8 +71,8 @@ def validate_credentials(auth_username, auth_password_or_token):
logger.debug('Successfully validated credentials for robot %s', auth_username)
return ValidateResult(AuthKind.credentials, robot=robot), CredentialKind.robot
except model.InvalidRobotException as ire:
logger.warning('Failed to validate credentials for robot %s: %s', auth_username, ire.message)
return ValidateResult(AuthKind.credentials, error_message=ire.message), CredentialKind.robot
logger.warning('Failed to validate credentials for robot %s: %s', auth_username, ire)
return ValidateResult(AuthKind.credentials, error_message=str(ire)), CredentialKind.robot
# Otherwise, treat as a standard user.
(authenticated, err) = authentication.verify_and_link_user(auth_username, auth_password_or_token,

View File

@ -44,6 +44,5 @@ def validate_oauth_token(token):
# We have a valid token
scope_set = scopes_from_scope_string(validated.scope)
logger.debug('Successfully validated oauth access token: %s with scope: %s', token,
scope_set)
logger.debug('Successfully validated oauth access token with scope: %s', scope_set)
return ValidateResult(AuthKind.oauth, oauthtoken=validated)

View File

@ -1,14 +1,19 @@
import pytest
from auth.auth_context_type import SignedAuthContext, ValidatedAuthContext, ContextEntityKind
from data import model
from data import model, database
from test.fixtures import *
def get_oauth_token(_):
return database.OAuthAccessToken.get()
@pytest.mark.parametrize('kind, entity_reference, loader', [
(ContextEntityKind.anonymous, None, None),
(ContextEntityKind.appspecifictoken, 'test', model.appspecifictoken.access_valid_token),
(ContextEntityKind.oauthtoken, 'test', model.oauth.validate_access_token),
(ContextEntityKind.appspecifictoken, '%s%s' % ('a' * 60, 'b' * 60),
model.appspecifictoken.access_valid_token),
(ContextEntityKind.oauthtoken, None, get_oauth_token),
(ContextEntityKind.robot, 'devtable+dtrobot', model.user.lookup_robot),
(ContextEntityKind.user, 'devtable', model.user.get_user),
])

View File

@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
import pytest
from base64 import b64encode
@ -12,6 +14,8 @@ from test.fixtures import *
def _token(username, password):
assert isinstance(username, basestring)
assert isinstance(password, basestring)
return 'basic ' + b64encode('%s:%s' % (username, password))
@ -31,8 +35,7 @@ def _token(username, password):
(_token('devtable', 'invalid'), ValidateResult(AuthKind.basic,
error_message='Invalid Username or Password')),
(_token('devtable+somebot', 'invalid'), ValidateResult(
AuthKind.basic, error_message='Could not find robot with username: devtable+somebot ' +
'and supplied password.')),
AuthKind.basic, error_message='Could not find robot with username: devtable+somebot')),
(_token('disabled', 'password'), ValidateResult(
AuthKind.basic,
error_message='This user has been disabled. Please contact your administrator.')),])
@ -56,15 +59,16 @@ def test_valid_robot(app):
def test_valid_token(app):
access_token = model.token.create_delegate_token('devtable', 'simple', 'sometoken')
token = _token(ACCESS_TOKEN_USERNAME, access_token.code)
token = _token(ACCESS_TOKEN_USERNAME, access_token.get_code())
result = validate_basic_auth(token)
assert result == ValidateResult(AuthKind.basic, token=access_token)
def test_valid_oauth(app):
user = model.user.get_user('devtable')
oauth_token = list(model.oauth.list_access_tokens_for_user(user))[0]
token = _token(OAUTH_TOKEN_USERNAME, oauth_token.access_token)
app = model.oauth.list_applications_for_org(model.user.get_user_or_org('buynlarge'))[0]
oauth_token, code = model.oauth.create_access_token_for_testing(user, app.client_id, 'repo:read')
token = _token(OAUTH_TOKEN_USERNAME, code)
result = validate_basic_auth(token)
assert result == ValidateResult(AuthKind.basic, oauthtoken=oauth_token)
@ -72,7 +76,8 @@ def test_valid_oauth(app):
def test_valid_app_specific_token(app):
user = model.user.get_user('devtable')
app_specific_token = model.appspecifictoken.create_token(user, 'some token')
token = _token(APP_SPECIFIC_TOKEN_USERNAME, app_specific_token.token_code)
full_token = model.appspecifictoken.get_full_token_string(app_specific_token)
token = _token(APP_SPECIFIC_TOKEN_USERNAME, full_token)
result = validate_basic_auth(token)
assert result == ValidateResult(AuthKind.basic, appspecifictoken=app_specific_token)
@ -82,3 +87,12 @@ def test_invalid_unicode(app):
header = 'basic ' + b64encode(token)
result = validate_basic_auth(header)
assert result == ValidateResult(AuthKind.basic, missing=True)
def test_invalid_unicode_2(app):
token = '“4JPCOLIVMAY32Q3XGVPHC4CBF8SKII5FWNYMASOFDIVSXTC5I5NBU”'
header = 'basic ' + b64encode('devtable+somerobot:%s' % token)
result = validate_basic_auth(header)
assert result == ValidateResult(
AuthKind.basic,
error_message='Could not find robot with username: devtable+somerobot and supplied password.')

View File

@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
from auth.credentials import validate_credentials, CredentialKind
from auth.credential_consts import (ACCESS_TOKEN_USERNAME, OAUTH_TOKEN_USERNAME,
APP_SPECIFIC_TOKEN_USERNAME)
@ -31,14 +33,15 @@ def test_valid_robot_for_disabled_user(app):
def test_valid_token(app):
access_token = model.token.create_delegate_token('devtable', 'simple', 'sometoken')
result, kind = validate_credentials(ACCESS_TOKEN_USERNAME, access_token.code)
result, kind = validate_credentials(ACCESS_TOKEN_USERNAME, access_token.get_code())
assert kind == CredentialKind.token
assert result == ValidateResult(AuthKind.credentials, token=access_token)
def test_valid_oauth(app):
user = model.user.get_user('devtable')
oauth_token = list(model.oauth.list_access_tokens_for_user(user))[0]
result, kind = validate_credentials(OAUTH_TOKEN_USERNAME, oauth_token.access_token)
app = model.oauth.list_applications_for_org(model.user.get_user_or_org('buynlarge'))[0]
oauth_token, code = model.oauth.create_access_token_for_testing(user, app.client_id, 'repo:read')
result, kind = validate_credentials(OAUTH_TOKEN_USERNAME, code)
assert kind == CredentialKind.oauth_token
assert result == ValidateResult(AuthKind.oauth, oauthtoken=oauth_token)
@ -51,8 +54,8 @@ def test_invalid_user(app):
def test_valid_app_specific_token(app):
user = model.user.get_user('devtable')
app_specific_token = model.appspecifictoken.create_token(user, 'some token')
result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, app_specific_token.token_code)
full_token = model.appspecifictoken.get_full_token_string(app_specific_token)
result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, full_token)
assert kind == CredentialKind.app_specific_token
assert result == ValidateResult(AuthKind.credentials, appspecifictoken=app_specific_token)
@ -62,8 +65,8 @@ def test_valid_app_specific_token_for_disabled_user(app):
user.save()
app_specific_token = model.appspecifictoken.create_token(user, 'some token')
result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, app_specific_token.token_code)
full_token = model.appspecifictoken.get_full_token_string(app_specific_token)
result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, full_token)
assert kind == CredentialKind.app_specific_token
err = 'This user has been disabled. Please contact your administrator.'
@ -73,3 +76,72 @@ def test_invalid_app_specific_token(app):
result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, 'somecode')
assert kind == CredentialKind.app_specific_token
assert result == ValidateResult(AuthKind.credentials, error_message='Invalid token')
def test_invalid_app_specific_token_code(app):
user = model.user.get_user('devtable')
app_specific_token = model.appspecifictoken.create_token(user, 'some token')
full_token = app_specific_token.token_name + 'something'
result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, full_token)
assert kind == CredentialKind.app_specific_token
assert result == ValidateResult(AuthKind.credentials, error_message='Invalid token')
def test_unicode(app):
result, kind = validate_credentials('someusername', 'some₪code')
assert kind == CredentialKind.user
assert not result.auth_valid
assert result == ValidateResult(AuthKind.credentials,
error_message='Invalid Username or Password')
def test_unicode_robot(app):
robot, _ = model.user.create_robot('somerobot', model.user.get_user('devtable'))
result, kind = validate_credentials(robot.username, 'some₪code')
assert kind == CredentialKind.robot
assert not result.auth_valid
msg = 'Could not find robot with username: devtable+somerobot and supplied password.'
assert result == ValidateResult(AuthKind.credentials, error_message=msg)
def test_invalid_user(app):
result, kind = validate_credentials('someinvaliduser', 'password')
assert kind == CredentialKind.user
assert not result.authed_user
assert not result.auth_valid
def test_invalid_user_password(app):
result, kind = validate_credentials('devtable', 'somepassword')
assert kind == CredentialKind.user
assert not result.authed_user
assert not result.auth_valid
def test_invalid_robot(app):
result, kind = validate_credentials('devtable+doesnotexist', 'password')
assert kind == CredentialKind.robot
assert not result.authed_user
assert not result.auth_valid
def test_invalid_robot_token(app):
robot, _ = model.user.create_robot('somerobot', model.user.get_user('devtable'))
result, kind = validate_credentials(robot.username, 'invalidpassword')
assert kind == CredentialKind.robot
assert not result.authed_user
assert not result.auth_valid
def test_invalid_unicode_robot(app):
token = '“4JPCOLIVMAY32Q3XGVPHC4CBF8SKII5FWNYMASOFDIVSXTC5I5NBU”'
result, kind = validate_credentials('devtable+somerobot', token)
assert kind == CredentialKind.robot
assert not result.auth_valid
msg = 'Could not find robot with username: devtable+somerobot'
assert result == ValidateResult(AuthKind.credentials, error_message=msg)
def test_invalid_unicode_robot_2(app):
user = model.user.get_user('devtable')
robot, password = model.user.create_robot('somerobot', user)
token = '“4JPCOLIVMAY32Q3XGVPHC4CBF8SKII5FWNYMASOFDIVSXTC5I5NBU”'
result, kind = validate_credentials('devtable+somerobot', token)
assert kind == CredentialKind.robot
assert not result.auth_valid
msg = 'Could not find robot with username: devtable+somerobot and supplied password.'
assert result == ValidateResult(AuthKind.credentials, error_message=msg)

View File

@ -18,20 +18,23 @@ def test_bearer(header, expected_result, app):
def test_valid_oauth(app):
user = model.user.get_user('devtable')
token = list(model.oauth.list_access_tokens_for_user(user))[0]
result = validate_bearer_auth('bearer ' + token.access_token)
assert result.context.oauthtoken == token
app = model.oauth.list_applications_for_org(model.user.get_user_or_org('buynlarge'))[0]
token_string = '%s%s' % ('a' * 20, 'b' * 20)
oauth_token, _ = model.oauth.create_access_token_for_testing(user, app.client_id, 'repo:read',
access_token=token_string)
result = validate_bearer_auth('bearer ' + token_string)
assert result.context.oauthtoken == oauth_token
assert result.authed_user == user
assert result.auth_valid
def test_disabled_user_oauth(app):
user = model.user.get_user('disabled')
token = model.oauth.create_access_token_for_testing(user, 'deadbeef', 'repo:admin',
access_token='foo')
token_string = '%s%s' % ('a' * 20, 'b' * 20)
oauth_token, _ = model.oauth.create_access_token_for_testing(user, 'deadbeef', 'repo:admin',
access_token=token_string)
result = validate_bearer_auth('bearer ' + token.access_token)
result = validate_bearer_auth('bearer ' + token_string)
assert result.context.oauthtoken is None
assert result.authed_user is None
assert not result.auth_valid
@ -40,10 +43,12 @@ def test_disabled_user_oauth(app):
def test_expired_token(app):
user = model.user.get_user('devtable')
token = model.oauth.create_access_token_for_testing(user, 'deadbeef', 'repo:admin',
access_token='bar', expires_in=-1000)
token_string = '%s%s' % ('a' * 20, 'b' * 20)
oauth_token, _ = model.oauth.create_access_token_for_testing(user, 'deadbeef', 'repo:admin',
access_token=token_string,
expires_in=-1000)
result = validate_bearer_auth('bearer ' + token.access_token)
result = validate_bearer_auth('bearer ' + token_string)
assert result.context.oauthtoken is None
assert result.authed_user is None
assert not result.auth_valid

View File

@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
import time
import jwt
@ -6,7 +8,7 @@ import pytest
from app import app, instance_keys
from auth.auth_context_type import ValidatedAuthContext
from auth.registry_jwt_auth import identity_from_bearer_token, InvalidJWTException
from data import model # TODO(jzelinskie): remove this after service keys are decoupled
from data import model # TODO: remove this after service keys are decoupled
from data.database import ServiceKeyApprovalType
from initdb import setup_database_for_testing, finished_database_for_testing
from util.morecollections import AttrDict
@ -190,3 +192,12 @@ def test_mixing_keys_e2e(initialized_db):
# Ensure the key no longer works.
with pytest.raises(InvalidJWTException):
_parse_token(deleted_key_token)
@pytest.mark.parametrize('token', [
u'someunicodetoken✡',
u'\xc9\xad\xbd',
])
def test_unicode_token(token):
with pytest.raises(InvalidJWTException):
_parse_token(token)

View File

@ -3,13 +3,14 @@ import pytest
from auth.auth_context import get_authenticated_context
from auth.validateresult import AuthKind, ValidateResult
from data import model
from data.database import AppSpecificAuthToken
from test.fixtures import *
def get_user():
return model.user.get_user('devtable')
def get_app_specific_token():
return model.appspecifictoken.access_valid_token('test')
return AppSpecificAuthToken.get()
def get_robot():
robot, _ = model.user.create_robot('somebot', get_user())

View File

@ -36,6 +36,10 @@ class ValidateResult(object):
result.context = self.context
return result
def __repr__(self):
return 'ValidateResult: %s (missing: %s, error: %s)' % (self.kind, self.missing,
self.error_message)
@property
def authed_user(self):
""" Returns the authenticated user, whether directly, or via an OAuth token. """

76
boot.py
View File

@ -4,7 +4,7 @@ from datetime import datetime, timedelta
from urlparse import urlunparse
from jinja2 import Template
from cachetools import lru_cache
from cachetools.func import lru_cache
import logging
import release
@ -45,40 +45,55 @@ def get_audience():
return urlunparse((scheme, hostname + ':' + port, '', '', '', ''))
def _verify_service_key():
try:
with open(app.config['INSTANCE_SERVICE_KEY_KID_LOCATION']) as f:
quay_key_id = f.read()
try:
get_service_key(quay_key_id, approved_only=False)
assert os.path.exists(app.config['INSTANCE_SERVICE_KEY_LOCATION'])
return quay_key_id
except ServiceKeyDoesNotExist:
logger.exception('Could not find non-expired existing service key %s; creating a new one',
quay_key_id)
return None
# Found a valid service key, so exiting.
except IOError:
logger.exception('Could not load existing service key; creating a new one')
return None
def setup_jwt_proxy():
"""
Creates a service key for quay to use in the jwtproxy and generates the JWT proxy configuration.
"""
if os.path.exists(os.path.join(CONF_DIR, 'jwtproxy_conf.yaml')):
# Proxy is already setup. Make sure the service key is still valid.
try:
with open(app.config['INSTANCE_SERVICE_KEY_KID_LOCATION']) as f:
quay_key_id = f.read()
quay_key_id = _verify_service_key()
if quay_key_id is not None:
return
try:
get_service_key(quay_key_id, approved_only=False)
return
except ServiceKeyDoesNotExist:
logger.exception('Could not find non-expired existing service key %s; creating a new one',
quay_key_id)
# Ensure we have an existing key if in read-only mode.
if app.config.get('REGISTRY_STATE', 'normal') == 'readonly':
quay_key_id = _verify_service_key()
if quay_key_id is None:
raise Exception('No valid service key found for read-only registry.')
else:
# Generate the key for this Quay instance to use.
minutes_until_expiration = app.config.get('INSTANCE_SERVICE_KEY_EXPIRATION', 120)
expiration = datetime.now() + timedelta(minutes=minutes_until_expiration)
quay_key, quay_key_id = generate_key(app.config['INSTANCE_SERVICE_KEY_SERVICE'],
get_audience(), expiration_date=expiration)
# Found a valid service key, so exiting.
except IOError:
logger.exception('Could not load existing service key; creating a new one')
with open(app.config['INSTANCE_SERVICE_KEY_KID_LOCATION'], mode='w') as f:
f.truncate(0)
f.write(quay_key_id)
# Generate the key for this Quay instance to use.
minutes_until_expiration = app.config.get('INSTANCE_SERVICE_KEY_EXPIRATION', 120)
expiration = datetime.now() + timedelta(minutes=minutes_until_expiration)
quay_key, quay_key_id = generate_key(app.config['INSTANCE_SERVICE_KEY_SERVICE'],
get_audience(), expiration_date=expiration)
with open(app.config['INSTANCE_SERVICE_KEY_KID_LOCATION'], mode='w') as f:
f.truncate(0)
f.write(quay_key_id)
with open(app.config['INSTANCE_SERVICE_KEY_LOCATION'], mode='w') as f:
f.truncate(0)
f.write(quay_key.exportKey())
with open(app.config['INSTANCE_SERVICE_KEY_LOCATION'], mode='w') as f:
f.truncate(0)
f.write(quay_key.exportKey())
# Generate the JWT proxy configuration.
audience = get_audience()
@ -93,6 +108,7 @@ def setup_jwt_proxy():
registry=registry,
key_id=quay_key_id,
security_issuer=security_issuer,
service_key_location=app.config['INSTANCE_SERVICE_KEY_LOCATION'],
)
with open(os.path.join(CONF_DIR, 'jwtproxy_conf.yaml'), 'w') as f:
@ -100,9 +116,11 @@ def setup_jwt_proxy():
def main():
if app.config.get('SETUP_COMPLETE', False):
sync_database_with_config(app.config)
setup_jwt_proxy()
if not app.config.get('SETUP_COMPLETE', False):
raise Exception('Your configuration bundle is either not mounted or setup has not been completed')
sync_database_with_config(app.config)
setup_jwt_proxy()
# Record deploy
if release.REGION and release.GIT_HEAD:

View File

@ -1,7 +1,17 @@
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from trollius import get_event_loop, coroutine
def wrap_with_threadpool(obj, worker_threads=1):
"""
Wraps a class in an async executor so that it can be safely used in an event loop like trollius.
"""
async_executor = ThreadPoolExecutor(worker_threads)
return AsyncWrapper(obj, executor=async_executor), async_executor
class AsyncWrapper(object):
""" Wrapper class which will transform a syncronous library to one that can be used with
trollius coroutines.

View File

@ -37,6 +37,12 @@ def run_build_manager():
time.sleep(1000)
return
if app.config.get('REGISTRY_STATE', 'normal') == 'readonly':
logger.debug('Building is disabled while in read-only mode.')
while True:
time.sleep(1000)
return
build_manager_config = app.config.get('BUILD_MANAGER')
if build_manager_config is None:
return

View File

@ -4,11 +4,11 @@ import time
import logging
import json
import trollius
import re
from autobahn.wamp.exception import ApplicationError
from trollius import From, Return
from active_migration import ActiveDataMigration, ERTMigrationFlags
from buildman.server import BuildJobResult
from buildman.component.basecomponent import BaseComponent
from buildman.component.buildparse import extract_current_step
@ -20,7 +20,6 @@ from app import app
from data.database import BUILD_PHASE, UseThenDisconnect
from data.model import InvalidRepositoryBuildException
from data.registry_model import registry_model
from data.registry_model.datatypes import RepositoryReference
from util import slash_join
HEARTBEAT_DELTA = datetime.timedelta(seconds=60)
@ -123,25 +122,22 @@ class BuildComponent(BaseComponent):
# defaults to empty string to avoid requiring a pointer on the builder.
# sub_directory: The location within the build package of the Dockerfile and the build context.
# repository: The repository for which this build is occurring.
# registry: The registry for which this build is occurring (e.g. 'quay.io', 'staging.quay.io').
# registry: The registry for which this build is occurring (e.g. 'quay.io').
# pull_token: The token to use when pulling the cache for building.
# push_token: The token to use to push the built image.
# tag_names: The name(s) of the tag(s) for the newly built image.
# base_image: The image name and credentials to use to conduct the base image pull.
# username: The username for pulling the base image (if any).
# password: The password for pulling the base image (if any).
# TODO: Charlie Tuesday, March 28, 2017 come back and clean up build_subdir.
context, dockerfile_path = self.extract_dockerfile_args(build_config)
build_arguments = {
'build_package': build_job.get_build_package_url(self.user_files),
'context': context,
'dockerfile_path': dockerfile_path,
'repository': repository_name,
'registry': self.registry_hostname,
'pull_token': build_job.repo_build.access_token.code,
'push_token': build_job.repo_build.access_token.code,
'pull_token': build_job.repo_build.access_token.get_code(),
'push_token': build_job.repo_build.access_token.get_code(),
'tag_names': build_config.get('docker_tags', ['latest']),
'base_image': base_image_information,
}
@ -151,11 +147,23 @@ class BuildComponent(BaseComponent):
# url: url used to clone the git repository
# sha: the sha1 identifier of the commit to check out
# private_key: the key used to get read access to the git repository
if build_job.repo_build.trigger.private_key is not None:
# TODO(remove-unenc): Remove legacy field.
private_key = None
if build_job.repo_build.trigger is not None and \
build_job.repo_build.trigger.secure_private_key is not None:
private_key = build_job.repo_build.trigger.secure_private_key.decrypt()
if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS) and \
private_key is None and \
build_job.repo_build.trigger is not None:
private_key = build_job.repo_build.trigger.private_key
if private_key is not None:
build_arguments['git'] = {
'url': build_config['trigger_metadata'].get('git_url', ''),
'sha': BuildComponent._commit_sha(build_config),
'private_key': build_job.repo_build.trigger.private_key,
'private_key': private_key or '',
}
# If the build args have no buildpack, mark it as a failure before sending
@ -344,7 +352,7 @@ class BuildComponent(BaseComponent):
kwargs = {}
# Note: If we are hitting an older builder that didn't return ANY map data, then the result
# value will be a bool instead of a proper CallResult object (because autobahn sucks).
# value will be a bool instead of a proper CallResult object.
# Therefore: we have a try-except guard here to ensure we don't hit this pitfall.
try:
kwargs = result_value.kwresults
@ -366,10 +374,11 @@ class BuildComponent(BaseComponent):
if repository is not None:
for digest in manifest_digests:
with UseThenDisconnect(app.config):
manifest = registry_model.lookup_manifest_by_digest(repository, digest)
manifest = registry_model.lookup_manifest_by_digest(repository, digest,
require_available=True)
if manifest is None:
continue
registry_model.create_manifest_label(manifest, INTERNAL_LABEL_BUILD_UUID,
build_id, 'internal', 'text/plain')

View File

@ -2,7 +2,7 @@ import json
import logging
from app import app
from cachetools import lru_cache
from cachetools.func import lru_cache
from notifications import spawn_notification
from data import model
from data.registry_model import registry_model
@ -149,8 +149,6 @@ class BuildJobNotifier(object):
with UseThenDisconnect(app.config):
tags = self.build_config.get('docker_tags', ['latest'])
trigger = self.repo_build.trigger
# TODO(bison): This is weird hack. Figure this out.
if trigger is not None and trigger.id is not None:
trigger_kind = trigger.service.name
else:
@ -174,7 +172,7 @@ class BuildJobNotifier(object):
if error_message is not None:
event_data['error_message'] = error_message
# TODO(jzelinskie): remove when more endpoints have been converted to using
# TODO: remove when more endpoints have been converted to using
# interfaces
repo = AttrDict({
'namespace_name': self.repo_build.repository.namespace_user.username,

View File

@ -1,25 +1,27 @@
import logging
from buildman.manager.etcd_canceller import EtcdCanceller
from buildman.manager.orchestrator_canceller import OrchestratorCanceller
from buildman.manager.noop_canceller import NoopCanceller
logger = logging.getLogger(__name__)
CANCELLERS = {'ephemeral': EtcdCanceller}
CANCELLERS = {'ephemeral': OrchestratorCanceller}
class BuildCanceller(object):
""" A class to manage cancelling a build """
def __init__(self, app=None):
build_manager_config = app.config.get('BUILD_MANAGER')
if app is None or build_manager_config is None:
self.build_manager_config = app.config.get('BUILD_MANAGER')
if app is None or self.build_manager_config is None:
self.handler = NoopCanceller()
return
canceller = CANCELLERS.get(build_manager_config[0], NoopCanceller)
self.handler = canceller(build_manager_config[1])
else:
self.handler = None
def try_cancel_build(self, uuid):
""" A method to kill a running build """
if self.handler is None:
canceller = CANCELLERS.get(self.build_manager_config[0], NoopCanceller)
self.handler = canceller(self.build_manager_config[1])
return self.handler.try_cancel_build(uuid)

View File

@ -1,58 +1,49 @@
import logging
import etcd
import uuid
import calendar
import os.path
import json
import time
import trollius
from collections import namedtuple
from datetime import datetime, timedelta
from trollius import From, coroutine, Return, async
from concurrent.futures import ThreadPoolExecutor
from urllib3.exceptions import ReadTimeoutError, ProtocolError
from six import iteritems
from trollius import From, coroutine, Return, async, sleep
from app import metric_queue
from buildman.orchestrator import (orchestrator_from_config, KeyEvent,
OrchestratorError, OrchestratorConnectionError,
ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
from buildman.manager.basemanager import BaseManager
from buildman.manager.executor import PopenExecutor, EC2Executor, KubernetesExecutor
from buildman.component.buildcomponent import BuildComponent
from buildman.jobutil.buildjob import BuildJob
from buildman.asyncutil import AsyncWrapper
from buildman.server import BuildJobResult
from util import slash_join
from util.morecollections import AttrDict
logger = logging.getLogger(__name__)
ETCD_MAX_WATCH_TIMEOUT = 30
ETCD_ATOMIC_OP_TIMEOUT = 10000
RETRY_IMMEDIATELY_TIMEOUT = 0
NO_WORKER_AVAILABLE_TIMEOUT = 10
DEFAULT_EPHEMERAL_API_TIMEOUT = 20
DEFAULT_EPHEMERAL_SETUP_TIMEOUT = 500
ETCD_DOWN_SLEEP_TIMEOUT = 5
JOB_PREFIX = 'building/'
LOCK_PREFIX = 'lock/'
REALM_PREFIX = 'realm/'
CANCEL_PREFIX = 'cancel/'
METRIC_PREFIX = 'metric/'
CANCELED_LOCK_PREFIX = slash_join(LOCK_PREFIX, 'job-cancelled')
EXPIRED_LOCK_PREFIX = slash_join(LOCK_PREFIX, 'job-expired')
EPHEMERAL_API_TIMEOUT = 20
EPHEMERAL_SETUP_TIMEOUT = 500
RETRY_IMMEDIATELY_SLEEP_DURATION = 0
TOO_MANY_WORKERS_SLEEP_DURATION = 10
class EtcdAction(object):
""" Enumeration of the various kinds of etcd actions we can observe via a watch. """
GET = 'get'
SET = 'set'
EXPIRE = 'expire'
UPDATE = 'update'
DELETE = 'delete'
CREATE = 'create'
COMPARE_AND_SWAP = 'compareAndSwap'
COMPARE_AND_DELETE = 'compareAndDelete'
BuildInfo = namedtuple('BuildInfo', ['component', 'build_job', 'execution_id', 'executor_name'])
def _create_async_etcd_client(worker_threads=1, **kwargs):
client = etcd.Client(**kwargs)
async_executor = ThreadPoolExecutor(worker_threads)
return AsyncWrapper(client, executor=async_executor), async_executor
class EphemeralBuilderManager(BaseManager):
""" Build manager implementation for the Enterprise Registry. """
@ -64,24 +55,12 @@ class EphemeralBuilderManager(BaseManager):
}
def __init__(self, *args, **kwargs):
self._etcd_client_creator = kwargs.pop('etcd_creator', _create_async_etcd_client)
super(EphemeralBuilderManager, self).__init__(*args, **kwargs)
self._shutting_down = False
self._manager_config = None
self._async_thread_executor = None
self._etcd_client = None
self._etcd_realm_prefix = None
self._etcd_job_prefix = None
self._etcd_lock_prefix = None
self._etcd_metric_prefix = None
self._etcd_cancel_build_prefix = None
self._ephemeral_api_timeout = DEFAULT_EPHEMERAL_API_TIMEOUT
self._ephemeral_setup_timeout = DEFAULT_EPHEMERAL_SETUP_TIMEOUT
self._orchestrator = None
# The registered executors available for running jobs, in order.
self._ordered_executors = []
@ -89,83 +68,14 @@ class EphemeralBuilderManager(BaseManager):
# The registered executors, mapped by their unique name.
self._executor_name_to_executor = {}
# Map of etcd keys being watched to the tasks watching them
self._watch_tasks = {}
# Map from builder component to its associated job.
self._component_to_job = {}
# Map from build UUID to a BuildInfo tuple with information about the build.
self._build_uuid_to_info = {}
def _watch_etcd(self, etcd_key, change_coroutine_callback, start_index=None, recursive=True,
restarter=None):
watch_task_key = (etcd_key, recursive)
def callback_wrapper(changed_key_future):
new_index = start_index
etcd_result = None
if not changed_key_future.cancelled():
try:
etcd_result = changed_key_future.result()
existing_index = getattr(etcd_result, 'etcd_index', None)
new_index = etcd_result.modifiedIndex + 1
logger.debug('Got watch of key: %s%s at #%s with result: %s', etcd_key,
'*' if recursive else '', existing_index, etcd_result)
except ReadTimeoutError:
logger.debug('Read-timeout on etcd watch %s, rescheduling', etcd_key)
except etcd.EtcdEventIndexCleared:
# This happens if etcd2 has moved forward too fast for us to start watching
# at the index we retrieved. We therefore start a new watch at HEAD and
# (if specified) call the restarter method which should conduct a read and
# reset the state of the manager.
logger.debug('Etcd moved forward too quickly. Restarting watch cycle.')
new_index = None
if restarter is not None:
async(restarter())
except (KeyError, etcd.EtcdKeyError):
logger.debug('Etcd key already cleared: %s', etcd_key)
return
except etcd.EtcdConnectionFailed:
# If the connection has failed, then etcd is most likely down, and we need to
# sleep for a bit before checking for it to come up again.
logger.exception('Connecting to etcd failed; sleeping for %s and then trying again',
ETCD_DOWN_SLEEP_TIMEOUT)
time.sleep(ETCD_DOWN_SLEEP_TIMEOUT)
logger.exception('Connecting to etcd failed; slept for %s and now trying again',
ETCD_DOWN_SLEEP_TIMEOUT)
except etcd.EtcdException as eex:
# TODO(jschorr): This is a quick and dirty hack and should be replaced
# with a proper exception check.
if str(eex.message).find('Read timed out') >= 0:
logger.debug('Read-timeout on etcd watch %s, rescheduling', etcd_key)
else:
logger.exception('Exception on etcd watch: %s', etcd_key)
except ProtocolError:
logger.exception('Exception on etcd watch: %s', etcd_key)
if watch_task_key not in self._watch_tasks or self._watch_tasks[watch_task_key].done():
self._watch_etcd(etcd_key, change_coroutine_callback, start_index=new_index,
restarter=restarter)
if etcd_result:
async(change_coroutine_callback(etcd_result))
if not self._shutting_down:
logger.debug('Scheduling watch of key: %s%s at start index %s', etcd_key,
'*' if recursive else '', start_index)
watch_future = self._etcd_client.watch(etcd_key, recursive=recursive, index=start_index,
timeout=ETCD_MAX_WATCH_TIMEOUT)
watch_future.add_done_callback(callback_wrapper)
self._watch_tasks[watch_task_key] = async(watch_future)
def overall_setup_time(self):
return EPHEMERAL_SETUP_TIMEOUT
@coroutine
def _mark_job_incomplete(self, build_job, build_info):
@ -174,101 +84,97 @@ class EphemeralBuilderManager(BaseManager):
execution_id = build_info.execution_id
logger.warning('Build executor failed to successfully boot with execution id %s',
execution_id)
execution_id)
# Take a lock to ensure that only one manager reports the build as incomplete for this
# execution.
got_lock = yield From(self._take_etcd_atomic_lock('job-expired', build_job.build_uuid,
execution_id))
if got_lock:
lock_key = slash_join(self._expired_lock_prefix, build_job.build_uuid, execution_id)
acquired_lock = yield From(self._orchestrator.lock(lock_key))
if acquired_lock:
try:
# Clean up the bookkeeping for the job.
yield From(self._etcd_client.delete(self._etcd_job_key(build_job)))
except (KeyError, etcd.EtcdKeyError):
yield From(self._orchestrator.delete_key(self._job_key(build_job)))
except KeyError:
logger.debug('Could not delete job key %s; might have been removed already',
build_job.build_uuid)
logger.error('[BUILD INTERNAL ERROR] Build ID: %s. Exec name: %s. Exec ID: %s',
build_job.build_uuid, executor_name, execution_id)
build_job.build_uuid, executor_name, execution_id)
yield From(self.job_complete_callback(build_job, BuildJobResult.INCOMPLETE, executor_name,
update_phase=True))
else:
logger.debug('Did not get lock for job-expiration for job %s', build_job.build_uuid)
@coroutine
def _handle_job_change(self, etcd_result):
""" Handler invoked whenever a job expires or is deleted in etcd. """
if etcd_result is None:
def _job_callback(self, key_change):
"""
This is the callback invoked when keys related to jobs are changed.
It ignores all events related to the creation of new jobs.
Deletes or expirations cause checks to ensure they've been properly marked as completed.
:param key_change: the event and value produced by a key changing in the orchestrator
:type key_change: :class:`KeyChange`
"""
if key_change.event in (KeyEvent.CREATE, KeyEvent.SET):
raise Return()
if etcd_result.action in (EtcdAction.CREATE, EtcdAction.SET):
raise Return()
elif etcd_result.action in (EtcdAction.DELETE, EtcdAction.EXPIRE):
elif key_change.event in (KeyEvent.DELETE, KeyEvent.EXPIRE):
# Handle the expiration/deletion.
job_metadata = json.loads(etcd_result._prev_node.value)
job_metadata = json.loads(key_change.value)
build_job = BuildJob(AttrDict(job_metadata['job_queue_item']))
logger.debug('Got "%s" of job %s', etcd_result.action, build_job.build_uuid)
logger.debug('Got "%s" of job %s', key_change.event, build_job.build_uuid)
# Get the build info.
build_info = self._build_uuid_to_info.get(build_job.build_uuid, None)
if build_info is None:
logger.debug('No build info for "%s" job %s (%s); probably already deleted by this manager',
etcd_result.action, build_job.build_uuid, job_metadata)
key_change.event, build_job.build_uuid, job_metadata)
raise Return()
# If the etcd action was not an expiration, then it was already deleted by some manager and
# the execution was therefore already shutdown.
if etcd_result.action != EtcdAction.EXPIRE:
# Build information will no longer be needed; pop it off.
if key_change.event != KeyEvent.EXPIRE:
# If the etcd action was not an expiration, then it was already deleted by some manager and
# the execution was therefore already shutdown. All that's left is to remove the build info.
self._build_uuid_to_info.pop(build_job.build_uuid, None)
raise Return()
logger.debug('Got expiration for job %s with metadata: %s', build_job.build_uuid,
logger.debug('got expiration for job %s with metadata: %s', build_job.build_uuid,
job_metadata)
executor_name = build_info.executor_name
execution_id = build_info.execution_id
# If we have not yet received a heartbeat, then the node failed to boot in some way. We mark
# the job as incomplete here.
if not job_metadata.get('had_heartbeat', False):
# If we have not yet received a heartbeat, then the node failed to boot in some way.
# We mark the job as incomplete here.
yield From(self._mark_job_incomplete(build_job, build_info))
# Finally, we terminate the build execution for the job. We don't do this under a lock as
# terminating a node is an atomic operation; better to make sure it is terminated than not.
logger.info('Terminating expired build executor for job %s with execution id %s',
build_job.build_uuid, execution_id)
build_job.build_uuid, build_info.execution_id)
yield From(self.kill_builder_executor(build_job.build_uuid))
else:
logger.warning('Unexpected action (%s) on job key: %s', etcd_result.action, etcd_result.key)
logger.warning('Unexpected KeyEvent (%s) on job key: %s', key_change.event, key_change.key)
@coroutine
def _handle_realm_change(self, etcd_result):
if etcd_result is None:
raise Return()
if etcd_result.action == EtcdAction.CREATE:
# We must listen on the realm created by ourselves or another worker
realm_spec = json.loads(etcd_result.value)
def _realm_callback(self, key_change):
logger.debug('realm callback for key: %s', key_change.key)
if key_change.event == KeyEvent.CREATE:
# Listen on the realm created by ourselves or another worker.
realm_spec = json.loads(key_change.value)
self._register_realm(realm_spec)
elif etcd_result.action in (EtcdAction.DELETE, EtcdAction.EXPIRE):
# We must stop listening for new connections on the specified realm, if we did not get the
# connection
realm_spec = json.loads(etcd_result._prev_node.value)
elif key_change.event in (KeyEvent.DELETE, KeyEvent.EXPIRE):
# Stop listening for new connections on the realm, if we did not get the connection.
realm_spec = json.loads(key_change.value)
realm_id = realm_spec['realm']
build_job = BuildJob(AttrDict(realm_spec['job_queue_item']))
build_uuid = build_job.build_uuid
logger.debug('Realm key %s for build %s was %s', realm_id, build_uuid, etcd_result.action)
logger.debug('Realm key %s for build %s was %s', realm_id, build_uuid, key_change.event)
build_info = self._build_uuid_to_info.get(build_uuid, None)
if build_info is not None:
# Pop the component off. If we find one, then the build has not connected to this manager,
# so we can safely unregister its component.
# Pop off the component and if we find one, then the build has not connected to this
# manager, so we can safely unregister its component.
component = self._component_to_job.pop(build_info.component, None)
if component is not None:
# We were not the manager which the worker connected to, remove the bookkeeping for it
@ -276,7 +182,7 @@ class EphemeralBuilderManager(BaseManager):
self.unregister_component(build_info.component)
# If the realm has expired, then perform cleanup of the executor.
if etcd_result.action == EtcdAction.EXPIRE:
if key_change.event == KeyEvent.EXPIRE:
execution_id = realm_spec.get('execution_id', None)
executor_name = realm_spec.get('executor_name', 'EC2Executor')
@ -291,7 +197,8 @@ class EphemeralBuilderManager(BaseManager):
yield From(self.terminate_executor(executor_name, execution_id))
else:
logger.warning('Unexpected action (%s) on realm key: %s', etcd_result.action, etcd_result.key)
logger.warning('Unexpected action (%s) on realm key: %s', key_change.event, key_change.key)
def _register_realm(self, realm_spec):
logger.debug('Got call to register realm %s with manager', realm_spec['realm'])
@ -321,26 +228,19 @@ class EphemeralBuilderManager(BaseManager):
@coroutine
def _register_existing_realms(self):
try:
all_realms = yield From(self._etcd_client.read(self._etcd_realm_prefix, recursive=True))
all_realms = yield From(self._orchestrator.get_prefixed_keys(self._realm_prefix))
# Register all existing realms found.
encountered = set()
for realm in all_realms.children:
if not realm.dir:
component = self._register_realm(json.loads(realm.value))
encountered.add(component)
encountered = {self._register_realm(json.loads(realm_data))
for _realm, realm_data in all_realms}
# Remove any components not encountered so we can clean up.
for component, job in list(self._component_to_job.items()):
for component, job in iteritems(self._component_to_job):
if not component in encountered:
self._component_to_job.pop(component, None)
self._build_uuid_to_info.pop(job.build_uuid, None)
except (KeyError, etcd.EtcdKeyError):
# no realms have been registered yet
pass
except etcd.EtcdConnectionFailed:
# Not much to do.
except KeyError:
pass
def _load_executor(self, executor_kind_name, executor_config):
@ -356,6 +256,71 @@ class EphemeralBuilderManager(BaseManager):
self._ordered_executors.append(executor)
self._executor_name_to_executor[executor.name] = executor
def _config_prefix(self, key):
if self._manager_config.get('ORCHESTRATOR') is None:
return key
prefix = self._manager_config.get('ORCHESTRATOR_PREFIX', '')
return slash_join(prefix, key).lstrip('/') + '/'
@property
def _job_prefix(self):
return self._config_prefix(JOB_PREFIX)
@property
def _realm_prefix(self):
return self._config_prefix(REALM_PREFIX)
@property
def _cancel_prefix(self):
return self._config_prefix(CANCEL_PREFIX)
@property
def _metric_prefix(self):
return self._config_prefix(METRIC_PREFIX)
@property
def _expired_lock_prefix(self):
return self._config_prefix(EXPIRED_LOCK_PREFIX)
@property
def _canceled_lock_prefix(self):
return self._config_prefix(CANCELED_LOCK_PREFIX)
def _metric_key(self, realm):
"""
Create a key which is used to track a job in the Orchestrator.
:param realm: realm for the build
:type realm: str
:returns: key used to track jobs
:rtype: str
"""
return slash_join(self._metric_prefix, realm)
def _job_key(self, build_job):
"""
Creates a key which is used to track a job in the Orchestrator.
:param build_job: unique job identifier for a build
:type build_job: str
:returns: key used to track the job
:rtype: str
"""
return slash_join(self._job_prefix, build_job.job_details['build_uuid'])
def _realm_key(self, realm):
"""
Create a key which is used to track an incoming connection on a realm.
:param realm: realm for the build
:type realm: str
:returns: key used to track the connection to the realm
:rtype: str
"""
return slash_join(self._realm_prefix, realm)
def initialize(self, manager_config):
logger.debug('Calling initialize')
self._manager_config = manager_config
@ -369,93 +334,50 @@ class EphemeralBuilderManager(BaseManager):
else:
self._load_executor(manager_config.get('EXECUTOR'), manager_config.get('EXECUTOR_CONFIG'))
etcd_host = self._manager_config.get('ETCD_HOST', '127.0.0.1')
etcd_port = self._manager_config.get('ETCD_PORT', 2379)
etcd_ca_cert = self._manager_config.get('ETCD_CA_CERT', None)
logger.debug('calling orchestrator_from_config')
self._orchestrator = orchestrator_from_config(manager_config)
etcd_auth = self._manager_config.get('ETCD_CERT_AND_KEY', None)
if etcd_auth is not None:
etcd_auth = tuple(etcd_auth) # Convert YAML list to a tuple
etcd_protocol = 'http' if etcd_auth is None else 'https'
logger.debug('Connecting to etcd on %s:%s', etcd_host, etcd_port)
worker_threads = self._manager_config.get('ETCD_WORKER_THREADS', 5)
(self._etcd_client, self._async_thread_executor) = self._etcd_client_creator(
worker_threads,
host=etcd_host,
port=etcd_port,
cert=etcd_auth,
ca_cert=etcd_ca_cert,
protocol=etcd_protocol,
read_timeout=5,
)
self._etcd_job_prefix = self._manager_config.get('ETCD_BUILDER_PREFIX', 'building/')
self._watch_etcd(self._etcd_job_prefix, self._handle_job_change)
self._etcd_realm_prefix = self._manager_config.get('ETCD_REALM_PREFIX', 'realm/')
self._watch_etcd(self._etcd_realm_prefix, self._handle_realm_change,
restarter=self._register_existing_realms)
self._etcd_cancel_build_prefix = self._manager_config.get('ETCD_CANCEL_PREFIX', 'cancel/')
self._watch_etcd(self._etcd_cancel_build_prefix, self._cancel_build)
self._etcd_lock_prefix = self._manager_config.get('ETCD_LOCK_PREFIX', 'lock/')
self._etcd_metric_prefix = self._manager_config.get('ETCD_METRIC_PREFIX', 'metric/')
self._ephemeral_api_timeout = self._manager_config.get('API_TIMEOUT',
DEFAULT_EPHEMERAL_API_TIMEOUT)
self._ephemeral_setup_timeout = self._manager_config.get('SETUP_TIMEOUT',
DEFAULT_EPHEMERAL_SETUP_TIMEOUT)
logger.debug('setting on_key_change callbacks for job, cancel, realm')
self._orchestrator.on_key_change(self._job_prefix, self._job_callback)
self._orchestrator.on_key_change(self._cancel_prefix, self._cancel_callback)
self._orchestrator.on_key_change(self._realm_prefix, self._realm_callback,
restarter=self._register_existing_realms)
# Load components for all realms currently known to the cluster
async(self._register_existing_realms())
def overall_setup_time(self):
return self._ephemeral_setup_timeout
def shutdown(self):
logger.debug('Shutting down worker.')
self._shutting_down = True
for (etcd_key, _), task in self._watch_tasks.items():
if not task.done():
logger.debug('Canceling watch task for %s', etcd_key)
task.cancel()
if self._async_thread_executor is not None:
logger.debug('Shutting down thread pool executor.')
self._async_thread_executor.shutdown()
if self._orchestrator is not None:
self._orchestrator.shutdown()
@coroutine
def schedule(self, build_job):
build_uuid = build_job.job_details['build_uuid']
logger.debug('Calling schedule with job: %s', build_uuid)
# Check if there are worker slots available by checking the number of jobs in etcd
# Check if there are worker slots available by checking the number of jobs in the orchestrator
allowed_worker_count = self._manager_config.get('ALLOWED_WORKER_COUNT', 1)
try:
active_jobs = yield From(self._etcd_client.read(self._etcd_job_prefix, recursive=True))
workers_alive = sum(1 for child in active_jobs.children if not child.dir)
except (KeyError, etcd.EtcdKeyError):
active_jobs = yield From(self._orchestrator.get_prefixed_keys(self._job_prefix))
workers_alive = len(active_jobs)
except KeyError:
workers_alive = 0
except etcd.EtcdConnectionFailed:
logger.exception('Could not read job count from etcd for job due to etcd being down')
raise Return(False, ETCD_DOWN_SLEEP_TIMEOUT)
except etcd.EtcdException:
logger.exception('Exception when reading job count from etcd for job: %s', build_uuid)
raise Return(False, RETRY_IMMEDIATELY_TIMEOUT)
except OrchestratorConnectionError:
logger.exception('Could not read job count from orchestrator for job due to orchestrator being down')
raise Return(False, ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
except OrchestratorError:
logger.exception('Exception when reading job count from orchestrator for job: %s', build_uuid)
raise Return(False, RETRY_IMMEDIATELY_SLEEP_DURATION)
logger.debug('Total jobs (scheduling job %s): %s', build_uuid, workers_alive)
if workers_alive >= allowed_worker_count:
logger.info('Too many workers alive, unable to start new worker for build job: %s. %s >= %s',
build_uuid, workers_alive, allowed_worker_count)
raise Return(False, NO_WORKER_AVAILABLE_TIMEOUT)
raise Return(False, TOO_MANY_WORKERS_SLEEP_DURATION)
job_key = self._etcd_job_key(build_job)
job_key = self._job_key(build_job)
# First try to take a lock for this job, meaning we will be responsible for its lifeline
realm = str(uuid.uuid4())
@ -474,20 +396,22 @@ class EphemeralBuilderManager(BaseManager):
lock_payload = json.dumps(payload)
logger.debug('Writing key for job %s with expiration in %s seconds', build_uuid,
self._ephemeral_setup_timeout)
EPHEMERAL_SETUP_TIMEOUT)
try:
yield From(self._etcd_client.write(job_key, lock_payload, prevExist=False,
ttl=self._ephemeral_setup_timeout))
except (KeyError, etcd.EtcdKeyError):
# The job was already taken by someone else, we are probably a retry
logger.warning('Job: %s already exists in etcd, timeout may be misconfigured', build_uuid)
raise Return(False, self._ephemeral_api_timeout)
except etcd.EtcdConnectionFailed:
logger.exception('Exception when writing job %s to etcd; could not connect', build_uuid)
raise Return(False, ETCD_DOWN_SLEEP_TIMEOUT)
except etcd.EtcdException:
logger.exception('Exception when writing job %s to etcd', build_uuid)
raise Return(False, RETRY_IMMEDIATELY_TIMEOUT)
yield From(self._orchestrator.set_key(job_key, lock_payload, overwrite=False,
expiration=EPHEMERAL_SETUP_TIMEOUT))
except KeyError:
logger.warning('Job: %s already exists in orchestrator, timeout may be misconfigured',
build_uuid)
raise Return(False, EPHEMERAL_API_TIMEOUT)
except OrchestratorConnectionError:
logger.exception('Exception when writing job %s to orchestrator; could not connect',
build_uuid)
raise Return(False, ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
except OrchestratorError:
logger.exception('Exception when writing job %s to orchestrator', build_uuid)
raise Return(False, RETRY_IMMEDIATELY_SLEEP_DURATION)
# Got a lock, now lets boot the job via one of the registered executors.
started_with_executor = None
@ -548,8 +472,8 @@ class EphemeralBuilderManager(BaseManager):
logger.error('Could not start ephemeral worker for build %s', build_uuid)
# Delete the associated build job record.
yield From(self.delete_etcd_key(job_key))
raise Return(False, self._ephemeral_api_timeout)
yield From(self._orchestrator.delete_key(job_key))
raise Return(False, EPHEMERAL_API_TIMEOUT)
# Job was started!
logger.debug('Started execution with ID %s for job: %s with executor: %s',
@ -560,14 +484,16 @@ class EphemeralBuilderManager(BaseManager):
'executor_name': started_with_executor.name,
'start_time': time.time(),
})
try:
yield From(self._etcd_client.write(self._etcd_metric_key(realm), metric_spec, prevExist=False,
ttl=machine_max_expiration + 10))
except (KeyError, etcd.EtcdKeyError):
logger.error('Realm %s already exists in etcd for job %s ' +
yield From(self._orchestrator.set_key(self._metric_key(realm), metric_spec, overwrite=False,
expiration=machine_max_expiration + 10))
except KeyError:
logger.error('Realm %s already exists in orchestrator for job %s; ' +
'UUID collision or something is very very wrong.', realm, build_uuid)
except etcd.EtcdException:
logger.exception('Exception when writing realm %s to etcd for job %s', realm, build_uuid)
except OrchestratorError:
logger.exception('Exception when writing realm %s to orchestrator for job %s',
realm, build_uuid)
# Store the realm spec which will allow any manager to accept this builder when it connects
realm_spec = json.dumps({
@ -582,33 +508,31 @@ class EphemeralBuilderManager(BaseManager):
setup_time = started_with_executor.setup_time or self.overall_setup_time()
logger.debug('Writing job key for job %s using executor %s with ID %s and ttl %s', build_uuid,
started_with_executor.name, execution_id, setup_time)
yield From(self._etcd_client.write(self._etcd_realm_key(realm), realm_spec, prevExist=False,
ttl=setup_time))
except (KeyError, etcd.EtcdKeyError):
logger.error('Realm %s already exists in etcd for job %s ' +
'UUID collision or something is very very wrong.', realm, build_uuid)
raise Return(False, setup_time)
except etcd.EtcdConnectionFailed:
logger.exception('Exception when writing realm %s to etcd for job %s', realm, build_uuid)
raise Return(False, ETCD_DOWN_SLEEP_TIMEOUT)
except etcd.EtcdException:
logger.exception('Exception when writing realm %s to etcd for job %s', realm, build_uuid)
yield From(self._orchestrator.set_key(self._realm_key(realm), realm_spec,
expiration=setup_time))
except OrchestratorConnectionError:
logger.exception('Exception when writing realm %s to orchestrator for job %s',
realm, build_uuid)
raise Return(False, ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
except OrchestratorError:
logger.exception('Exception when writing realm %s to orchestrator for job %s',
realm, build_uuid)
raise Return(False, setup_time)
logger.debug('Builder spawn complete for job %s using executor %s with ID %s ', build_uuid,
started_with_executor.name, execution_id)
logger.debug('Builder spawn complete for job %s using executor %s with ID %s ',
build_uuid, started_with_executor.name, execution_id)
raise Return(True, None)
@coroutine
def build_component_ready(self, build_component):
logger.debug('Got component ready for component with realm %s', build_component.builder_realm)
# Pop off the job for the component. We do so before we send out the etcd watch below,
# as it will also remove this mapping.
# Pop off the job for the component.
# We do so before we send out the watch below, as it will also remove this mapping.
job = self._component_to_job.pop(build_component, None)
if job is None:
# This will occur once the build finishes, so no need to worry about it. We log in case it
# happens outside of the expected flow.
# This will occur once the build finishes, so no need to worry about it.
# We log in case it happens outside of the expected flow.
logger.debug('Could not find job for the build component on realm %s; component is ready',
build_component.builder_realm)
raise Return()
@ -621,10 +545,10 @@ class EphemeralBuilderManager(BaseManager):
yield From(self._write_duration_metric(metric_queue.builder_time_to_build,
build_component.builder_realm))
# Clean up the bookkeeping for allowing any manager to take the job.
try:
# Clean up the bookkeeping for allowing any manager to take the job.
yield From(self._etcd_client.delete(self._etcd_realm_key(build_component.builder_realm)))
except (KeyError, etcd.EtcdKeyError):
yield From(self._orchestrator.delete_key(self._realm_key(build_component.builder_realm)))
except KeyError:
logger.warning('Could not delete realm key %s', build_component.builder_realm)
def build_component_disposed(self, build_component, timed_out):
@ -647,26 +571,26 @@ class EphemeralBuilderManager(BaseManager):
# Kill the ephemeral builder.
yield From(self.kill_builder_executor(build_job.build_uuid))
# Delete the build job from etcd.
job_key = self._etcd_job_key(build_job)
# Delete the build job from the orchestrator.
try:
yield From(self._etcd_client.delete(job_key))
except etcd.EtcdConnectionFailed:
logger.exception('Could not remove job key as etcd is not available')
yield From(trollius.sleep(ETCD_DOWN_SLEEP_TIMEOUT))
raise Return()
except (KeyError, etcd.EtcdKeyError):
job_key = self._job_key(build_job)
yield From(self._orchestrator.delete_key(job_key))
except KeyError:
logger.debug('Builder is asking for job to be removed, but work already completed')
except OrchestratorConnectionError:
logger.exception('Could not remove job key as orchestrator is not available')
yield From(sleep(ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION))
raise Return()
# Delete the metric from etcd
metric_key = self._etcd_metric_key(build_component.builder_realm)
# Delete the metric from the orchestrator.
try:
yield From(self._etcd_client.delete(metric_key))
except (KeyError, etcd.EtcdKeyError):
metric_key = self._metric_key(build_component.builder_realm)
yield From(self._orchestrator.delete_key(metric_key))
except KeyError:
logger.debug('Builder is asking for metric to be removed, but key not found')
except etcd.EtcdConnectionFailed:
logger.exception('Could not remove metric key as etcd is not available')
yield From(trollius.sleep(ETCD_DOWN_SLEEP_TIMEOUT))
except OrchestratorConnectionError:
logger.exception('Could not remove metric key as orchestrator is not available')
yield From(sleep(ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION))
raise Return()
logger.debug('job_completed for job %s with status: %s', build_job.build_uuid, job_status)
@ -698,19 +622,24 @@ class EphemeralBuilderManager(BaseManager):
@coroutine
def job_heartbeat(self, build_job):
# Extend the queue item.
"""
:param build_job: the identifier for the build
:type build_job: str
"""
self.job_heartbeat_callback(build_job)
self._extend_job_in_orchestrator(build_job)
# Extend the deadline in etcd.
job_key = self._etcd_job_key(build_job)
@coroutine
def _extend_job_in_orchestrator(self, build_job):
try:
build_job_metadata_response = yield From(self._etcd_client.read(job_key))
except (KeyError, etcd.EtcdKeyError):
logger.info('Job %s no longer exists in etcd', build_job.build_uuid)
job_data = yield From(self._orchestrator.get_key(self._job_key(build_job)))
except KeyError:
logger.info('Job %s no longer exists in the orchestrator', build_job.build_uuid)
raise Return()
except OrchestratorConnectionError:
logger.exception('Failed to connect when attempting to extend job %s', build_job.build_uuid)
raise Return()
build_job_metadata = json.loads(build_job_metadata_response.value)
build_job_metadata = json.loads(job_data)
max_expiration = datetime.utcfromtimestamp(build_job_metadata['max_expiration'])
max_expiration_remaining = max_expiration - datetime.utcnow()
@ -723,41 +652,22 @@ class EphemeralBuilderManager(BaseManager):
'had_heartbeat': True,
}
# Note: A TTL of < 0 in etcd results in the key *never being expired*. We use a max here
# to ensure that if the TTL is < 0, the key will expire immediately.
etcd_ttl = max(ttl, 0)
try:
yield From(self._etcd_client.write(job_key, json.dumps(payload), ttl=etcd_ttl))
except etcd.EtcdConnectionFailed:
logger.exception('Could not update heartbeat for job as etcd is not available')
yield From(trollius.sleep(ETCD_DOWN_SLEEP_TIMEOUT))
@coroutine
def _take_etcd_atomic_lock(self, path, *args):
""" Takes a lock for atomic operations via etcd over the given path. Returns true if the lock
was granted and false otherwise.
"""
pieces = [self._etcd_lock_prefix, path]
pieces.extend(args)
lock_key = os.path.join(*pieces)
try:
yield From(self._etcd_client.write(lock_key, {}, prevExist=False, ttl=ETCD_ATOMIC_OP_TIMEOUT))
raise Return(True)
except etcd.EtcdConnectionFailed:
logger.exception('Could not get etcd atomic lock as etcd is down')
raise Return(False)
except (KeyError, etcd.EtcdKeyError):
raise Return(False)
yield From(self._orchestrator.set_key(self._job_key(build_job), json.dumps(payload),
expiration=ttl))
except OrchestratorConnectionError:
logger.exception('Could not update heartbeat for job as the orchestrator is not available')
yield From(sleep(ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION))
@coroutine
def _write_duration_metric(self, metric, realm):
""" Returns true if the metric was written and and false otherwise.
"""
:returns: True if the metric was written, otherwise False
:rtype: bool
"""
try:
metric_data = yield From(self._etcd_client.read(self._etcd_metric_key(realm)))
parsed_metric_data = json.loads(metric_data.value)
metric_data = yield From(self._orchestrator.get_key(self._metric_key(realm)))
parsed_metric_data = json.loads(metric_data)
start_time = parsed_metric_data['start_time']
metric.Observe(time.time() - start_time,
labelvalues=[parsed_metric_data.get('executor_name',
@ -765,58 +675,36 @@ class EphemeralBuilderManager(BaseManager):
except Exception:
logger.exception("Could not write metric for realm %s", realm)
def _etcd_metric_key(self, realm):
""" Create a key which is used to track a job in etcd.
"""
return os.path.join(self._etcd_metric_prefix, realm)
def _etcd_job_key(self, build_job):
""" Create a key which is used to track a job in etcd.
"""
return os.path.join(self._etcd_job_prefix, build_job.job_details['build_uuid'])
def _etcd_realm_key(self, realm):
""" Create a key which is used to track an incoming connection on a realm.
"""
return os.path.join(self._etcd_realm_prefix, realm)
def num_workers(self):
""" Return the number of workers we're managing locally.
"""
The number of workers we're managing locally.
:returns: the number of the workers locally managed
:rtype: int
"""
return len(self._component_to_job)
@coroutine
def _cancel_build(self, etcd_result):
""" Listens for etcd event and then cancels the build
"""
if etcd_result is None:
raise Return(False)
def _cancel_callback(self, key_change):
if key_change.event not in (KeyEvent.CREATE, KeyEvent.SET):
raise Return()
if etcd_result.action not in (EtcdAction.CREATE, EtcdAction.SET):
raise Return(False)
build_uuid = etcd_result.value
build_uuid = key_change.value
build_info = self._build_uuid_to_info.get(build_uuid, None)
if build_info is None:
logger.debug('No build info for "%s" job %s', etcd_result.action, build_uuid)
logger.debug('No build info for "%s" job %s', key_change.event, build_uuid)
raise Return(False)
got_lock = yield From(self._take_etcd_atomic_lock('job-cancelled', build_uuid, build_info.execution_id))
if got_lock:
lock_key = slash_join(self._canceled_lock_prefix,
build_uuid, build_info.execution_id)
lock_acquired = yield From(self._orchestrator.lock(lock_key))
if lock_acquired:
builder_realm = build_info.component.builder_realm
yield From(self.kill_builder_executor(build_uuid))
yield From(self.delete_etcd_key(self._etcd_realm_key(build_info.component.builder_realm)))
yield From(self.delete_etcd_key(self._etcd_metric_key(build_info.component.builder_realm)))
yield From(self.delete_etcd_key(os.path.join(self._etcd_job_prefix, build_uuid)))
yield From(self._orchestrator.delete_key(self._realm_key(builder_realm)))
yield From(self._orchestrator.delete_key(self._metric_key(builder_realm)))
yield From(self._orchestrator.delete_key(slash_join(self._job_prefix, build_uuid)))
# This is outside the lock so we can un-register the component wherever it is registered to.
yield From(build_info.component.cancel_build())
@coroutine
def delete_etcd_key(self, etcd_key):
try:
yield From(self._etcd_client.delete(etcd_key))
except (KeyError, etcd.EtcdKeyError):
logger.warning('Could not delete etcd key %s', etcd_key)
except etcd.EtcdConnectionFailed:
logger.exception('Could not delete etcd key as etcd is down')
yield From(trollius.sleep(ETCD_DOWN_SLEEP_TIMEOUT))

View File

@ -1,26 +1,31 @@
import datetime
import hashlib
import logging
import os
import uuid
import threading
import boto.ec2
import requests
import cachetools
import trollius
import datetime
import release
import socket
import hashlib
import subprocess
import threading
import uuid
from jinja2 import FileSystemLoader, Environment
from trollius import coroutine, From, Return, get_event_loop
from functools import partial
from buildman.asyncutil import AsyncWrapper
import boto.ec2
import cachetools.func
import requests
import trollius
from container_cloud_config import CloudConfigContext
from jinja2 import FileSystemLoader, Environment
from trollius import coroutine, From, Return, get_event_loop
import release
from buildman.asyncutil import AsyncWrapper
from app import metric_queue, app
from util.metrics.metricqueue import duration_collector_async
from _init import ROOT_DIR
logger = logging.getLogger(__name__)
@ -146,7 +151,7 @@ class EC2Executor(BuilderExecutor):
))
@classmethod
@cachetools.ttl_cache(ttl=ONE_HOUR)
@cachetools.func.ttl_cache(ttl=ONE_HOUR)
def _get_coreos_ami(cls, ec2_region, coreos_channel):
""" Retrieve the CoreOS AMI id from the canonical listing.
"""
@ -268,7 +273,6 @@ class PopenExecutor(BuilderExecutor):
def start_builder(self, realm, token, build_uuid):
# Now start a machine for this job, adding the machine id to the etcd information
logger.debug('Forking process for build')
import subprocess
ws_host = os.environ.get("BUILDMAN_WS_HOST", "localhost")
ws_port = os.environ.get("BUILDMAN_WS_PORT", "8787")
@ -283,7 +287,8 @@ class PopenExecutor(BuilderExecutor):
}
logpipe = LogPipe(logging.INFO)
spawned = subprocess.Popen(os.environ.get("BUILDER_BINARY_LOCATION", '/usr/local/bin/quay-builder'),
spawned = subprocess.Popen(os.environ.get('BUILDER_BINARY_LOCATION',
'/usr/local/bin/quay-builder'),
stdout=logpipe,
stderr=logpipe,
env=builder_env)
@ -323,13 +328,19 @@ class KubernetesExecutor(BuilderExecutor):
tls_cert = self.executor_config.get('K8S_API_TLS_CERT')
tls_key = self.executor_config.get('K8S_API_TLS_KEY')
tls_ca = self.executor_config.get('K8S_API_TLS_CA')
service_account_token = self.executor_config.get('SERVICE_ACCOUNT_TOKEN')
if 'timeout' not in request_options:
request_options['timeout'] = self.executor_config.get("K8S_API_TIMEOUT", 20)
if tls_cert and tls_key:
if service_account_token:
scheme = 'https'
request_options['headers'] = {'Authorization': 'Bearer ' + service_account_token}
logger.debug('Using service account token for Kubernetes authentication')
elif tls_cert and tls_key:
scheme = 'https'
request_options['cert'] = (tls_cert, tls_key)
logger.debug('Using tls certificate and key for Kubernetes authentication')
if tls_ca:
request_options['verify'] = tls_ca
else:
@ -350,22 +361,75 @@ class KubernetesExecutor(BuilderExecutor):
def _job_path(self, build_uuid):
return '%s/%s' % (self._jobs_path(), build_uuid)
def _job_resource(self, build_uuid, user_data, coreos_channel='stable'):
vm_memory_limit = self.executor_config.get('VM_MEMORY_LIMIT', '4G')
vm_volume_size = self.executor_config.get('VOLUME_SIZE', '32G')
def _kubernetes_distribution(self):
return self.executor_config.get('KUBERNETES_DISTRIBUTION', 'basic').lower()
def _is_basic_kubernetes_distribution(self):
return self._kubernetes_distribution() == 'basic'
def _is_openshift_kubernetes_distribution(self):
return self._kubernetes_distribution() == 'openshift'
def _build_job_container_resources(self):
# Minimum acceptable free resources for this container to "fit" in a quota
# These may be lower than the aboslute limits if the cluster is knowingly
# These may be lower than the absolute limits if the cluster is knowingly
# oversubscribed by some amount.
container_requests = {
'memory' : self.executor_config.get('CONTAINER_MEMORY_REQUEST', '3968Mi'),
}
container_limits = {
'memory' : self.executor_config.get('CONTAINER_MEMORY_LIMITS', '5120Mi'),
'cpu' : self.executor_config.get('CONTAINER_CPU_LIMITS', '1000m'),
}
resources = {
'requests': container_requests,
}
if self._is_openshift_kubernetes_distribution():
resources['requests']['cpu'] = self.executor_config.get('CONTAINER_CPU_REQUEST', '500m')
resources['limits'] = container_limits
return resources
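With the default executor_config values above, the computed resources differ by distribution roughly as follows (illustrative sketch only):
# 'basic' distribution: memory request only, no limits block.
basic_resources = {'requests': {'memory': '3968Mi'}}
# 'openshift' distribution: CPU request added and explicit limits applied.
openshift_resources = {
    'requests': {'memory': '3968Mi', 'cpu': '500m'},
    'limits': {'memory': '5120Mi', 'cpu': '1000m'},
}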
def _build_job_containers(self, user_data):
vm_memory_limit = self.executor_config.get('VM_MEMORY_LIMIT', '4G')
vm_volume_size = self.executor_config.get('VOLUME_SIZE', '32G')
container = {
'name': 'builder',
'imagePullPolicy': 'IfNotPresent',
'image': self.image,
'securityContext': {'privileged': True},
'env': [
{'name': 'USERDATA', 'value': user_data},
{'name': 'VM_MEMORY', 'value': vm_memory_limit},
{'name': 'VM_VOLUME_SIZE', 'value': vm_volume_size},
],
'resources': self._build_job_container_resources(),
}
if self._is_basic_kubernetes_distribution():
container['volumeMounts'] = [{'name': 'secrets-mask','mountPath': '/var/run/secrets/kubernetes.io/serviceaccount'}]
return container
def _job_resource(self, build_uuid, user_data, coreos_channel='stable'):
image_pull_secret_name = self.executor_config.get('IMAGE_PULL_SECRET_NAME', 'builder')
service_account = self.executor_config.get('SERVICE_ACCOUNT_NAME', 'quay-builder-sa')
node_selector_label_key = self.executor_config.get('NODE_SELECTOR_LABEL_KEY', 'beta.kubernetes.io/instance-type')
node_selector_label_value = self.executor_config.get('NODE_SELECTOR_LABEL_VALUE', '')
node_selector = {
node_selector_label_key : node_selector_label_value
}
release_sha = release.GIT_HEAD or 'none'
if ' ' in release_sha:
release_sha = 'HEAD'
return {
job_resource = {
'apiVersion': 'batch/v1',
'kind': 'Job',
'metadata': {
@ -390,52 +454,42 @@ class KubernetesExecutor(BuilderExecutor):
},
},
'spec': {
# This volume is a hack to mask the token for the namespace's
# default service account, which is placed in a file mounted under
# `/var/run/secrets/kubernetes.io/serviceaccount` in all pods.
# There's currently no other way to just disable the service
# account at either the pod or namespace level.
#
# https://github.com/kubernetes/kubernetes/issues/16779
#
'volumes': [
{
'name': 'secrets-mask',
'emptyDir': {
'medium': 'Memory',
},
},
],
'containers': [
{
'name': 'builder',
'imagePullPolicy': 'IfNotPresent',
'image': self.image,
'securityContext': {'privileged': True},
'env': [
{'name': 'USERDATA', 'value': user_data},
{'name': 'VM_MEMORY', 'value': vm_memory_limit},
{'name': 'VM_VOLUME_SIZE', 'value': vm_volume_size},
],
'resources': {
'requests': container_requests,
},
'volumeMounts': [
{
'name': 'secrets-mask',
'mountPath': '/var/run/secrets/kubernetes.io/serviceaccount',
},
],
},
],
'imagePullSecrets': [{'name': 'builder'}],
'imagePullSecrets': [{ 'name': image_pull_secret_name }],
'restartPolicy': 'Never',
'dnsPolicy': 'Default',
'containers': [self._build_job_containers(user_data)],
},
},
},
}
if self._is_openshift_kubernetes_distribution():
# Setting `automountServiceAccountToken` to false will prevent automounting API credentials for a service account.
job_resource['spec']['template']['spec']['automountServiceAccountToken'] = False
# Use dedicated service account that has no authorization to any resources.
job_resource['spec']['template']['spec']['serviceAccount'] = service_account
# Setting `enableServiceLinks` to false prevents information about other services from being injected into pod's
# environment variables. Pod has no visibility into other services on the cluster.
job_resource['spec']['template']['spec']['enableServiceLinks'] = False
if node_selector_label_value.strip() != '':
job_resource['spec']['template']['spec']['nodeSelector'] = node_selector
if self._is_basic_kubernetes_distribution():
# This volume is a hack to mask the token for the namespace's
# default service account, which is placed in a file mounted under
# `/var/run/secrets/kubernetes.io/serviceaccount` in all pods.
# There's currently no other way to just disable the service
# account at either the pod or namespace level.
#
# https://github.com/kubernetes/kubernetes/issues/16779
#
job_resource['spec']['template']['spec']['volumes'] = [{'name': 'secrets-mask','emptyDir': {'medium': 'Memory'}}]
return job_resource
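On an 'openshift' distribution, the pod template spec therefore ends up carrying the hardening fields set above; a sketch of that fragment, assuming the default service account name:
openshift_spec_extras = {
    'automountServiceAccountToken': False,  # no API credentials mounted into the build pod
    'serviceAccount': 'quay-builder-sa',    # dedicated service account with no authorization
    'enableServiceLinks': False,            # no service discovery env vars injected
}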
@coroutine
@duration_collector_async(metric_queue.builder_time_to_start, ['k8s'])
def start_builder(self, realm, token, build_uuid):
@ -443,6 +497,7 @@ class KubernetesExecutor(BuilderExecutor):
channel = self.executor_config.get('COREOS_CHANNEL', 'stable')
user_data = self.generate_cloud_config(realm, token, build_uuid, channel, self.manager_hostname)
resource = self._job_resource(build_uuid, user_data, channel)
logger.debug('Using Kubernetes Distribution: %s', self._kubernetes_distribution())
logger.debug('Generated kubernetes resource:\n%s', resource)
# schedule

View File

@ -0,0 +1,26 @@
import logging
from buildman.orchestrator import orchestrator_from_config, OrchestratorError
from util import slash_join
logger = logging.getLogger(__name__)
CANCEL_PREFIX = 'cancel/'
class OrchestratorCanceller(object):
""" An asynchronous way to cancel a build with any Orchestrator. """
def __init__(self, config):
self._orchestrator = orchestrator_from_config(config, canceller_only=True)
def try_cancel_build(self, build_uuid):
logger.info('Cancelling build %s', build_uuid)
cancel_key = slash_join(CANCEL_PREFIX, build_uuid)
try:
self._orchestrator.set_key_sync(cancel_key, build_uuid, expiration=60)
return True
except OrchestratorError:
logger.exception('Failed to write cancel action to redis with uuid %s', build_uuid)
return False
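A rough usage sketch, assuming a Redis-backed configuration (the host value is a placeholder):
config = {'ORCHESTRATOR': {'REDIS_HOST': 'localhost'}}
canceller = OrchestratorCanceller(config)
if not canceller.try_cancel_build('deadbeef-dead-beef-dead-deadbeefdead'):
    logger.warning('cancellation was not recorded; is the orchestrator reachable?')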

753
buildman/orchestrator.py Normal file
View File

@ -0,0 +1,753 @@
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import datetime
import json
import logging
import re
import time
from enum import IntEnum, unique
from six import add_metaclass, iteritems
from trollius import async, coroutine, From, Return
from urllib3.exceptions import ReadTimeoutError, ProtocolError
import etcd
import redis
from buildman.asyncutil import wrap_with_threadpool
from util import slash_join
from util.expiresdict import ExpiresDict
logger = logging.getLogger(__name__)
ONE_DAY = 60 * 60 * 24
ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION = 5
DEFAULT_LOCK_EXPIRATION = 10000
ETCD_READ_TIMEOUT = 5
ETCD_MAX_WATCH_TIMEOUT = 30
REDIS_EXPIRING_SUFFIX = '/expiring'
REDIS_DEFAULT_PUBSUB_KEY = 'orchestrator_events'
REDIS_EVENT_KIND_MESSAGE = 'message'
REDIS_EVENT_KIND_PMESSAGE = 'pmessage'
REDIS_NONEXPIRING_KEY = -1
# This constant defines the Redis configuration flags used to watch [K]eyspace and e[x]pired
# events on keys. For more info, see https://redis.io/topics/notifications#configuration
REDIS_KEYSPACE_EVENT_CONFIG_VALUE = 'Kx'
REDIS_KEYSPACE_EVENT_CONFIG_KEY = 'notify-keyspace-events'
REDIS_KEYSPACE_KEY_PATTERN = '__keyspace@%s__:%s'
REDIS_EXPIRED_KEYSPACE_PATTERN = slash_join(REDIS_KEYSPACE_KEY_PATTERN, REDIS_EXPIRING_SUFFIX)
REDIS_EXPIRED_KEYSPACE_REGEX = re.compile(REDIS_EXPIRED_KEYSPACE_PATTERN % (r'(\S+)', r'(\S+)'))
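For example, with db 0 and the '/expiring' shadow keys written by set_key below, the notification channel and what the regex extracts look like this (standalone sketch, pattern written out literally):
import re
expired_channel = re.compile(r'__keyspace@(\S+)__:(\S+)/expiring')
match = expired_channel.match('__keyspace@0__:building/deadbeef-dead-beef/expiring')
print(match.groups())  # ('0', 'building/deadbeef-dead-beef')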
def orchestrator_from_config(manager_config, canceller_only=False):
"""
Allocates a new Orchestrator from the 'ORCHESTRATOR' block from provided manager config.
Checks for legacy configuration prefixed with 'ETCD_' when the 'ORCHESTRATOR' is not present.
:param manager_config: the configuration for the orchestrator
:type manager_config: dict
:rtype: :class: Orchestrator
"""
# Legacy codepath only knows how to configure etcd.
if manager_config.get('ORCHESTRATOR') is None:
manager_config['ORCHESTRATOR'] = {key: value
for (key, value) in iteritems(manager_config)
if key.startswith('ETCD_') and not key.endswith('_PREFIX')}
# Sanity check that legacy prefixes are no longer being used.
for key in manager_config['ORCHESTRATOR'].keys():
words = key.split('_')
if len(words) > 1 and words[-1].lower() == 'prefix':
raise AssertionError('legacy prefix used, use ORCHESTRATOR_PREFIX instead')
def _dict_key_prefix(d):
"""
:param d: a dict whose keys share a common prefix ending at the first underscore
:type d: {str: any}
:rtype: str
"""
return d.keys()[0].split('_', 1)[0].lower()
orchestrator_name = _dict_key_prefix(manager_config['ORCHESTRATOR'])
def format_key(key):
return key.lower().split('_', 1)[1]
orchestrator_kwargs = {format_key(key): value
for (key, value) in iteritems(manager_config['ORCHESTRATOR'])}
if manager_config.get('ORCHESTRATOR_PREFIX') is not None:
orchestrator_kwargs['orchestrator_prefix'] = manager_config['ORCHESTRATOR_PREFIX']
orchestrator_kwargs['canceller_only'] = canceller_only
logger.debug('attempting to create orchestrator %s with kwargs %s',
orchestrator_name, orchestrator_kwargs)
return orchestrator_by_name(orchestrator_name, **orchestrator_kwargs)
def orchestrator_by_name(name, **kwargs):
_ORCHESTRATORS = {
'etcd': Etcd2Orchestrator,
'mem': MemoryOrchestrator,
'redis': RedisOrchestrator,
}
return _ORCHESTRATORS.get(name, MemoryOrchestrator)(**kwargs)
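A hypothetical configuration sketch showing how these two helpers are typically fed (host and prefix values are placeholders):
manager_config = {
    'ORCHESTRATOR_PREFIX': 'buildman/',
    'ORCHESTRATOR': {
        'REDIS_HOST': 'build-redis.example.internal',
        'REDIS_PORT': 6379,
    },
}
# orchestrator_from_config(manager_config) strips the 'REDIS_' prefix from each key and returns
# RedisOrchestrator(host='build-redis.example.internal', port=6379,
#                   orchestrator_prefix='buildman/', canceller_only=False).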
class OrchestratorError(Exception):
pass
# TODO: replace with ConnectionError when this codebase is Python 3.
class OrchestratorConnectionError(OrchestratorError):
pass
@unique
class KeyEvent(IntEnum):
CREATE = 1
SET = 2
DELETE = 3
EXPIRE = 4
class KeyChange(namedtuple('KeyChange', ['event', 'key', 'value'])):
pass
@add_metaclass(ABCMeta)
class Orchestrator(object):
"""
Orchestrator is the interface that is used to synchronize the build states
across build managers.
This interface assumes that storage is being done by a key-value store
that supports watching for events on keys.
Missing keys should return KeyError; otherwise, errors should raise an
OrchestratorError.
:param key_prefix: the prefix of keys being watched
:type key_prefix: str
"""
@abstractmethod
def on_key_change(self, key, callback, restarter=None):
"""
The callback parameter takes in a KeyChange object as a parameter.
"""
pass
@abstractmethod
def get_prefixed_keys(self, prefix):
"""
:returns: a dict of key value pairs beginning with prefix
:rtype: {str: str}
"""
pass
@abstractmethod
def get_key(self, key):
"""
:returns: the value stored at the provided key
:rtype: str
"""
pass
@abstractmethod
def set_key(self, key, value, overwrite=False, expiration=None):
"""
:param key: the identifier for the value
:type key: str
:param value: the value being stored
:type value: str
:param overwrite: whether or not a KeyError is thrown if the key already exists
:type overwrite: bool
:param expiration: the duration in seconds that a key should be available
:type expiration: int
"""
pass
@abstractmethod
def set_key_sync(self, key, value, overwrite=False, expiration=None):
"""
set_key, but without trollius coroutines.
"""
pass
@abstractmethod
def delete_key(self, key):
"""
Deletes a key that has been set in the orchestrator.
:param key: the identifier for the key
:type key: str
"""
pass
@abstractmethod
def lock(self, key, expiration=DEFAULT_LOCK_EXPIRATION):
"""
Takes a lock for synchronizing exclusive operations cluster-wide.
:param key: the identifier for the lock
:type key: str
:param expiration: the duration until the lock expires
:type expiration: :class:`datetime.timedelta` or int (seconds)
:returns: whether or not the lock was acquired
:rtype: bool
"""
pass
@abstractmethod
def shutdown():
"""
This function should shutdown any final resources allocated by the Orchestrator.
"""
pass
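A minimal smoke-test sketch of this contract, driven the same way the build managers drive it (trollius coroutines) and using the in-memory backend defined further down in this file:
from trollius import coroutine, get_event_loop, From
@coroutine
def _demo(orch):
    yield From(orch.set_key('building/1234', '{"phase": "waiting"}', expiration=30))
    value = yield From(orch.get_key('building/1234'))
    assert value == '{"phase": "waiting"}'
get_event_loop().run_until_complete(_demo(MemoryOrchestrator()))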
def _sleep_orchestrator():
"""
This function blocks the trollius event loop by sleeping in order to backoff if a failure
such as a ConnectionError has occurred.
"""
logger.exception('Connecting to the orchestrator failed; sleeping for %s and then trying again',
ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
time.sleep(ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
logger.exception('Connecting to the orchestrator failed; slept for %s and now trying again',
ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
class EtcdAction(object):
""" Enumeration of the various kinds of etcd actions we can observe via a watch. """
GET = 'get'
SET = 'set'
EXPIRE = 'expire'
UPDATE = 'update'
DELETE = 'delete'
CREATE = 'create'
COMPARE_AND_SWAP = 'compareAndSwap'
COMPARE_AND_DELETE = 'compareAndDelete'
class Etcd2Orchestrator(Orchestrator):
def __init__(self, host='127.0.0.1', port=2379, cert_and_key=None, ca_cert=None,
client_threads=5, canceller_only=False, **kwargs):
self.is_canceller_only = canceller_only
logger.debug('initializing async etcd client')
self._sync_etcd_client = etcd.Client(
host=host,
port=port,
cert=tuple(cert_and_key) if cert_and_key is not None else None,
ca_cert=ca_cert,
protocol='http' if cert_and_key is None else 'https',
read_timeout=ETCD_READ_TIMEOUT,
)
if not self.is_canceller_only:
(self._etcd_client, self._async_executor) = wrap_with_threadpool(self._sync_etcd_client,
client_threads)
logger.debug('creating initial orchestrator state')
self._shutting_down = False
self._watch_tasks = {}
@staticmethod
def _sanity_check_ttl(ttl):
"""
A TTL of < 0 in etcd results in the key *never being expired*.
We use a max here to ensure that if the TTL is < 0, the key will expire immediately.
"""
return max(ttl, 0)
def _watch_etcd(self, key, callback, restarter=None, start_index=None):
def callback_wrapper(changed_key_future):
new_index = start_index
etcd_result = None
if not changed_key_future.cancelled():
try:
etcd_result = changed_key_future.result()
existing_index = getattr(etcd_result, 'etcd_index', None)
new_index = etcd_result.modifiedIndex + 1
logger.debug('Got watch of key: %s at #%s with result: %s',
key, existing_index, etcd_result)
except ReadTimeoutError:
logger.debug('Read-timeout on etcd watch %s, rescheduling', key)
except etcd.EtcdEventIndexCleared:
# This happens if etcd2 has moved forward too fast for us to start watching at the index
# we retrieved. We therefore start a new watch at HEAD and (if specified) call the
# restarter method which should conduct a read and reset the state of the manager.
logger.debug('Etcd moved forward too quickly. Restarting watch cycle.')
new_index = None
if restarter is not None:
async(restarter())
except (KeyError, etcd.EtcdKeyError):
logger.debug('Etcd key already cleared: %s', key)
return
except etcd.EtcdConnectionFailed:
_sleep_orchestrator()
except etcd.EtcdException as eex:
# TODO: This is a quick and dirty hack and should be replaced with a proper
# exception check.
if str(eex.message).find('Read timed out') >= 0:
logger.debug('Read-timeout on etcd watch %s, rescheduling', key)
else:
logger.exception('Exception on etcd watch: %s', key)
except ProtocolError:
logger.exception('Exception on etcd watch: %s', key)
if key not in self._watch_tasks or self._watch_tasks[key].done():
self._watch_etcd(key, callback, start_index=new_index, restarter=restarter)
if etcd_result and etcd_result.value is not None:
async(callback(self._etcd_result_to_keychange(etcd_result)))
if not self._shutting_down:
logger.debug('Scheduling watch of key: %s at start index %s', key, start_index)
watch_future = self._etcd_client.watch(key, recursive=True, index=start_index,
timeout=ETCD_MAX_WATCH_TIMEOUT)
watch_future.add_done_callback(callback_wrapper)
self._watch_tasks[key] = async(watch_future)
@staticmethod
def _etcd_result_to_keychange(etcd_result):
event = Etcd2Orchestrator._etcd_result_to_keyevent(etcd_result)
return KeyChange(event, etcd_result.key, etcd_result.value)
@staticmethod
def _etcd_result_to_keyevent(etcd_result):
if etcd_result.action == EtcdAction.CREATE:
return KeyEvent.CREATE
if etcd_result.action == EtcdAction.SET:
return KeyEvent.CREATE if etcd_result.createdIndex == etcd_result.modifiedIndex else KeyEvent.SET
if etcd_result.action == EtcdAction.DELETE:
return KeyEvent.DELETE
if etcd_result.action == EtcdAction.EXPIRE:
return KeyEvent.EXPIRE
raise AssertionError('etcd action must have equivalent keyevent')
def on_key_change(self, key, callback, restarter=None):
assert not self.is_canceller_only
logger.debug('creating watch on %s', key)
self._watch_etcd(key, callback, restarter=restarter)
@coroutine
def get_prefixed_keys(self, prefix):
assert not self.is_canceller_only
try:
etcd_result = yield From(self._etcd_client.read(prefix, recursive=True))
raise Return({leaf.key: leaf.value for leaf in etcd_result.leaves})
except etcd.EtcdKeyError:
raise KeyError
except etcd.EtcdConnectionFailed as ex:
raise OrchestratorConnectionError(ex)
except etcd.EtcdException as ex:
raise OrchestratorError(ex)
@coroutine
def get_key(self, key):
assert not self.is_canceller_only
try:
# Ignore pylint: the value property on EtcdResult is added dynamically using setattr.
etcd_result = yield From(self._etcd_client.read(key))
raise Return(etcd_result.value)
except etcd.EtcdKeyError:
raise KeyError
except etcd.EtcdConnectionFailed as ex:
raise OrchestratorConnectionError(ex)
except etcd.EtcdException as ex:
raise OrchestratorError(ex)
@coroutine
def set_key(self, key, value, overwrite=False, expiration=None):
assert not self.is_canceller_only
yield From(self._etcd_client.write(key, value, prevExists=overwrite,
ttl=self._sanity_check_ttl(expiration)))
def set_key_sync(self, key, value, overwrite=False, expiration=None):
self._sync_etcd_client.write(key, value, prevExists=overwrite,
ttl=self._sanity_check_ttl(expiration))
@coroutine
def delete_key(self, key):
assert not self.is_canceller_only
try:
yield From(self._etcd_client.delete(key))
except etcd.EtcdKeyError:
raise KeyError
except etcd.EtcdConnectionFailed as ex:
raise OrchestratorConnectionError(ex)
except etcd.EtcdException as ex:
raise OrchestratorError(ex)
@coroutine
def lock(self, key, expiration=DEFAULT_LOCK_EXPIRATION):
assert not self.is_canceller_only
try:
yield From(self._etcd_client.write(key, {}, prevExist=False,
ttl=self._sanity_check_ttl(expiration)))
raise Return(True)
except (KeyError, etcd.EtcdKeyError):
raise Return(False)
except etcd.EtcdConnectionFailed:
logger.exception('Could not get etcd atomic lock as etcd is down')
raise Return(False)
except etcd.EtcdException as ex:
raise OrchestratorError(ex)
def shutdown(self):
logger.debug('Shutting down etcd client.')
self._shutting_down = True
if self.is_canceller_only:
return
for (key, _), task in self._watch_tasks.items():
if not task.done():
logger.debug('Canceling watch task for %s', key)
task.cancel()
if self._async_executor is not None:
self._async_executor.shutdown()
class MemoryOrchestrator(Orchestrator):
def __init__(self, **kwargs):
self.state = ExpiresDict()
self.callbacks = {}
def _callbacks_prefixed(self, prefix):
return (callback for (key, callback) in iteritems(self.callbacks)
if key.startswith(prefix))
def on_key_change(self, key, callback, restarter=None):
self.callbacks[key] = callback
@coroutine
def get_prefixed_keys(self, prefix):
raise Return({k: value for (k, value) in self.state.items()
if k.startswith(prefix)})
@coroutine
def get_key(self, key):
raise Return(self.state[key])
@coroutine
def set_key(self, key, value, overwrite=False, expiration=None):
preexisting_key = key in self.state
if preexisting_key and not overwrite:
raise KeyError
absolute_expiration = None
if expiration is not None:
absolute_expiration = datetime.datetime.now() + datetime.timedelta(seconds=expiration)
self.state.set(key, value, expires=absolute_expiration)
event = KeyEvent.CREATE if not preexisting_key else KeyEvent.SET
for callback in self._callbacks_prefixed(key):
yield From(callback(KeyChange(event, key, value)))
def set_key_sync(self, key, value, overwrite=False, expiration=None):
"""
set_key, but without trollius coroutines.
"""
preexisting_key = key in self.state
if preexisting_key and not overwrite:
raise KeyError
absolute_expiration = None
if expiration is not None:
absolute_expiration = datetime.datetime.now() + datetime.timedelta(seconds=expiration)
self.state.set(key, value, expires=absolute_expiration)
event = KeyEvent.CREATE if not preexisting_key else KeyEvent.SET
for callback in self._callbacks_prefixed(key):
callback(KeyChange(event, key, value))
@coroutine
def delete_key(self, key):
value = self.state[key]
del self.state[key]
for callback in self._callbacks_prefixed(key):
yield From(callback(KeyChange(KeyEvent.DELETE, key, value)))
@coroutine
def lock(self, key, expiration=DEFAULT_LOCK_EXPIRATION):
if key in self.state:
raise Return(False)
self.state.set(key, None, expires=expiration)
raise Return(True)
def shutdown(self):
self.state = None
self.callbacks = None
class RedisOrchestrator(Orchestrator):
def __init__(self, host='127.0.0.1', port=6379, password=None, db=0, cert_and_key=None,
ca_cert=None, client_threads=5, ssl=False, skip_keyspace_event_setup=False,
canceller_only=False, **kwargs):
self.is_canceller_only = canceller_only
(cert, key) = tuple(cert_and_key) if cert_and_key is not None else (None, None)
self._sync_client = redis.StrictRedis(
host=host,
port=port,
password=password,
db=db,
ssl_certfile=cert,
ssl_keyfile=key,
ssl_ca_certs=ca_cert,
ssl=ssl,
)
self._shutting_down = False
self._tasks = {}
self._watched_keys = {}
self._pubsub_key = slash_join(kwargs.get('orchestrator_prefix', ''),
REDIS_DEFAULT_PUBSUB_KEY).lstrip('/')
if not self.is_canceller_only:
(self._client, self._async_executor) = wrap_with_threadpool(self._sync_client, client_threads)
# Configure a subscription to watch events that the orchestrator manually publishes.
logger.debug('creating pubsub with key %s', self._pubsub_key)
published_pubsub = self._sync_client.pubsub()
published_pubsub.subscribe(self._pubsub_key)
(self._pubsub, self._async_executor_pub) = wrap_with_threadpool(published_pubsub)
self._watch_published_key()
# Configure a subscription to watch expired keyspace events.
if not skip_keyspace_event_setup:
self._sync_client.config_set(REDIS_KEYSPACE_EVENT_CONFIG_KEY,
REDIS_KEYSPACE_EVENT_CONFIG_VALUE)
expiring_pubsub = self._sync_client.pubsub()
expiring_pubsub.psubscribe(REDIS_EXPIRED_KEYSPACE_PATTERN % (db, '*'))
(self._pubsub_expiring, self._async_executor_ex) = wrap_with_threadpool(expiring_pubsub)
self._watch_expiring_key()
def _watch_published_key(self):
def published_callback_wrapper(event_future):
logger.debug('published callback called')
event_result = None
if not event_future.cancelled():
try:
event_result = event_future.result()
(redis_event, event_key, event_value) = event_result
logger.debug('Got watch of key: (%s, %s, %s)', redis_event, event_key, event_value)
except redis.ConnectionError:
_sleep_orchestrator()
except redis.RedisError:
logger.exception('Exception watching redis publish: %s', event_key)
# Schedule creating a new future if this one has been consumed.
if 'pub' not in self._tasks or self._tasks['pub'].done():
self._watch_published_key()
if event_result is not None and redis_event == REDIS_EVENT_KIND_MESSAGE:
keychange = self._publish_to_keychange(event_value)
for watched_key, callback in iteritems(self._watched_keys):
if keychange.key.startswith(watched_key):
async(callback(keychange))
if not self._shutting_down:
logger.debug('Scheduling watch of publish stream')
watch_future = self._pubsub.parse_response()
watch_future.add_done_callback(published_callback_wrapper)
self._tasks['pub'] = async(watch_future)
def _watch_expiring_key(self):
def expiring_callback_wrapper(event_future):
logger.debug('expiring callback called')
event_result = None
if not event_future.cancelled():
try:
event_result = event_future.result()
if self._is_expired_keyspace_event(event_result):
# Get the value of the original key before the expiration happened.
key = self._key_from_expiration(event_future)
expired_value = yield From(self._client.get(key))
# $KEY/expiring is gone, but the original key still remains, set an expiration for it
# so that other managers have time to get the event and still read the expired value.
yield From(self._client.expire(key, ONE_DAY))
except redis.ConnectionError:
_sleep_orchestrator()
except redis.RedisError:
logger.exception('Exception watching redis expirations: %s', key)
# Schedule creating a new future if this one has been consumed.
if 'expire' not in self._tasks or self._tasks['expire'].done():
self._watch_expiring_key()
if self._is_expired_keyspace_event(event_result) and expired_value is not None:
for watched_key, callback in iteritems(self._watched_keys):
if key.startswith(watched_key):
async(callback(KeyChange(KeyEvent.EXPIRE, key, expired_value)))
if not self._shutting_down:
logger.debug('Scheduling watch of expiration')
watch_future = self._pubsub_expiring.parse_response()
watch_future.add_done_callback(expiring_callback_wrapper)
self._tasks['expire'] = async(watch_future)
def on_key_change(self, key, callback, restarter=None):
assert not self.is_canceller_only
logger.debug('watching key: %s', key)
self._watched_keys[key] = callback
@staticmethod
def _is_expired_keyspace_event(event_result):
"""
Sanity check that this isn't an unrelated keyspace event.
There could be a more efficient keyspace event config to avoid this client-side filter.
"""
if event_result is None:
return False
(redis_event, _pattern, matched_key, expired) = event_result
return (redis_event == REDIS_EVENT_KIND_PMESSAGE and
expired == 'expired' and
REDIS_EXPIRED_KEYSPACE_REGEX.match(matched_key) is not None)
@staticmethod
def _key_from_expiration(event_result):
(_redis_event, _pattern, matched_key, _expired) = event_result
return REDIS_EXPIRED_KEYSPACE_REGEX.match(matched_key).groups()[1]
@staticmethod
def _publish_to_keychange(event_value):
e = json.loads(event_value)
return KeyChange(KeyEvent(e['event']), e['key'], e['value'])
@coroutine
def get_prefixed_keys(self, prefix):
assert not self.is_canceller_only
# TODO: This can probably be done with redis pipelines to make it transactional.
keys = yield From(self._client.keys(prefix + '*'))
# Yielding to the event loop is required, thus this cannot be written as a dict comprehension.
results = {}
for key in keys:
if key.endswith(REDIS_EXPIRING_SUFFIX):
continue
ttl = yield From(self._client.ttl(key))
if ttl != REDIS_NONEXPIRING_KEY:
# Only redis keys without expirations are live build manager keys.
value = yield From(self._client.get(key))
results.update({key: value})
raise Return(results)
@coroutine
def get_key(self, key):
assert not self.is_canceller_only
value = yield From(self._client.get(key))
raise Return(value)
@coroutine
def set_key(self, key, value, overwrite=False, expiration=None):
assert not self.is_canceller_only
already_exists = yield From(self._client.exists(key))
yield From(self._client.set(key, value, xx=overwrite))
if expiration is not None:
yield From(self._client.set(slash_join(key, REDIS_EXPIRING_SUFFIX), value,
xx=overwrite, ex=expiration))
key_event = KeyEvent.SET if already_exists else KeyEvent.CREATE
yield From(self._publish(event=key_event, key=key, value=value))
def set_key_sync(self, key, value, overwrite=False, expiration=None):
already_exists = self._sync_client.exists(key)
self._sync_client.set(key, value, xx=overwrite)
if expiration is not None:
self._sync_client.set(slash_join(key, REDIS_EXPIRING_SUFFIX), value,
xx=overwrite, ex=expiration)
self._sync_client.publish(self._pubsub_key, json.dumps({
'event': int(KeyEvent.SET if already_exists else KeyEvent.CREATE),
'key': key,
'value': value,
}))
@coroutine
def _publish(self, **kwargs):
kwargs['event'] = int(kwargs['event'])
event_json = json.dumps(kwargs)
logger.debug('publishing event: %s', event_json)
yield From(self._client.publish(self._pubsub_key, event_json))
@coroutine
def delete_key(self, key):
assert not self.is_canceller_only
value = yield From(self._client.get(key))
yield From(self._client.delete(key))
yield From(self._client.delete(slash_join(key, REDIS_EXPIRING_SUFFIX)))
yield From(self._publish(event=KeyEvent.DELETE, key=key, value=value))
@coroutine
def lock(self, key, expiration=DEFAULT_LOCK_EXPIRATION):
assert not self.is_canceller_only
yield From(self.set_key(key, '', expiration=expiration))
raise Return(True)
@coroutine
def shutdown(self):
logger.debug('Shutting down redis client.')
self._shutting_down = True
if self.is_canceller_only:
return
for key, task in iteritems(self._tasks):
if not task.done():
logger.debug('Canceling watch task for %s', key)
task.cancel()
if self._async_executor is not None:
self._async_executor.shutdown()
if self._async_executor_ex is not None:
self._async_executor_ex.shutdown()
if self._async_executor_pub is not None:
self._async_executor_pub.shutdown()

View File

@ -17,6 +17,7 @@ from buildman.jobutil.buildjob import BuildJob, BuildJobLoadException
from data import database, model
from app import app, metric_queue
logger = logging.getLogger(__name__)
WORK_CHECK_TIMEOUT = 10
@ -27,6 +28,7 @@ MINIMUM_JOB_EXTENSION = timedelta(minutes=1)
HEARTBEAT_PERIOD_SEC = 30
class BuilderServer(object):
""" Server which handles both HTTP and WAMP requests, managing the full state of the build
controller.
@ -130,7 +132,7 @@ class BuilderServer(object):
def _unregister_component(self, component):
logger.debug('Unregistering component with realm %s and token %s',
component.builder_realm, component.expected_token)
component.builder_realm, component.expected_token)
self._realm_map.pop(component.builder_realm, None)
@ -186,7 +188,7 @@ class BuilderServer(object):
try:
build_job = BuildJob(job_item)
except BuildJobLoadException as irbe:
logger.warning('[BUILD INCOMPLETE: job load exception] Jon data: %s. No retry restore.',
logger.warning('[BUILD INCOMPLETE: job load exception] Job data: %s. No retry restore.',
job_item.body)
logger.exception(irbe)
self._queue.incomplete(job_item, restore_retry=False)

View File

@ -1,21 +1,22 @@
import unittest
import etcd
import time
import json
import uuid
import os
from mock import Mock, ANY
from six import iteritems
from trollius import coroutine, get_event_loop, From, Future, Return
from mock import Mock, ANY, call
from buildman.manager.executor import BuilderExecutor, ExecutorException
from buildman.manager.ephemeral import (EphemeralBuilderManager, EtcdAction,
ETCD_MAX_WATCH_TIMEOUT)
from buildman.component.buildcomponent import BuildComponent
from buildman.server import BuildJobResult
from buildman.asyncutil import AsyncWrapper
from util.metrics.metricqueue import duration_collector_async
from app import metric_queue
from buildman.asyncutil import AsyncWrapper
from buildman.component.buildcomponent import BuildComponent
from buildman.manager.ephemeral import (EphemeralBuilderManager, REALM_PREFIX,
JOB_PREFIX)
from buildman.manager.executor import BuilderExecutor, ExecutorException
from buildman.orchestrator import KeyEvent, KeyChange
from buildman.server import BuildJobResult
from util import slash_join
from util.metrics.metricqueue import duration_collector_async
BUILD_UUID = 'deadbeef-dead-beef-dead-deadbeefdead'
REALM_ID = '1234-realm'
@ -45,7 +46,6 @@ class TestExecutor(BuilderExecutor):
self.job_stopped = execution_id
class BadExecutor(BuilderExecutor):
@coroutine
@duration_collector_async(metric_queue.builder_time_to_start, labelvalues=["testlabel"])
@ -58,19 +58,8 @@ class EphemeralBuilderTestCase(unittest.TestCase):
self.etcd_client_mock = None
super(EphemeralBuilderTestCase, self).__init__(*args, **kwargs)
def _create_mock_etcd_client(self, *args, **kwargs):
def create_future(*args, **kwargs):
return Future()
self.etcd_client_mock = Mock(spec=etcd.Client, name='etcd.Client')
self.etcd_client_mock.read = Mock(side_effect=KeyError)
self.etcd_client_mock.delete = Mock(side_effect=self._create_completed_future())
self.etcd_client_mock.watch = Mock(side_effect=create_future)
self.etcd_client_mock.write = Mock(side_effect=self._create_completed_future('some_exec_id'))
return (self.etcd_client_mock, None)
def _create_completed_future(self, result=None):
@staticmethod
def _create_completed_future(result=None):
def inner(*args, **kwargs):
new_future = Future()
new_future.set_result(result)
@ -89,12 +78,10 @@ class EphemeralBuilderTestCase(unittest.TestCase):
def _create_build_job(self, namespace='namespace', retries=3):
mock_job = Mock()
mock_job.job_details = {
'build_uuid': BUILD_UUID,
}
mock_job.job_details = {'build_uuid': BUILD_UUID}
mock_job.job_item = {
'body': json.dumps(mock_job.job_details),
'id': 1,
'body': json.dumps(mock_job.job_details),
'id': 1,
}
mock_job.namespace = namespace
@ -103,7 +90,6 @@ class EphemeralBuilderTestCase(unittest.TestCase):
return mock_job
class TestEphemeralLifecycle(EphemeralBuilderTestCase):
""" Tests the various lifecycles of the ephemeral builder and its interaction with etcd. """
@ -123,6 +109,7 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):
self.test_executor = Mock(spec=BuilderExecutor)
self.test_executor.start_builder = Mock(side_effect=self._create_completed_future('123'))
self.test_executor.stop_builder = Mock(side_effect=self._create_completed_future())
self.test_executor.setup_time = 60
self.test_executor.name = 'MockExecutor'
self.test_executor.minimum_retry_threshold = 0
return self.test_executor
@ -144,21 +131,20 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):
self.job_complete_callback,
'127.0.0.1',
30,
etcd_creator=self._create_mock_etcd_client,
)
self.manager.initialize({'EXECUTOR': 'test'})
# Test that we are watching the realm and jobs key once initialized.
self.etcd_client_mock.watch.assert_any_call('realm/', recursive=True, index=None,
timeout=ETCD_MAX_WATCH_TIMEOUT)
self.etcd_client_mock.watch.assert_any_call('building/', recursive=True, index=None,
timeout=ETCD_MAX_WATCH_TIMEOUT)
self.manager.initialize({
'EXECUTOR': 'test',
'ORCHESTRATOR': {'MEM_CONFIG': None},
})
# Ensure that the realm and building callbacks have been registered
callback_keys = [key for key in self.manager._orchestrator.callbacks]
self.assertIn(REALM_PREFIX, callback_keys)
self.assertIn(JOB_PREFIX, callback_keys)
self.mock_job = self._create_build_job()
self.mock_job_key = os.path.join('building/', BUILD_UUID)
self.mock_job_key = slash_join('building', BUILD_UUID)
def tearDown(self):
super(TestEphemeralLifecycle, self).tearDown()
@ -167,46 +153,58 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):
@coroutine
def _setup_job_for_managers(self):
self.etcd_client_mock.read = Mock(side_effect=KeyError)
test_component = Mock(spec=BuildComponent)
test_component.builder_realm = REALM_ID
test_component.start_build = Mock(side_effect=self._create_completed_future())
self.register_component_callback.return_value = test_component
# Ask for a builder to be scheduled
self.etcd_client_mock.write.reset()
is_scheduled = yield From(self.manager.schedule(self.mock_job))
self.assertTrue(is_scheduled)
self.assertEqual(self.test_executor.start_builder.call_count, 1)
# Ensure the job and realm and metric were added to etcd.
self.assertEqual(self.etcd_client_mock.write.call_args_list[0][0][0], self.mock_job_key)
self.assertTrue(self.etcd_client_mock.write.call_args_list[1][0][0].find('metric/') == 0)
self.assertTrue(self.etcd_client_mock.write.call_args_list[2][0][0].find('realm/') == 0)
realm_data = json.loads(self.etcd_client_mock.write.call_args_list[2][0][1])
# Ensure that the job, realm, and metric callbacks have been registered
callback_keys = [key for key in self.manager._orchestrator.callbacks]
self.assertIn(self.mock_job_key, self.manager._orchestrator.state)
self.assertIn(REALM_PREFIX, callback_keys)
# TODO: assert metric key has been set
realm_for_build = self._find_realm_key(self.manager._orchestrator, BUILD_UUID)
raw_realm_data = yield From(self.manager._orchestrator.get_key(slash_join('realm',
realm_for_build)))
realm_data = json.loads(raw_realm_data)
realm_data['realm'] = REALM_ID
# Right now the job is not registered with any managers because etcd has not accepted the job
self.assertEqual(self.register_component_callback.call_count, 0)
# Fire off a realm-changed event with the same data.
realm_created = Mock(spec=etcd.EtcdResult)
realm_created.action = EtcdAction.CREATE
realm_created.key = os.path.join('realm/', REALM_ID)
realm_created.value = json.dumps(realm_data)
yield From(self.manager._handle_realm_change(realm_created))
self.assertEqual(self.register_component_callback.call_count, 1)
yield From(self.manager._realm_callback(
KeyChange(KeyEvent.CREATE,
slash_join(REALM_PREFIX, REALM_ID),
json.dumps(realm_data))))
# Ensure that we have at least one component node.
self.assertEquals(1, self.manager.num_workers())
self.assertEqual(self.register_component_callback.call_count, 1)
self.assertEqual(1, self.manager.num_workers())
# Ensure that the build info exists.
self.assertIsNotNone(self.manager._build_uuid_to_info.get(BUILD_UUID))
raise Return(test_component)
@staticmethod
def _find_realm_key(orchestrator, build_uuid):
for key, value in iteritems(orchestrator.state):
if key.startswith(REALM_PREFIX):
parsed_value = json.loads(value)
body = json.loads(parsed_value['job_queue_item']['body'])
if body['build_uuid'] == build_uuid:
return parsed_value['realm']
continue
raise KeyError
@async_test
def test_schedule_and_complete(self):
# Test that a job is properly registered with all of the managers
@ -214,12 +212,6 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):
# Take the job ourselves
yield From(self.manager.build_component_ready(test_component))
read_calls = [call('building/', recursive=True), call(os.path.join('metric/', REALM_ID))]
self.etcd_client_mock.read.assert_has_calls(read_calls)
delete_calls = [call('building/', recursive=True), call(os.path.join('metric/', REALM_ID))]
self.etcd_client_mock.read.assert_has_calls(delete_calls)
self.etcd_client_mock.delete.reset_mock()
self.assertIsNotNone(self.manager._build_uuid_to_info.get(BUILD_UUID))
@ -228,30 +220,23 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):
# Ensure that the executor kills the job.
self.assertEqual(self.test_executor.stop_builder.call_count, 1)
self.etcd_client_mock.delete.assert_has_calls([call(self.mock_job_key)])
# Ensure the build information is cleaned up.
self.assertIsNone(self.manager._build_uuid_to_info.get(BUILD_UUID))
self.assertEquals(0, self.manager.num_workers())
self.assertEqual(0, self.manager.num_workers())
@async_test
def test_another_manager_takes_job(self):
# Prepare a job to be taken by another manager
test_component = yield From(self._setup_job_for_managers())
realm_deleted = Mock(spec=etcd.EtcdResult)
realm_deleted.action = EtcdAction.DELETE
realm_deleted.key = os.path.join('realm/', REALM_ID)
realm_deleted._prev_node = Mock(spec=etcd.EtcdResult)
realm_deleted._prev_node.value = json.dumps({
'realm': REALM_ID,
'token': 'beef',
'execution_id': '123',
'job_queue_item': self.mock_job.job_item,
})
yield From(self.manager._handle_realm_change(realm_deleted))
yield From(self.manager._realm_callback(
KeyChange(KeyEvent.DELETE,
slash_join(REALM_PREFIX, REALM_ID),
json.dumps({'realm': REALM_ID,
'token': 'beef',
'execution_id': '123',
'job_queue_item': self.mock_job.job_item}))))
self.unregister_component_callback.assert_called_once_with(test_component)
@ -259,20 +244,15 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):
self.assertEqual(self.test_executor.stop_builder.call_count, 0)
# Ensure that we still have the build info, but not the component.
self.assertEquals(0, self.manager.num_workers())
self.assertEqual(0, self.manager.num_workers())
self.assertIsNotNone(self.manager._build_uuid_to_info.get(BUILD_UUID))
# Delete the job once it has "completed".
expired_result = Mock(spec=etcd.EtcdResult)
expired_result.action = EtcdAction.DELETE
expired_result.key = self.mock_job_key
expired_result._prev_node = Mock(spec=etcd.EtcdResult)
expired_result._prev_node.value = json.dumps({
'had_heartbeat': False,
'job_queue_item': self.mock_job.job_item,
})
yield From(self.manager._handle_job_change(expired_result))
yield From(self.manager._job_callback(
KeyChange(KeyEvent.DELETE,
self.mock_job_key,
json.dumps({'had_heartbeat': False,
'job_queue_item': self.mock_job.job_item}))))
# Ensure the job was removed from the info, but stop was not called.
self.assertIsNone(self.manager._build_uuid_to_info.get(BUILD_UUID))
@ -280,63 +260,49 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):
@async_test
def test_job_started_by_other_manager(self):
# Test that we are watching before anything else happens
self.etcd_client_mock.watch.assert_any_call('building/', recursive=True,
timeout=ETCD_MAX_WATCH_TIMEOUT, index=None)
# Ensure that the building callbacks have been registered
callback_keys = [key for key in self.manager._orchestrator.callbacks]
self.assertIn(JOB_PREFIX, callback_keys)
# Send a signal to the callback that the job has been created.
expired_result = Mock(spec=etcd.EtcdResult)
expired_result.action = EtcdAction.CREATE
expired_result.key = self.mock_job_key
expired_result._prev_node = Mock(spec=etcd.EtcdResult)
expired_result._prev_node.value = json.dumps({
'had_heartbeat': False,
'job_queue_item': self.mock_job.job_item,
})
yield From(self.manager._job_callback(
KeyChange(KeyEvent.CREATE,
self.mock_job_key,
json.dumps({'had_heartbeat': False,
'job_queue_item': self.mock_job.job_item}))))
# Ensure the create does nothing.
yield From(self.manager._handle_job_change(expired_result))
self.assertEqual(self.test_executor.stop_builder.call_count, 0)
@async_test
def test_expiring_worker_not_started(self):
# Test that we are watching before anything else happens
self.etcd_client_mock.watch.assert_any_call('building/', recursive=True,
timeout=ETCD_MAX_WATCH_TIMEOUT, index=None)
# Ensure that the building callbacks have been registered
callback_keys = [key for key in self.manager._orchestrator.callbacks]
self.assertIn(JOB_PREFIX, callback_keys)
# Send a signal to the callback that a worker has expired
expired_result = Mock(spec=etcd.EtcdResult)
expired_result.action = EtcdAction.EXPIRE
expired_result.key = self.mock_job_key
expired_result._prev_node = Mock(spec=etcd.EtcdResult)
expired_result._prev_node.value = json.dumps({
'had_heartbeat': True,
'job_queue_item': self.mock_job.job_item,
})
yield From(self.manager._job_callback(
KeyChange(KeyEvent.EXPIRE,
self.mock_job_key,
json.dumps({'had_heartbeat': True,
'job_queue_item': self.mock_job.job_item}))))
# Since the realm was never registered, expiration should do nothing.
yield From(self.manager._handle_job_change(expired_result))
self.assertEqual(self.test_executor.stop_builder.call_count, 0)
@async_test
def test_expiring_worker_started(self):
test_component = yield From(self._setup_job_for_managers())
# Test that we are watching before anything else happens
self.etcd_client_mock.watch.assert_any_call('building/', recursive=True,
timeout=ETCD_MAX_WATCH_TIMEOUT, index=None)
# Ensure that the building callbacks have been registered
callback_keys = [key for key in self.manager._orchestrator.callbacks]
self.assertIn(JOB_PREFIX, callback_keys)
# Send a signal to the callback that a worker has expired
expired_result = Mock(spec=etcd.EtcdResult)
expired_result.action = EtcdAction.EXPIRE
expired_result.key = self.mock_job_key
expired_result._prev_node = Mock(spec=etcd.EtcdResult)
expired_result._prev_node.value = json.dumps({
'had_heartbeat': True,
'job_queue_item': self.mock_job.job_item,
})
yield From(self.manager._handle_job_change(expired_result))
yield From(self.manager._job_callback(
KeyChange(KeyEvent.EXPIRE,
self.mock_job_key,
json.dumps({'had_heartbeat': True,
'job_queue_item': self.mock_job.job_item}))))
self.test_executor.stop_builder.assert_called_once_with('123')
self.assertEqual(self.test_executor.stop_builder.call_count, 1)
@ -345,21 +311,16 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):
def test_buildjob_deleted(self):
test_component = yield From(self._setup_job_for_managers())
# Test that we are watching before anything else happens
self.etcd_client_mock.watch.assert_any_call('building/', recursive=True,
timeout=ETCD_MAX_WATCH_TIMEOUT, index=None)
# Ensure that the building callbacks have been registered
callback_keys = [key for key in self.manager._orchestrator.callbacks]
self.assertIn(JOB_PREFIX, callback_keys)
# Send a signal to the callback that a worker has expired
expired_result = Mock(spec=etcd.EtcdResult)
expired_result.action = EtcdAction.DELETE
expired_result.key = self.mock_job_key
expired_result._prev_node = Mock(spec=etcd.EtcdResult)
expired_result._prev_node.value = json.dumps({
'had_heartbeat': False,
'job_queue_item': self.mock_job.job_item,
})
yield From(self.manager._handle_job_change(expired_result))
yield From(self.manager._job_callback(
KeyChange(KeyEvent.DELETE,
self.mock_job_key,
json.dumps({'had_heartbeat': False,
'job_queue_item': self.mock_job.job_item}))))
self.assertEqual(self.test_executor.stop_builder.call_count, 0)
self.assertEqual(self.job_complete_callback.call_count, 0)
@ -369,21 +330,16 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):
def test_builder_never_starts(self):
test_component = yield From(self._setup_job_for_managers())
# Test that we are watching before anything else happens
self.etcd_client_mock.watch.assert_any_call('building/', recursive=True,
timeout=ETCD_MAX_WATCH_TIMEOUT, index=None)
# Ensure that the building callbacks have been registered
callback_keys = [key for key in self.manager._orchestrator.callbacks]
self.assertIn(JOB_PREFIX, callback_keys)
# Send a signal to the callback that a worker has expired
expired_result = Mock(spec=etcd.EtcdResult)
expired_result.action = EtcdAction.EXPIRE
expired_result.key = self.mock_job_key
expired_result._prev_node = Mock(spec=etcd.EtcdResult)
expired_result._prev_node.value = json.dumps({
'had_heartbeat': False,
'job_queue_item': self.mock_job.job_item,
})
yield From(self.manager._handle_job_change(expired_result))
yield From(self.manager._job_callback(
KeyChange(KeyEvent.EXPIRE,
self.mock_job_key,
json.dumps({'had_heartbeat': False,
'job_queue_item': self.mock_job.job_item}))))
self.test_executor.stop_builder.assert_called_once_with('123')
self.assertEqual(self.test_executor.stop_builder.call_count, 1)
@ -397,74 +353,29 @@ class TestEphemeralLifecycle(EphemeralBuilderTestCase):
@async_test
def test_change_worker(self):
# Send a signal to the callback that a worker key has been changed
set_result = Mock(spec=etcd.EtcdResult)
set_result.action = 'set'
set_result.key = self.mock_job_key
self.manager._handle_job_change(set_result)
self.assertEquals(self.test_executor.stop_builder.call_count, 0)
self.manager._job_callback(KeyChange(KeyEvent.SET, self.mock_job_key, 'value'))
self.assertEqual(self.test_executor.stop_builder.call_count, 0)
@async_test
def test_realm_expired(self):
test_component = yield From(self._setup_job_for_managers())
# Send a signal to the callback that a realm has expired
expired_result = Mock(spec=etcd.EtcdResult)
expired_result.action = EtcdAction.EXPIRE
expired_result.key = self.mock_job_key
expired_result._prev_node = Mock(spec=etcd.EtcdResult)
expired_result._prev_node.value = json.dumps({
'realm': REALM_ID,
'execution_id': 'foobar',
'executor_name': 'MockExecutor',
'job_queue_item': {'body': '{"build_uuid": "fakeid"}'},
})
yield From(self.manager._handle_realm_change(expired_result))
yield From(self.manager._realm_callback(KeyChange(
KeyEvent.EXPIRE,
self.mock_job_key,
json.dumps({
'realm': REALM_ID,
'execution_id': 'foobar',
'executor_name': 'MockExecutor',
'job_queue_item': {'body': '{"build_uuid": "fakeid"}'},
}))))
# Ensure that the cleanup code for the executor was called.
self.test_executor.stop_builder.assert_called_once_with('foobar')
self.assertEqual(self.test_executor.stop_builder.call_count, 1)
@async_test
def test_heartbeat_response(self):
yield From(self.assertHeartbeatWithExpiration(100, self.manager.heartbeat_period_sec * 2))
@async_test
def test_heartbeat_future_expiration(self):
yield From(self.assertHeartbeatWithExpiration(10, 10, ranged=True))
@async_test
def test_heartbeat_expired(self):
yield From(self.assertHeartbeatWithExpiration(-60, 0))
@coroutine
def assertHeartbeatWithExpiration(self, expires_in_sec, expected_ttl, ranged=False):
expiration_timestamp = time.time() + expires_in_sec
builder_result = Mock(spec=etcd.EtcdResult)
builder_result.value = json.dumps({
'expiration': expiration_timestamp,
'max_expiration': expiration_timestamp,
})
self.etcd_client_mock.read = Mock(side_effect=self._create_completed_future(builder_result))
yield From(self.manager.job_heartbeat(self.mock_job))
self.job_heartbeat_callback.assert_called_once_with(self.mock_job)
self.assertEqual(self.etcd_client_mock.write.call_count, 1)
self.assertEqual(self.etcd_client_mock.write.call_args_list[0][0][0], self.mock_job_key)
job_key_data = json.loads(self.etcd_client_mock.write.call_args_list[0][0][1])
self.assertTrue(job_key_data['had_heartbeat'])
self.assertEquals(self.mock_job.job_item, job_key_data['job_queue_item'])
if not ranged:
self.assertEquals(expected_ttl, self.etcd_client_mock.write.call_args_list[0][1]['ttl'])
else:
self.assertTrue(self.etcd_client_mock.write.call_args_list[0][1]['ttl'] <= expected_ttl)
class TestEphemeral(EphemeralBuilderTestCase):
""" Simple unit tests for the ephemeral builder around config management, starting and stopping
jobs.
@ -487,7 +398,6 @@ class TestEphemeral(EphemeralBuilderTestCase):
job_complete_callback,
'127.0.0.1',
30,
etcd_creator=self._create_mock_etcd_client,
)
def tearDown(self):
@ -498,13 +408,14 @@ class TestEphemeral(EphemeralBuilderTestCase):
EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor
self.manager.initialize({
'EXECUTOR': 'test',
'EXECUTOR_CONFIG': dict(MINIMUM_RETRY_THRESHOLD=42)
'EXECUTOR_CONFIG': dict(MINIMUM_RETRY_THRESHOLD=42),
'ORCHESTRATOR': {'MEM_CONFIG': None},
})
# Ensure that we have a single test executor.
self.assertEquals(1, len(self.manager.registered_executors))
self.assertEquals(42, self.manager.registered_executors[0].minimum_retry_threshold)
self.assertEquals('TestExecutor', self.manager.registered_executors[0].name)
self.assertEqual(1, len(self.manager.registered_executors))
self.assertEqual(42, self.manager.registered_executors[0].minimum_retry_threshold)
self.assertEqual('TestExecutor', self.manager.registered_executors[0].name)
def test_verify_executor_newconfig(self):
EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor
@ -512,12 +423,13 @@ class TestEphemeral(EphemeralBuilderTestCase):
'EXECUTORS': [{
'EXECUTOR': 'test',
'MINIMUM_RETRY_THRESHOLD': 42
}]
}],
'ORCHESTRATOR': {'MEM_CONFIG': None},
})
# Ensure that we have a single test executor.
self.assertEquals(1, len(self.manager.registered_executors))
self.assertEquals(42, self.manager.registered_executors[0].minimum_retry_threshold)
self.assertEqual(1, len(self.manager.registered_executors))
self.assertEqual(42, self.manager.registered_executors[0].minimum_retry_threshold)
def test_multiple_executors_samename(self):
@ -537,7 +449,8 @@ class TestEphemeral(EphemeralBuilderTestCase):
'EXECUTOR': 'anotherexecutor',
'MINIMUM_RETRY_THRESHOLD': 24
},
]
],
'ORCHESTRATOR': {'MEM_CONFIG': None},
})
@ -557,13 +470,14 @@ class TestEphemeral(EphemeralBuilderTestCase):
'EXECUTOR': 'anotherexecutor',
'MINIMUM_RETRY_THRESHOLD': 24
},
]
],
'ORCHESTRATOR': {'MEM_CONFIG': None},
})
# Ensure that we have two test executors.
self.assertEquals(2, len(self.manager.registered_executors))
self.assertEquals(42, self.manager.registered_executors[0].minimum_retry_threshold)
self.assertEquals(24, self.manager.registered_executors[1].minimum_retry_threshold)
self.assertEqual(2, len(self.manager.registered_executors))
self.assertEqual(42, self.manager.registered_executors[0].minimum_retry_threshold)
self.assertEqual(24, self.manager.registered_executors[1].minimum_retry_threshold)
def test_skip_invalid_executor(self):
self.manager.initialize({
@ -572,10 +486,11 @@ class TestEphemeral(EphemeralBuilderTestCase):
'EXECUTOR': 'unknown',
'MINIMUM_RETRY_THRESHOLD': 42
},
]
],
'ORCHESTRATOR': {'MEM_CONFIG': None},
})
self.assertEquals(0, len(self.manager.registered_executors))
self.assertEqual(0, len(self.manager.registered_executors))
@async_test
def test_schedule_job_namespace_filter(self):
@ -584,7 +499,8 @@ class TestEphemeral(EphemeralBuilderTestCase):
'EXECUTORS': [{
'EXECUTOR': 'test',
'NAMESPACE_WHITELIST': ['something'],
}]
}],
'ORCHESTRATOR': {'MEM_CONFIG': None},
})
# Try with a build job in an invalid namespace.
@ -604,7 +520,8 @@ class TestEphemeral(EphemeralBuilderTestCase):
'EXECUTORS': [{
'EXECUTOR': 'test',
'MINIMUM_RETRY_THRESHOLD': 2,
}]
}],
'ORCHESTRATOR': {'MEM_CONFIG': None},
})
# Try with a build job that has too few retries.
@ -617,7 +534,6 @@ class TestEphemeral(EphemeralBuilderTestCase):
result = yield From(self.manager.schedule(build_job))
self.assertTrue(result[0])
@async_test
def test_schedule_job_executor_fallback(self):
EphemeralBuilderManager.EXECUTORS['primary'] = TestExecutor
@ -636,7 +552,9 @@ class TestEphemeral(EphemeralBuilderTestCase):
'EXECUTOR': 'secondary',
'MINIMUM_RETRY_THRESHOLD': 2,
},
]
],
'ALLOWED_WORKER_COUNT': 5,
'ORCHESTRATOR': {'MEM_CONFIG': None},
})
# Try a job not matching the primary's namespace filter. Should schedule on secondary.
@ -691,6 +609,8 @@ class TestEphemeral(EphemeralBuilderTestCase):
self.manager.initialize({
'EXECUTOR': 'test',
'EXECUTOR_CONFIG': {},
'ALLOWED_WORKER_COUNT': 5,
'ORCHESTRATOR': {'MEM_CONFIG': None},
})
build_job = self._create_build_job(namespace='something', retries=3)
@ -708,7 +628,6 @@ class TestEphemeral(EphemeralBuilderTestCase):
self.assertIsNotNone(self.manager.registered_executors[0].job_started)
self.manager.registered_executors[0].job_started = None
@async_test
def test_executor_exception(self):
EphemeralBuilderManager.EXECUTORS['bad'] = BadExecutor
@ -716,13 +635,13 @@ class TestEphemeral(EphemeralBuilderTestCase):
self.manager.initialize({
'EXECUTOR': 'bad',
'EXECUTOR_CONFIG': {},
'ORCHESTRATOR': {'MEM_CONFIG': None},
})
build_job = self._create_build_job(namespace='something', retries=3)
result = yield From(self.manager.schedule(build_job))
self.assertFalse(result[0])
@async_test
def test_schedule_and_stop(self):
EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor
@ -730,6 +649,7 @@ class TestEphemeral(EphemeralBuilderTestCase):
self.manager.initialize({
'EXECUTOR': 'test',
'EXECUTOR_CONFIG': {},
'ORCHESTRATOR': {'MEM_CONFIG': None},
})
# Start the build job.
@ -752,7 +672,7 @@ class TestEphemeral(EphemeralBuilderTestCase):
# Stop the build job.
yield From(self.manager.kill_builder_executor(build_job.build_uuid))
self.assertEquals(executor.job_stopped, executor.job_started)
self.assertEqual(executor.job_stopped, executor.job_started)
if __name__ == '__main__':

View File

@ -3,6 +3,7 @@ from abc import ABCMeta, abstractmethod
from jsonschema import validate
from six import add_metaclass
from active_migration import ActiveDataMigration, ERTMigrationFlags
from endpoints.building import PreparedBuild
from data import model
from buildtrigger.triggerutil import get_trigger_config, InvalidServiceException
@ -37,7 +38,7 @@ NAMESPACES_SCHEMA = {
'description': 'Human-readable title of the namespace',
},
},
'required': ['personal', 'score', 'avatar_url', 'url', 'id', 'title'],
'required': ['personal', 'score', 'avatar_url', 'id', 'title'],
},
}
@ -75,7 +76,7 @@ BUILD_SOURCES_SCHEMA = {
'description': 'True if the repository is private',
},
},
'required': ['name', 'full_name', 'description', 'last_updated', 'url',
'required': ['name', 'full_name', 'description', 'last_updated',
'has_admin_permissions', 'private'],
},
}
@ -156,7 +157,7 @@ METADATA_SCHEMA = {
'required': ['username'],
},
},
'required': ['url', 'message', 'date'],
'required': ['message'],
},
},
'required': ['commit', 'git_url'],
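The schema edits above relax which fields are mandatory; with jsonschema, removing a key from 'required' means payloads that omit it now validate. A minimal sketch of that effect (the fragment below is illustrative, not the actual METADATA_SCHEMA):

from jsonschema import validate, ValidationError

COMMIT_INFO_SCHEMA = {
  'type': 'object',
  'properties': {
    'url': {'type': 'string'},
    'message': {'type': 'string'},
    'date': {'type': 'string'},
  },
  'required': ['message'],  # 'url' and 'date' are no longer required
}

# Omitting 'url' and 'date' now passes validation.
validate({'message': 'Fix link'}, COMMIT_INFO_SCHEMA)

# Omitting 'message' still fails.
try:
  validate({'url': 'https://example.com/commit/abc'}, COMMIT_INFO_SCHEMA)
except ValidationError as ve:
  print('rejected: %s' % ve.message)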
@ -172,7 +173,18 @@ class BuildTriggerHandler(object):
@property
def auth_token(self):
""" Returns the auth token for the trigger. """
return self.trigger.auth_token
# NOTE: This check is for testing.
if isinstance(self.trigger.auth_token, str):
return self.trigger.auth_token
# TODO(remove-unenc): Remove legacy field.
if self.trigger.secure_auth_token is not None:
return self.trigger.secure_auth_token.decrypt()
if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
return self.trigger.auth_token
return None
@abstractmethod
def load_dockerfile_contents(self):
@ -306,7 +318,6 @@ class BuildTriggerHandler(object):
default_branch = metadata.get('default_branch', None)
prepared = PreparedBuild(self.trigger)
prepared.name_from_sha(commit_sha)
# TODO: Charlie Tuesday, March 28, 2017 come back and clean up subdirectory.
prepared.subdirectory = config.get('dockerfile_path', None)
prepared.context = config.get('context', None)
prepared.is_manual = is_manual

View File

@ -64,21 +64,15 @@ BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = {
'user': {
'type': 'object',
'properties': {
'username': {
'display_name': {
'type': 'string',
},
'account_id': {
'type': 'string',
},
'links': {
'type': 'object',
'properties': {
'html': {
'type': 'object',
'properties': {
'href': {
'type': 'string',
},
},
'required': ['href'],
},
'avatar': {
'type': 'object',
'properties': {
@ -89,28 +83,12 @@ BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = {
'required': ['href'],
},
},
'required': ['html', 'avatar'],
'required': ['avatar'],
}, # /User
},
'required': ['username'],
}, # /Author
},
},
'links': {
'type': 'object',
'properties': {
'html': {
'type': 'object',
'properties': {
'href': {
'type': 'string',
},
},
'required': ['href'],
},
},
'required': ['html'],
}, # /Links
},
},
'required': ['hash', 'message', 'date'],
}, # /Target
@ -127,21 +105,15 @@ BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = {
'actor': {
'type': 'object',
'properties': {
'username': {
'account_id': {
'type': 'string',
},
'display_name': {
'type': 'string',
},
'links': {
'type': 'object',
'properties': {
'html': {
'type': 'object',
'properties': {
'href': {
'type': 'string',
},
},
'required': ['href'],
},
'avatar': {
'type': 'object',
'properties': {
@ -152,10 +124,9 @@ BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = {
'required': ['href'],
},
},
'required': ['html', 'avatar'],
'required': ['avatar'],
},
},
'required': ['username'],
}, # /Actor
'required': ['push', 'repository'],
} # /Root
@ -206,8 +177,7 @@ def get_transformed_commit_info(bb_commit, ref, default_branch, repository_name,
author = lookup_author(match.group(1))
author_info = JSONPathDict(author) if author is not None else None
if author_info:
config['commit_info.author.username'] = author_info['user.username']
config['commit_info.author.url'] = 'https://bitbucket.org/%s/' % author_info['user.username']
config['commit_info.author.username'] = author_info['user.display_name']
config['commit_info.author.avatar_url'] = author_info['user.avatar']
return config.dict_value()
@ -245,12 +215,10 @@ def get_transformed_webhook_payload(bb_payload, default_branch=None):
config['commit_info.message'] = target['message']
config['commit_info.date'] = target['date']
config['commit_info.author.username'] = target['author.user.username']
config['commit_info.author.url'] = target['author.user.links.html.href']
config['commit_info.author.username'] = target['author.user.display_name']
config['commit_info.author.avatar_url'] = target['author.user.links.avatar.href']
config['commit_info.committer.username'] = payload['actor.username']
config['commit_info.committer.url'] = payload['actor.links.html.href']
config['commit_info.committer.username'] = payload['actor.display_name']
config['commit_info.committer.avatar_url'] = payload['actor.links.avatar.href']
return config.dict_value()
@ -328,8 +296,8 @@ class BitbucketBuildTrigger(BuildTriggerHandler):
if not result:
return False
username = data['user']['username']
self.put_config_key('username', username)
self.put_config_key('account_id', data['user']['account_id'])
self.put_config_key('nickname', data['user']['nickname'])
return True
def is_active(self):
@ -403,11 +371,12 @@ class BitbucketBuildTrigger(BuildTriggerHandler):
namespaces = {}
for repo in data:
owner = repo['owner']
if owner in namespaces:
namespaces[owner]['score'] = namespaces[owner]['score'] + 1
else:
namespaces[owner] = {
'personal': owner == self.config.get('username'),
'personal': owner == self.config.get('nickname', self.config.get('username')),
'id': owner,
'title': owner,
'avatar_url': repo['logo'],

View File

@ -108,7 +108,7 @@ def get_transformed_webhook_payload(gh_payload, default_branch=None, lookup_user
config = SafeDictSetter()
config['commit'] = payload['head_commit.id']
config['ref'] = payload['ref']
config['default_branch'] = default_branch
config['default_branch'] = payload['repository.default_branch'] or default_branch
config['git_url'] = payload['repository.ssh_url']
config['commit_info.url'] = payload['head_commit.url']
@ -293,13 +293,18 @@ class GithubBuildTrigger(BuildTriggerHandler):
for org in usr.get_orgs():
organization = org.login if org.login else org.name
# NOTE: We don't load the organization's html_url nor its plan, because doing
# so requires loading *each organization* via its own API call in this tight
# loop, which was massively slowing down the load time for users when setting
# up triggers.
namespaces[organization] = {
'personal': False,
'id': organization,
'title': organization,
'avatar_url': org.avatar_url,
'url': org.html_url,
'score': org.plan.private_repos if org.plan else 0,
'url': '',
'score': 0,
}
return BuildTriggerHandler.build_namespaces_response(namespaces)
@ -320,7 +325,7 @@ class GithubBuildTrigger(BuildTriggerHandler):
gh_client = self._get_client()
usr = gh_client.get_user()
if namespace == usr.login:
repos = [repo_view(repo) for repo in usr.get_repos() if repo.owner.login == namespace]
repos = [repo_view(repo) for repo in usr.get_repos(type='owner', sort='updated')]
return BuildTriggerHandler.build_sources_response(repos)
try:
@ -376,7 +381,7 @@ class GithubBuildTrigger(BuildTriggerHandler):
return None
try:
file_info = repo.get_file_contents(path)
file_info = repo.get_contents(path)
# TypeError is needed because directory inputs cause a TypeError
except (GithubException, TypeError) as ghe:
logger.error("got error from trying to find github file %s" % ghe)

View File

@ -51,7 +51,7 @@ GITLAB_WEBHOOK_PAYLOAD_SCHEMA = {
'type': 'string',
},
'url': {
'type': 'string',
'type': ['string', 'null'],
},
'message': {
'type': 'string',
@ -69,7 +69,7 @@ GITLAB_WEBHOOK_PAYLOAD_SCHEMA = {
'required': ['email'],
},
},
'required': ['id', 'url', 'message', 'timestamp'],
'required': ['id', 'message', 'timestamp'],
},
},
},
@ -99,7 +99,7 @@ def _catch_timeouts_and_errors(func):
except gitlab.GitlabError:
msg = 'GitLab API error. Please contact support.'
logger.exception(msg)
raise ExternalServiceError(msg)
return wrapper
@ -145,32 +145,36 @@ def get_transformed_webhook_payload(gl_payload, default_branch=None, lookup_user
if payload['object_kind'] == 'push' and not commits:
raise SkipRequestException
# Check for missing commit information.
commit_sha = payload['checkout_sha'] or payload['after']
if commit_sha is None or commit_sha == '0000000000000000000000000000000000000000':
raise SkipRequestException
config = SafeDictSetter()
config['commit'] = payload['checkout_sha']
config['commit'] = commit_sha
config['ref'] = payload['ref']
config['default_branch'] = default_branch
config['git_url'] = payload['repository.git_ssh_url']
found_commit = JSONPathDict({})
if payload['object_kind'] == 'push':
if payload['object_kind'] == 'push' or payload['object_kind'] == 'tag_push':
# Find the commit associated with the checkout_sha. Gitlab doesn't necessarily send these in
# any particular order, so we cannot simply index into the commits list.
found_commit = None
for commit in commits:
if commit['id'] == payload['checkout_sha']:
found_commit = JSONPathDict(commit)
break
if commits is not None:
for commit in commits:
if commit['id'] == payload['checkout_sha']:
found_commit = JSONPathDict(commit)
break
if found_commit is None and lookup_commit:
checkout_sha = payload['checkout_sha'] or payload['after']
found_commit_info = lookup_commit(payload['project_id'], checkout_sha)
found_commit = JSONPathDict(dict(found_commit_info) if found_commit_info else {})
if found_commit is None:
raise SkipRequestException
elif payload['object_kind'] == 'tag_push':
# Gitlab doesn't send commit information for tag pushes (WHY?!), so we need to lookup the
# commit SHA directly.
if lookup_commit:
found_commit_info = lookup_commit(payload['project_id'], payload['checkout_sha'])
found_commit = JSONPathDict(found_commit_info or {})
config['commit_info.url'] = found_commit['url']
config['commit_info.message'] = found_commit['message']
config['commit_info.date'] = found_commit['timestamp']
@ -247,6 +251,9 @@ class GitLabBuildTrigger(BuildTriggerHandler):
hook = gl_project.hooks.create({
'url': standard_webhook_url,
'push': True,
'tag_push': True,
'push_events': True,
'tag_push_events': True,
})
if not hook:
msg = 'Unable to create webhook on repository: %s' % new_build_source
@ -261,17 +268,33 @@ class GitLabBuildTrigger(BuildTriggerHandler):
gl_client = self._get_authorized_client()
# Find the GitLab repository.
gl_project = gl_client.projects.get(config['build_source'])
if not gl_project:
msg = 'Unable to find GitLab repository for source: %s' % config['build_source']
raise TriggerDeactivationException(msg)
try:
gl_project = gl_client.projects.get(config['build_source'])
if not gl_project:
config.pop('key_id', None)
config.pop('hook_id', None)
self.config = config
return config
except gitlab.GitlabGetError as ex:
if ex.response_code != 404:
raise
# Remove the webhook.
gl_project.hooks.delete(config['hook_id'])
try:
gl_project.hooks.delete(config['hook_id'])
except gitlab.GitlabDeleteError as ex:
if ex.response_code != 404:
raise
config.pop('hook_id', None)
# Remove the key
gl_project.keys.delete(config['key_id'])
try:
gl_project.keys.delete(config['key_id'])
except gitlab.GitlabDeleteError as ex:
if ex.response_code != 404:
raise
config.pop('key_id', None)
self.config = config
@ -287,24 +310,17 @@ class GitLabBuildTrigger(BuildTriggerHandler):
namespaces = {}
for namespace in _paginated_iterator(gl_client.namespaces.list, RepositoryReadException):
namespace_id = namespace.get_id()
# Retrieve the namespace as a user or group.
namespace_obj = self._get_namespace(gl_client, namespace)
if namespace_obj is None:
logger.warning('Could not load details for namespace %s', namespace_id)
continue
if namespace_id in namespaces:
namespaces[namespace_id]['score'] = namespaces[namespace_id]['score'] + 1
else:
owner = namespace.attributes['name']
namespaces[namespace_id] = {
'personal': owner == current_user.attributes['username'],
'personal': namespace.attributes['kind'] == 'user',
'id': str(namespace_id),
'title': namespace.attributes['name'],
'avatar_url': namespace_obj.attributes.get('avatar_url', ''),
'avatar_url': namespace.attributes.get('avatar_url'),
'score': 1,
'url': namespace_obj.attributes.get('web_url', ''),
'url': namespace.attributes.get('web_url') or '',
}
return BuildTriggerHandler.build_namespaces_response(namespaces)
@ -314,7 +330,7 @@ class GitLabBuildTrigger(BuildTriggerHandler):
if gl_namespace.attributes['kind'] == 'group':
return gl_client.groups.get(gl_namespace.attributes['id'], lazy=lazy)
if gl_namespace.attributes['name'] == gl_client.user.attributes['username']:
if gl_namespace.attributes['kind'] == 'user':
return gl_client.users.get(gl_client.user.attributes['id'], lazy=lazy)
# Note: This doesn't seem to work for IDs retrieved via the namespaces API; the IDs are
@ -375,7 +391,11 @@ class GitLabBuildTrigger(BuildTriggerHandler):
namespace_obj = self._get_namespace(gl_client, gl_namespace, lazy=True)
repositories = _paginated_iterator(namespace_obj.projects.list, RepositoryReadException)
return BuildTriggerHandler.build_sources_response([repo_view(repo) for repo in repositories])
try:
return BuildTriggerHandler.build_sources_response([repo_view(repo) for repo in repositories])
except gitlab.GitlabGetError:
return []
@_catch_timeouts_and_errors
def list_build_subdirs(self):
@ -583,10 +603,17 @@ class GitLabBuildTrigger(BuildTriggerHandler):
logger.debug('Skipping GitLab build; project %s not found', self.config['build_source'])
raise InvalidPayloadException()
def lookup_commit(repo_id, commit_sha):
commit = self.lookup_commit(repo_id, commit_sha)
if commit is None:
return None
return dict(commit.attributes)
default_branch = gl_project.attributes['default_branch']
metadata = get_transformed_webhook_payload(payload, default_branch=default_branch,
lookup_user=self.lookup_user,
lookup_commit=self.lookup_commit)
lookup_commit=lookup_commit)
prepared = self.prepare_build(metadata)
# Check if we should skip this build.

View File

@ -9,7 +9,8 @@ def get_bitbucket_trigger(dockerfile_path=''):
trigger = BitbucketBuildTrigger(trigger_obj, {
'build_source': 'foo/bar',
'dockerfile_path': dockerfile_path,
'username': 'knownuser'
'nickname': 'knownuser',
'account_id': 'foo',
})
trigger._get_client = get_mock_bitbucket

View File

@ -65,7 +65,7 @@ def get_mock_github():
repo_mock.permissions.admin = namespace == 'knownuser'
return repo_mock
def get_user_repos_mock():
def get_user_repos_mock(type='all', sort='created'):
return [get_repo_mock('knownuser', 'somerepo')]
def get_org_repos_mock(type='all'):
@ -123,7 +123,7 @@ def get_mock_github():
otherbranch.commit = get_commit_mock('aaaaaaa')
return [master, otherbranch]
def get_file_contents_mock(filepath):
def get_contents_mock(filepath):
if filepath == 'Dockerfile':
m = Mock()
m.content = 'hello world'
@ -168,7 +168,7 @@ def get_mock_github():
repo_mock.get_tags = Mock(side_effect=get_tags_mock)
repo_mock.get_branches = Mock(side_effect=get_branches_mock)
repo_mock.get_commit = Mock(side_effect=get_commit_mock)
repo_mock.get_file_contents = Mock(side_effect=get_file_contents_mock)
repo_mock.get_contents = Mock(side_effect=get_contents_mock)
repo_mock.get_git_tree = Mock(side_effect=get_git_tree_mock)
gh_mock = Mock()

View File

@ -234,7 +234,7 @@ def user_namespace_handler(_, request):
'Content-Type': 'application/json',
},
'content': json.dumps({
"id": 2,
"id": 1,
"name": "knownuser",
"path": "knownuser",
"kind": "user",
@ -262,6 +262,7 @@ def namespaces_handler(_, request):
"kind": "group",
"full_path": "someorg",
"parent_id": None,
"web_url": "http://gitlab.com/groups/someorg",
"members_count_with_descendants": 2
}]),
}
@ -546,7 +547,7 @@ def delete_deploykey_handker(_, request):
}
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/users/2/projects$')
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/users/1/projects$')
def user_projects_list_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}

View File

@ -86,6 +86,6 @@ def test_handle_trigger_request(bitbucket_trigger, payload, expected_error, expe
if expected_error is not None:
with pytest.raises(expected_error) as ipe:
bitbucket_trigger.handle_trigger_request(request)
assert ipe.value.message == expected_message
assert str(ipe.value) == expected_message
else:
assert isinstance(bitbucket_trigger.handle_trigger_request(request), PreparedBuild)

View File

@ -32,7 +32,7 @@ def test_handle_trigger_request(payload, expected_error, expected_message):
if expected_error is not None:
with pytest.raises(expected_error) as ipe:
trigger.handle_trigger_request(request)
assert ipe.value.message == expected_message
assert str(ipe.value) == expected_message
else:
assert isinstance(trigger.handle_trigger_request(request), PreparedBuild)
@ -46,6 +46,6 @@ def test_manual_start(run_parameters, expected_error, expected_message):
if expected_error is not None:
with pytest.raises(expected_error) as ipe:
trigger.manual_start(run_parameters)
assert ipe.value.message == expected_message
assert str(ipe.value) == expected_message
else:
assert isinstance(trigger.manual_start(run_parameters), PreparedBuild)

View File

@ -33,7 +33,7 @@ def test_manual_start(run_parameters, expected_error, expected_message, githost_
if expected_error is not None:
with pytest.raises(expected_error) as ipe:
githost_trigger.manual_start(run_parameters)
assert ipe.value.message == expected_message
assert str(ipe.value) == expected_message
else:
assert isinstance(githost_trigger.manual_start(run_parameters), PreparedBuild)
@ -58,7 +58,7 @@ def test_list_field_values(name, expected, githost_trigger):
assert githost_trigger.list_field_values(name) == expected
def test_list_build_source_namespaces(githost_trigger):
def test_list_build_source_namespaces():
namespaces_expected = [
{
'personal': True,
@ -78,7 +78,7 @@ def test_list_build_source_namespaces(githost_trigger):
}
]
found = githost_trigger.list_build_source_namespaces()
found = get_bitbucket_trigger().list_build_source_namespaces()
found.sort()
namespaces_expected.sort()

View File

@ -63,7 +63,7 @@ def test_handle_trigger_request(github_trigger, payload, expected_error, expecte
if expected_error is not None:
with pytest.raises(expected_error) as ipe:
github_trigger.handle_trigger_request(request)
assert ipe.value.message == expected_message
assert str(ipe.value) == expected_message
else:
assert isinstance(github_trigger.handle_trigger_request(request), PreparedBuild)
@ -88,3 +88,30 @@ def test_lookup_user(username, expected_response, github_trigger):
def test_list_build_subdirs(github_trigger):
assert github_trigger.list_build_subdirs() == ['Dockerfile', 'somesubdir/Dockerfile']
def test_list_build_source_namespaces(github_trigger):
namespaces_expected = [
{
'personal': True,
'score': 1,
'avatar_url': 'avatarurl',
'id': 'knownuser',
'title': 'knownuser',
'url': 'https://bitbucket.org/knownuser',
},
{
'score': 0,
'title': 'someorg',
'personal': False,
'url': '',
'avatar_url': 'avatarurl',
'id': 'someorg'
}
]
found = github_trigger.list_build_source_namespaces()
found.sort()
namespaces_expected.sort()
assert found == namespaces_expected

View File

@ -132,7 +132,7 @@ def test_handle_trigger_request(gitlab_trigger, payload, expected_error, expecte
if expected_error is not None:
with pytest.raises(expected_error) as ipe:
gitlab_trigger.handle_trigger_request(request)
assert ipe.value.message == expected_message
assert str(ipe.value) == expected_message
else:
assert isinstance(gitlab_trigger.handle_trigger_request(request), PreparedBuild)
@ -159,7 +159,7 @@ def test_manual_start(run_parameters, expected_error, expected_message, gitlab_t
if expected_error is not None:
with pytest.raises(expected_error) as ipe:
gitlab_trigger.manual_start(run_parameters)
assert ipe.value.message == expected_message
assert str(ipe.value) == expected_message
else:
assert isinstance(gitlab_trigger.manual_start(run_parameters), PreparedBuild)

View File

@ -65,77 +65,75 @@ def test_custom_gitlab():
expected = {
'commit': u'fb88379ee45de28a0a4590fddcbd8eff8b36026e',
'ref': u'refs/heads/master',
'git_url': u'git@gitlab.com:jzelinskie/www-gitlab-com.git',
'git_url': u'git@gitlab.com:jsmith/somerepo.git',
'commit_info': {
'url': u'https://gitlab.com/jzelinskie/www-gitlab-com/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e',
'url': u'https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e',
'date': u'2015-08-13T19:33:18+00:00',
'message': u'Fix link\n',
},
}
assertSchema('gitlab_webhook', expected, custom_trigger_payload, git_url='git@gitlab.com:jzelinskie/www-gitlab-com.git')
assertSchema('gitlab_webhook', expected, custom_trigger_payload, git_url='git@gitlab.com:jsmith/somerepo.git')
def test_custom_github():
expected = {
'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'ref': u'refs/heads/master',
'git_url': u'git@github.com:josephschorr/anothertest.git',
'default_branch': u'master',
'git_url': u'git@github.com:jsmith/anothertest.git',
'commit_info': {
'url': u'https://github.com/josephschorr/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'date': u'2015-09-11T14:26:16-04:00',
'message': u'Update Dockerfile',
'committer': {
'username': u'josephschorr',
'username': u'jsmith',
},
'author': {
'username': u'josephschorr',
'username': u'jsmith',
},
},
}
assertSchema('github_webhook', expected, custom_trigger_payload,
git_url='git@github.com:josephschorr/anothertest.git')
git_url='git@github.com:jsmith/anothertest.git')
def test_custom_bitbucket():
expected = {
"commit": u"af64ae7188685f8424040b4735ad12941b980d75",
"ref": u"refs/heads/master",
"git_url": u"git@bitbucket.org:jscoreos/another-repo.git",
"git_url": u"git@bitbucket.org:jsmith/another-repo.git",
"commit_info": {
"url": u"https://bitbucket.org/jscoreos/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75",
"url": u"https://bitbucket.org/jsmith/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75",
"date": u"2015-09-10T20:40:54+00:00",
"message": u"Dockerfile edited online with Bitbucket",
"author": {
"username": u"jscoreos",
"url": u"https://bitbucket.org/jscoreos/",
"avatar_url": u"https://bitbucket.org/account/jscoreos/avatar/32/",
"username": u"John Smith",
"avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/",
},
"committer": {
"username": u"jscoreos",
"url": u"https://bitbucket.org/jscoreos/",
"avatar_url": u"https://bitbucket.org/account/jscoreos/avatar/32/",
"username": u"John Smith",
"avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/",
},
},
}
assertSchema('bitbucket_webhook', expected, custom_trigger_payload, git_url='git@bitbucket.org:jscoreos/another-repo.git')
assertSchema('bitbucket_webhook', expected, custom_trigger_payload, git_url='git@bitbucket.org:jsmith/another-repo.git')
def test_bitbucket_customer_payload_noauthor():
expected = {
"commit": "a0ec139843b2bb281ab21a433266ddc498e605dc",
"ref": "refs/heads/master",
"git_url": "git@bitbucket.org:lightsidelabs/svc-identity.git",
"git_url": "git@bitbucket.org:somecoollabs/svc-identity.git",
"commit_info": {
"url": "https://bitbucket.org/lightsidelabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc",
"url": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc",
"date": "2015-09-25T00:55:08+00:00",
"message": "Update version.py to 0.1.2 [skip ci]\n\n(by utilitybelt/scripts/autotag_version.py)\n",
"committer": {
"username": "LightSide_CodeShip",
"url": "https://bitbucket.org/LightSide_CodeShip/",
"avatar_url": "https://bitbucket.org/account/LightSide_CodeShip/avatar/32/",
"username": "CodeShip Tagging",
"avatar_url": "https://bitbucket.org/account/SomeCoolLabs_CodeShip/avatar/32/",
},
},
}
@ -147,15 +145,14 @@ def test_bitbucket_customer_payload_tag():
expected = {
"commit": "a0ec139843b2bb281ab21a433266ddc498e605dc",
"ref": "refs/tags/0.1.2",
"git_url": "git@bitbucket.org:lightsidelabs/svc-identity.git",
"git_url": "git@bitbucket.org:somecoollabs/svc-identity.git",
"commit_info": {
"url": "https://bitbucket.org/lightsidelabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc",
"url": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc",
"date": "2015-09-25T00:55:08+00:00",
"message": "Update version.py to 0.1.2 [skip ci]\n\n(by utilitybelt/scripts/autotag_version.py)\n",
"committer": {
"username": "LightSide_CodeShip",
"url": "https://bitbucket.org/LightSide_CodeShip/",
"avatar_url": "https://bitbucket.org/account/LightSide_CodeShip/avatar/32/",
"username": "CodeShip Tagging",
"avatar_url": "https://bitbucket.org/account/SomeCoolLabs_CodeShip/avatar/32/",
},
},
}
@ -171,7 +168,7 @@ def test_bitbucket_commit():
def lookup_author(_):
return {
'user': {
'username': 'cooluser',
'display_name': 'cooluser',
'avatar': 'http://some/avatar/url'
}
}
@ -186,7 +183,6 @@ def test_bitbucket_commit():
"date": u"2012-07-24 00:26:36",
"message": u"making some changes\n",
"author": {
"url": u"https://bitbucket.org/cooluser/",
"avatar_url": u"http://some/avatar/url",
"username": u"cooluser",
}
@ -200,20 +196,18 @@ def test_bitbucket_webhook_payload():
expected = {
"commit": u"af64ae7188685f8424040b4735ad12941b980d75",
"ref": u"refs/heads/master",
"git_url": u"git@bitbucket.org:jscoreos/another-repo.git",
"git_url": u"git@bitbucket.org:jsmith/another-repo.git",
"commit_info": {
"url": u"https://bitbucket.org/jscoreos/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75",
"url": u"https://bitbucket.org/jsmith/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75",
"date": u"2015-09-10T20:40:54+00:00",
"message": u"Dockerfile edited online with Bitbucket",
"author": {
"username": u"jscoreos",
"url": u"https://bitbucket.org/jscoreos/",
"avatar_url": u"https://bitbucket.org/account/jscoreos/avatar/32/",
"username": u"John Smith",
"avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/",
},
"committer": {
"username": u"jscoreos",
"url": u"https://bitbucket.org/jscoreos/",
"avatar_url": u"https://bitbucket.org/account/jscoreos/avatar/32/",
"username": u"John Smith",
"avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/",
},
},
}
@ -225,16 +219,17 @@ def test_github_webhook_payload_slash_branch():
expected = {
'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'ref': u'refs/heads/slash/branch',
'git_url': u'git@github.com:josephschorr/anothertest.git',
'default_branch': u'master',
'git_url': u'git@github.com:jsmith/anothertest.git',
'commit_info': {
'url': u'https://github.com/josephschorr/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'date': u'2015-09-11T14:26:16-04:00',
'message': u'Update Dockerfile',
'committer': {
'username': u'josephschorr',
'username': u'jsmith',
},
'author': {
'username': u'josephschorr',
'username': u'jsmith',
},
},
}
@ -246,16 +241,17 @@ def test_github_webhook_payload():
expected = {
'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'ref': u'refs/heads/master',
'git_url': u'git@github.com:josephschorr/anothertest.git',
'default_branch': u'master',
'git_url': u'git@github.com:jsmith/anothertest.git',
'commit_info': {
'url': u'https://github.com/josephschorr/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'date': u'2015-09-11T14:26:16-04:00',
'message': u'Update Dockerfile',
'committer': {
'username': u'josephschorr',
'username': u'jsmith',
},
'author': {
'username': u'josephschorr',
'username': u'jsmith',
},
},
}
@ -267,19 +263,20 @@ def test_github_webhook_payload_with_lookup():
expected = {
'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'ref': u'refs/heads/master',
'git_url': u'git@github.com:josephschorr/anothertest.git',
'default_branch': u'master',
'git_url': u'git@github.com:jsmith/anothertest.git',
'commit_info': {
'url': u'https://github.com/josephschorr/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'date': u'2015-09-11T14:26:16-04:00',
'message': u'Update Dockerfile',
'committer': {
'username': u'josephschorr',
'url': u'http://github.com/josephschorr',
'username': u'jsmith',
'url': u'http://github.com/jsmith',
'avatar_url': u'http://some/avatar/url',
},
'author': {
'username': u'josephschorr',
'url': u'http://github.com/josephschorr',
'username': u'jsmith',
'url': u'http://github.com/jsmith',
'avatar_url': u'http://some/avatar/url',
},
},
@ -287,7 +284,7 @@ def test_github_webhook_payload_with_lookup():
def lookup_user(_):
return {
'html_url': 'http://github.com/josephschorr',
'html_url': 'http://github.com/jsmith',
'avatar_url': 'http://some/avatar/url'
}
@ -298,9 +295,10 @@ def test_github_webhook_payload_missing_fields_with_lookup():
expected = {
'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'ref': u'refs/heads/master',
'git_url': u'git@github.com:josephschorr/anothertest.git',
'default_branch': u'master',
'git_url': u'git@github.com:jsmith/anothertest.git',
'commit_info': {
'url': u'https://github.com/josephschorr/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'date': u'2015-09-11T14:26:16-04:00',
'message': u'Update Dockerfile'
},
@ -311,7 +309,7 @@ def test_github_webhook_payload_missing_fields_with_lookup():
raise Exception('Fail!')
return {
'html_url': 'http://github.com/josephschorr',
'html_url': 'http://github.com/jsmith',
'avatar_url': 'http://some/avatar/url'
}
@ -322,9 +320,9 @@ def test_gitlab_webhook_payload():
expected = {
'commit': u'fb88379ee45de28a0a4590fddcbd8eff8b36026e',
'ref': u'refs/heads/master',
'git_url': u'git@gitlab.com:jzelinskie/www-gitlab-com.git',
'git_url': u'git@gitlab.com:jsmith/somerepo.git',
'commit_info': {
'url': u'https://gitlab.com/jzelinskie/www-gitlab-com/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e',
'url': u'https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e',
'date': u'2015-08-13T19:33:18+00:00',
'message': u'Fix link\n',
},
@ -337,9 +335,10 @@ def test_github_webhook_payload_known_issue():
expected = {
"commit": "118b07121695d9f2e40a5ff264fdcc2917680870",
"ref": "refs/heads/master",
"git_url": "git@github.com:silas/docker-test.git",
"default_branch": "master",
"git_url": "git@github.com:jsmith/docker-test.git",
"commit_info": {
"url": "https://github.com/silas/docker-test/commit/118b07121695d9f2e40a5ff264fdcc2917680870",
"url": "https://github.com/jsmith/docker-test/commit/118b07121695d9f2e40a5ff264fdcc2917680870",
"date": "2015-09-25T14:55:11-04:00",
"message": "Fail",
},
@ -352,9 +351,10 @@ def test_github_webhook_payload_missing_fields():
expected = {
'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'ref': u'refs/heads/master',
'git_url': u'git@github.com:josephschorr/anothertest.git',
'default_branch': u'master',
'git_url': u'git@github.com:jsmith/anothertest.git',
'commit_info': {
'url': u'https://github.com/josephschorr/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'date': u'2015-09-11T14:26:16-04:00',
'message': u'Update Dockerfile'
},
@ -371,14 +371,14 @@ def test_gitlab_webhook_multiple_commits():
expected = {
'commit': u'9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53',
'ref': u'refs/heads/master',
'git_url': u'git@gitlab.com:joseph.schorr/some-test-project.git',
'git_url': u'git@gitlab.com:jsmith/some-test-project.git',
'commit_info': {
'url': u'https://gitlab.com/joseph.schorr/some-test-project/commit/9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53',
'url': u'https://gitlab.com/jsmith/some-test-project/commit/9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53',
'date': u'2016-09-29T15:02:41+00:00',
'message': u"Merge branch 'foobar' into 'master'\r\n\r\nAdd changelog\r\n\r\nSome merge thing\r\n\r\nSee merge request !1",
'author': {
'username': 'josephschorr',
'url': 'http://gitlab.com/josephschorr',
'username': 'jsmith',
'url': 'http://gitlab.com/jsmith',
'avatar_url': 'http://some/avatar/url'
},
},
@ -386,8 +386,8 @@ def test_gitlab_webhook_multiple_commits():
def lookup_user(_):
return {
'username': 'josephschorr',
'html_url': 'http://gitlab.com/josephschorr',
'username': 'jsmith',
'html_url': 'http://gitlab.com/jsmith',
'avatar_url': 'http://some/avatar/url',
}
@ -400,8 +400,8 @@ def test_gitlab_webhook_for_tag():
'commit_info': {
'author': {
'avatar_url': 'http://some/avatar/url',
'url': 'http://gitlab.com/jzelinskie',
'username': 'jzelinskie'
'url': 'http://gitlab.com/jsmith',
'username': 'jsmith'
},
'date': '2015-08-13T19:33:18+00:00',
'message': 'Fix link\n',
@ -413,8 +413,8 @@ def test_gitlab_webhook_for_tag():
def lookup_user(_):
return {
'username': 'jzelinskie',
'html_url': 'http://gitlab.com/jzelinskie',
'username': 'jsmith',
'html_url': 'http://gitlab.com/jsmith',
'avatar_url': 'http://some/avatar/url',
}
@ -436,20 +436,72 @@ def test_gitlab_webhook_for_tag():
def test_gitlab_webhook_for_tag_nocommit():
assertSkipped('gitlab_webhook_tag', gl_webhook)
def test_gitlab_webhook_for_tag_commit_sha_null():
assertSkipped('gitlab_webhook_tag_commit_sha_null', gl_webhook)
def test_gitlab_webhook_for_tag_known_issue():
expected = {
'commit': u'82b3d5ae55f7080f1e6022629cdb57bfae7cccc7',
'git_url': u'git@example.com:jsmith/example.git',
'ref': u'refs/tags/v1.0.0',
'commit': u'770830e7ca132856991e6db4f7fc0f4dbe20bd5f',
'ref': u'refs/tags/thirdtag',
'git_url': u'git@gitlab.com:someuser/some-test-project.git',
'commit_info': {
'url': u'https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f',
'date': u'2019-10-17T18:07:48Z',
'message': u'Update Dockerfile',
'author': {
'username': 'someuser',
'url': 'http://gitlab.com/someuser',
'avatar_url': 'http://some/avatar/url',
},
},
}
def lookup_user(_):
return {
'username': 'jzelinskie',
'html_url': 'http://gitlab.com/jzelinskie',
'username': 'someuser',
'html_url': 'http://gitlab.com/someuser',
'avatar_url': 'http://some/avatar/url',
}
assertSchema('gitlab_webhook_tag', expected, gl_webhook, lookup_user=lookup_user)
assertSchema('gitlab_webhook_tag_commit_issue', expected, gl_webhook, lookup_user=lookup_user)
def test_gitlab_webhook_payload_known_issue():
expected = {
'commit': u'770830e7ca132856991e6db4f7fc0f4dbe20bd5f',
'ref': u'refs/tags/fourthtag',
'git_url': u'git@gitlab.com:someuser/some-test-project.git',
'commit_info': {
'url': u'https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f',
'date': u'2019-10-17T18:07:48Z',
'message': u'Update Dockerfile',
},
}
def lookup_commit(repo_id, commit_sha):
if commit_sha == '770830e7ca132856991e6db4f7fc0f4dbe20bd5f':
return {
"added": [],
"author": {
"name": "Some User",
"email": "someuser@somedomain.com"
},
"url": "https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f",
"message": "Update Dockerfile",
"removed": [],
"modified": [
"Dockerfile"
],
"id": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f"
}
return None
assertSchema('gitlab_webhook_known_issue', expected, gl_webhook, lookup_commit=lookup_commit)
def test_gitlab_webhook_for_other():
@ -460,14 +512,14 @@ def test_gitlab_webhook_payload_with_lookup():
expected = {
'commit': u'fb88379ee45de28a0a4590fddcbd8eff8b36026e',
'ref': u'refs/heads/master',
'git_url': u'git@gitlab.com:jzelinskie/www-gitlab-com.git',
'git_url': u'git@gitlab.com:jsmith/somerepo.git',
'commit_info': {
'url': u'https://gitlab.com/jzelinskie/www-gitlab-com/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e',
'url': u'https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e',
'date': u'2015-08-13T19:33:18+00:00',
'message': u'Fix link\n',
'author': {
'username': 'jzelinskie',
'url': 'http://gitlab.com/jzelinskie',
'username': 'jsmith',
'url': 'http://gitlab.com/jsmith',
'avatar_url': 'http://some/avatar/url',
},
},
@ -475,8 +527,8 @@ def test_gitlab_webhook_payload_with_lookup():
def lookup_user(_):
return {
'username': 'jzelinskie',
'html_url': 'http://gitlab.com/jzelinskie',
'username': 'jsmith',
'html_url': 'http://gitlab.com/jsmith',
'avatar_url': 'http://some/avatar/url',
}
@ -488,18 +540,19 @@ def test_github_webhook_payload_deleted_commit():
'commit': u'456806b662cb903a0febbaed8344f3ed42f27bab',
'commit_info': {
'author': {
'username': u'jakedt'
'username': u'jsmith'
},
'committer': {
'username': u'jakedt'
'username': u'jsmith'
},
'date': u'2015-12-08T18:07:03-05:00',
'message': (u'Merge pull request #1044 from jakedt/errerror\n\n' +
'message': (u'Merge pull request #1044 from jsmith/errerror\n\n' +
'Assign the exception to a variable to log it'),
'url': u'https://github.com/coreos-inc/quay/commit/456806b662cb903a0febbaed8344f3ed42f27bab'
'url': u'https://github.com/jsmith/somerepo/commit/456806b662cb903a0febbaed8344f3ed42f27bab'
},
'git_url': u'git@github.com:coreos-inc/quay.git',
'git_url': u'git@github.com:jsmith/somerepo.git',
'ref': u'refs/heads/master',
'default_branch': u'master',
}
def lookup_user(_):

View File

@ -9,9 +9,9 @@
"file": "Readme"
}
],
"raw_author": "Mary Anthony <manthony@172-28-13-105.staff.sf.atlassian.com>",
"raw_author": "Mark Anthony <manthony@example.com>",
"utctimestamp": "2012-07-23 22:26:36+00:00",
"author": "Mary Anthony",
"author": "Mark Anthony",
"timestamp": "2012-07-24 00:26:36",
"node": "abdeaf1b2b4a6b9ddf742c1e1754236380435a62",
"parents": [

View File

@ -1,55 +1,43 @@
{
"actor": {
"username": "LightSide_CodeShip",
"account_id": "SomeCoolLabs_CodeShip",
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/users/LightSide_CodeShip"
"href": "https://api.bitbucket.org/2.0/users/SomeCoolLabs_CodeShip"
},
"avatar": {
"href": "https://bitbucket.org/account/LightSide_CodeShip/avatar/32/"
},
"html": {
"href": "https://bitbucket.org/LightSide_CodeShip/"
"href": "https://bitbucket.org/account/SomeCoolLabs_CodeShip/avatar/32/"
}
},
"uuid": "{d009ab20-b8b8-4840-9491-bfe72fbf666e}",
"type": "user",
"display_name": "CodeShip Tagging"
},
"repository": {
"full_name": "lightsidelabs/svc-identity",
"full_name": "somecoollabs/svc-identity",
"name": "svc-identity",
"scm": "git",
"type": "repository",
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity"
"href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity"
},
"avatar": {
"href": "https://bitbucket.org/lightsidelabs/svc-identity/avatar/16/"
},
"html": {
"href": "https://bitbucket.org/lightsidelabs/svc-identity"
"href": "https://bitbucket.org/somecoollabs/svc-identity/avatar/16/"
}
},
"is_private": true,
"uuid": "{3400bed9-5cde-45b9-8d86-c1dac5d5e610}",
"owner": {
"username": "lightsidelabs",
"account_id": "somecoollabs",
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/teams/lightsidelabs"
"href": "https://api.bitbucket.org/2.0/teams/somecoollabs"
},
"avatar": {
"href": "https://bitbucket.org/account/lightsidelabs/avatar/32/"
},
"html": {
"href": "https://bitbucket.org/lightsidelabs/"
"href": "https://bitbucket.org/account/somecoollabs/avatar/32/"
}
},
"uuid": "{456c5f28-7338-4d89-9506-c7b889ba2d11}",
"type": "team",
"display_name": "LightSIDE Labs"
"display_name": "Some Cool Labs"
}
},
"push": {
@ -60,14 +48,14 @@
"hash": "a0ec139843b2bb281ab21a433266ddc498e605dc",
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/a0ec139843b2bb281ab21a433266ddc498e605dc"
"href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/a0ec139843b2bb281ab21a433266ddc498e605dc"
},
"html": {
"href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc"
"href": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc"
}
},
"author": {
"raw": "scripts/autotag_version.py <utilitybelt@lightside>"
"raw": "scripts/autotag_version.py <utilitybelt@somecoollabs.com>"
},
"type": "commit",
"message": "Update version.py to 0.1.2 [skip ci]\n\n(by utilitybelt/scripts/autotag_version.py)\n"
@ -82,10 +70,10 @@
"hash": "bd749165b0c50c65c15fc4df526b8e9df26eff10",
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/bd749165b0c50c65c15fc4df526b8e9df26eff10"
"href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/bd749165b0c50c65c15fc4df526b8e9df26eff10"
},
"html": {
"href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/bd749165b0c50c65c15fc4df526b8e9df26eff10"
"href": "https://bitbucket.org/somecoollabs/svc-identity/commits/bd749165b0c50c65c15fc4df526b8e9df26eff10"
}
},
"type": "commit"
@ -94,10 +82,10 @@
"hash": "910b5624b74190dfaa51938d851563a4c5254926",
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/910b5624b74190dfaa51938d851563a4c5254926"
"href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/910b5624b74190dfaa51938d851563a4c5254926"
},
"html": {
"href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/910b5624b74190dfaa51938d851563a4c5254926"
"href": "https://bitbucket.org/somecoollabs/svc-identity/commits/910b5624b74190dfaa51938d851563a4c5254926"
}
},
"type": "commit"
@ -109,42 +97,38 @@
"hash": "263736ecc250113fad56a93f83b712093554ad42",
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/263736ecc250113fad56a93f83b712093554ad42"
"href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/263736ecc250113fad56a93f83b712093554ad42"
},
"html": {
"href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/263736ecc250113fad56a93f83b712093554ad42"
"href": "https://bitbucket.org/somecoollabs/svc-identity/commits/263736ecc250113fad56a93f83b712093554ad42"
}
},
"author": {
"raw": "Chris Winters <chris@cwinters.com>",
"raw": "John Smith <j@smith.com>",
"user": {
"username": "cwinters",
"account_id": "jsmith",
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/users/cwinters"
"href": "https://api.bitbucket.org/2.0/users/jsmith"
},
"avatar": {
"href": "https://bitbucket.org/account/cwinters/avatar/32/"
},
"html": {
"href": "https://bitbucket.org/cwinters/"
"href": "https://bitbucket.org/account/jsmith/avatar/32/"
}
},
"uuid": "{a6209615-6d75-4294-8181-dbf96d40fc6b}",
"type": "user",
"display_name": "Chris Winters"
"display_name": "John Smith"
}
}
},
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/refs/branches/master"
"href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/refs/branches/master"
},
"commits": {
"href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commits/master"
"href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commits/master"
},
"html": {
"href": "https://bitbucket.org/lightsidelabs/svc-identity/branch/master"
"href": "https://bitbucket.org/somecoollabs/svc-identity/branch/master"
}
},
"name": "master",
@ -152,13 +136,13 @@
},
"links": {
"diff": {
"href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/diff/a0ec139843b2bb281ab21a433266ddc498e605dc..263736ecc250113fad56a93f83b712093554ad42"
"href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/diff/a0ec139843b2bb281ab21a433266ddc498e605dc..263736ecc250113fad56a93f83b712093554ad42"
},
"commits": {
"href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commits?include=a0ec139843b2bb281ab21a433266ddc498e605dc&exclude=263736ecc250113fad56a93f83b712093554ad42"
"href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commits?include=a0ec139843b2bb281ab21a433266ddc498e605dc&exclude=263736ecc250113fad56a93f83b712093554ad42"
},
"html": {
"href": "https://bitbucket.org/lightsidelabs/svc-identity/branches/compare/a0ec139843b2bb281ab21a433266ddc498e605dc..263736ecc250113fad56a93f83b712093554ad42"
"href": "https://bitbucket.org/somecoollabs/svc-identity/branches/compare/a0ec139843b2bb281ab21a433266ddc498e605dc..263736ecc250113fad56a93f83b712093554ad42"
}
},
"new": {
@ -168,10 +152,10 @@
"hash": "263736ecc250113fad56a93f83b712093554ad42",
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/263736ecc250113fad56a93f83b712093554ad42"
"href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/263736ecc250113fad56a93f83b712093554ad42"
},
"html": {
"href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/263736ecc250113fad56a93f83b712093554ad42"
"href": "https://bitbucket.org/somecoollabs/svc-identity/commits/263736ecc250113fad56a93f83b712093554ad42"
}
},
"type": "commit"
@ -183,25 +167,25 @@
"hash": "a0ec139843b2bb281ab21a433266ddc498e605dc",
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/a0ec139843b2bb281ab21a433266ddc498e605dc"
"href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/a0ec139843b2bb281ab21a433266ddc498e605dc"
},
"html": {
"href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc"
"href": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc"
}
},
"author": {
"raw": "scripts/autotag_version.py <utilitybelt@lightside>"
"raw": "scripts/autotag_version.py <utilitybelt@somecoollabs.com>"
}
},
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/refs/branches/master"
"href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/refs/branches/master"
},
"commits": {
"href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commits/master"
"href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commits/master"
},
"html": {
"href": "https://bitbucket.org/lightsidelabs/svc-identity/branch/master"
"href": "https://bitbucket.org/somecoollabs/svc-identity/branch/master"
}
},
"name": "master",

View File

@ -4,7 +4,7 @@
{
"links": {
"commits": {
"href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commits?include=a0ec139843b2bb281ab21a433266ddc498e605dc"
"href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commits?include=a0ec139843b2bb281ab21a433266ddc498e605dc"
}
},
"closed": false,
@ -13,10 +13,10 @@
"date": "2015-09-25T00:55:08+00:00",
"links": {
"html": {
"href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc"
"href": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc"
},
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/a0ec139843b2bb281ab21a433266ddc498e605dc"
"href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/a0ec139843b2bb281ab21a433266ddc498e605dc"
}
},
"message": "Update version.py to 0.1.2 [skip ci]\n\n(by utilitybelt/scripts/autotag_version.py)\n",
@ -25,10 +25,10 @@
{
"links": {
"html": {
"href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/263736ecc250113fad56a93f83b712093554ad42"
"href": "https://bitbucket.org/somecoollabs/svc-identity/commits/263736ecc250113fad56a93f83b712093554ad42"
},
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/263736ecc250113fad56a93f83b712093554ad42"
"href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/263736ecc250113fad56a93f83b712093554ad42"
}
},
"hash": "263736ecc250113fad56a93f83b712093554ad42",
@ -37,19 +37,19 @@
],
"hash": "a0ec139843b2bb281ab21a433266ddc498e605dc",
"author": {
"raw": "scripts/autotag_version.py <utilitybelt@lightside>"
"raw": "scripts/autotag_version.py <utilitybelt@somecoollabs.com>"
}
},
"name": "0.1.2",
"links": {
"html": {
"href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/tag/0.1.2"
"href": "https://bitbucket.org/somecoollabs/svc-identity/commits/tag/0.1.2"
},
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/refs/tags/0.1.2"
"href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/refs/tags/0.1.2"
},
"commits": {
"href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commits/0.1.2"
"href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commits/0.1.2"
}
},
"type": "tag"
@ -65,53 +65,44 @@
"name": "svc-identity",
"links": {
"html": {
"href": "https://bitbucket.org/lightsidelabs/svc-identity"
"href": "https://bitbucket.org/somecoollabs/svc-identity"
},
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity"
"href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity"
},
"avatar": {
"href": "https://bitbucket.org/lightsidelabs/svc-identity/avatar/16/"
"href": "https://bitbucket.org/somecoollabs/svc-identity/avatar/16/"
}
},
"is_private": true,
"type": "repository",
"scm": "git",
"owner": {
"username": "lightsidelabs",
"account_id": "somecoollabs",
"links": {
"html": {
"href": "https://bitbucket.org/lightsidelabs/"
},
"self": {
"href": "https://api.bitbucket.org/2.0/teams/lightsidelabs"
"href": "https://api.bitbucket.org/2.0/teams/somecoollabs"
},
"avatar": {
"href": "https://bitbucket.org/account/lightsidelabs/avatar/32/"
"href": "https://bitbucket.org/account/somecoollabs/avatar/32/"
}
},
"display_name": "LightSIDE Labs",
"uuid": "{456c5f28-7338-4d89-9506-c7b889ba2d11}",
"display_name": "Some Cool Labs",
"type": "team"
},
"full_name": "lightsidelabs/svc-identity",
"uuid": "{3400bed9-5cde-45b9-8d86-c1dac5d5e610}"
"full_name": "somecoollabs/svc-identity"
},
"actor": {
"username": "LightSide_CodeShip",
"account_id": "SomeCoolLabs_CodeShip",
"links": {
"html": {
"href": "https://bitbucket.org/LightSide_CodeShip/"
},
"self": {
"href": "https://api.bitbucket.org/2.0/users/LightSide_CodeShip"
"href": "https://api.bitbucket.org/2.0/users/SomeCoolLabs_CodeShip"
},
"avatar": {
"href": "https://bitbucket.org/account/LightSide_CodeShip/avatar/32/"
"href": "https://bitbucket.org/account/SomeCoolLabs_CodeShip/avatar/32/"
}
},
"display_name": "CodeShip Tagging",
"uuid": "{d009ab20-b8b8-4840-9491-bfe72fbf666e}",
"type": "user"
}
}

View File

@ -5,19 +5,15 @@
]
},
"actor": {
"username": "SL_jwilds",
"display_name": "Jim Wilds",
"account_id": "jsmith",
"display_name": "John Smith",
"type": "user",
"uuid": "{61f851e9-afbc-4bc6-bb9b-99e23c0e765a}",
"links": {
"self": {
"href": "https:\/\/api.bitbucket.org\/2.0\/users\/SL_jwilds"
},
"html": {
"href": "https:\/\/bitbucket.org\/SL_jwilds\/"
"href": "https:\/\/api.bitbucket.org\/2.0\/users\/jsmith"
},
"avatar": {
"href": "https:\/\/bitbucket.org\/account\/SL_jwilds\/avatar\/32\/"
"href": "https:\/\/bitbucket.org\/account\/jsmith\/avatar\/32\/"
}
}
},
@ -27,52 +23,46 @@
"name": "slip-api",
"links": {
"self": {
"href": "https:\/\/api.bitbucket.org\/2.0\/repositories\/silverlinkinc\/slip-api"
"href": "https:\/\/api.bitbucket.org\/2.0\/repositories\/goldcuff\/slip-api"
},
"html": {
"href": "https:\/\/bitbucket.org\/silverlinkinc\/slip-api"
"href": "https:\/\/bitbucket.org\/goldcuff\/slip-api"
},
"avatar": {
"href": "https:\/\/bitbucket.org\/silverlinkinc\/slip-api\/avatar\/32\/"
"href": "https:\/\/bitbucket.org\/goldcuff\/slip-api\/avatar\/32\/"
}
},
"project": {
"links": {
"self": {
"href": "https:\/\/api.bitbucket.org\/2.0\/teams\/silverlinkinc\/projects\/SLIP"
"href": "https:\/\/api.bitbucket.org\/2.0\/teams\/goldcuff\/projects\/SLIP"
},
"html": {
"href": "https:\/\/bitbucket.org\/account\/user\/silverlinkinc\/projects\/SLIP"
"href": "https:\/\/bitbucket.org\/account\/user\/goldcuff\/projects\/SLIP"
},
"avatar": {
"href": "https:\/\/bitbucket.org\/account\/user\/silverlinkinc\/projects\/SLIP\/avatar\/32"
"href": "https:\/\/bitbucket.org\/account\/user\/goldcuff\/projects\/SLIP\/avatar\/32"
}
},
"type": "project",
"name": "SLIP",
"key": "SLIP",
"uuid": "{f5ba67c5-3585-453b-9412-77e4dc15be29}"
"key": "SLIP"
},
"full_name": "silverlinkinc\/slip-api",
"full_name": "goldcuff\/slip-api",
"owner": {
"username": "silverlinkinc",
"display_name": "Silverlink",
"account_id": "goldcuff",
"display_name": "Goldcuff",
"type": "team",
"uuid": "{9c4ce5f2-79fe-4906-9451-41fcac6bb293}",
"links": {
"self": {
"href": "https:\/\/api.bitbucket.org\/2.0\/teams\/silverlinkinc"
},
"html": {
"href": "https:\/\/bitbucket.org\/silverlinkinc\/"
"href": "https:\/\/api.bitbucket.org\/2.0\/teams\/goldcuff"
},
"avatar": {
"href": "https:\/\/bitbucket.org\/account\/silverlinkinc\/avatar\/32\/"
"href": "https:\/\/bitbucket.org\/account\/goldcuff\/avatar\/32\/"
}
}
},
"type": "repository",
"is_private": true,
"uuid": "{59183493-0e4a-47aa-b069-be60adce4092}"
"is_private": true
}
}

View File

@ -4,67 +4,66 @@
{
"links": {
"commits": {
"href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commits?include=af64ae7188685f8424040b4735ad12941b980d75&exclude=1784139225279a587e0afb151bed1f9ba3dd509e"
"href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commits?include=af64ae7188685f8424040b4735ad12941b980d75&exclude=1784139225279a587e0afb151bed1f9ba3dd509e"
},
"diff": {
"href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/diff/af64ae7188685f8424040b4735ad12941b980d75..1784139225279a587e0afb151bed1f9ba3dd509e"
"href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/diff/af64ae7188685f8424040b4735ad12941b980d75..1784139225279a587e0afb151bed1f9ba3dd509e"
},
"html": {
"href": "https://bitbucket.org/jscoreos/another-repo/branches/compare/af64ae7188685f8424040b4735ad12941b980d75..1784139225279a587e0afb151bed1f9ba3dd509e"
"href": "https://bitbucket.org/jsmith/another-repo/branches/compare/af64ae7188685f8424040b4735ad12941b980d75..1784139225279a587e0afb151bed1f9ba3dd509e"
}
},
"old": {
"name": "master",
"links": {
"commits": {
"href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commits/master"
"href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commits/master"
},
"html": {
"href": "https://bitbucket.org/jscoreos/another-repo/branch/master"
"href": "https://bitbucket.org/jsmith/another-repo/branch/master"
},
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/refs/branches/master"
"href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/refs/branches/master"
}
},
"type": "branch",
"target": {
"links": {
"html": {
"href": "https://bitbucket.org/jscoreos/another-repo/commits/1784139225279a587e0afb151bed1f9ba3dd509e"
"href": "https://bitbucket.org/jsmith/another-repo/commits/1784139225279a587e0afb151bed1f9ba3dd509e"
},
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commit/1784139225279a587e0afb151bed1f9ba3dd509e"
"href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commit/1784139225279a587e0afb151bed1f9ba3dd509e"
}
},
"author": {
"user": {
"links": {
"avatar": {
"href": "https://bitbucket.org/account/jscoreos/avatar/32/"
"href": "https://bitbucket.org/account/jsmith/avatar/32/"
},
"html": {
"href": "https://bitbucket.org/jscoreos/"
"href": "https://bitbucket.org/jsmith/"
},
"self": {
"href": "https://api.bitbucket.org/2.0/users/jscoreos"
"href": "https://api.bitbucket.org/2.0/users/jsmith"
}
},
"uuid": "{2fa27577-f361-45bb-999a-f4450c546b73}",
"type": "user",
"display_name": "Joseph Schorr",
"username": "jscoreos"
"display_name": "John Smith",
"account_id": "jsmith"
},
"raw": "Joseph Schorr <joseph.schorr@coreos.com>"
"raw": "John Smith <j@smith.com>"
},
"date": "2015-09-10T20:37:54+00:00",
"parents": [
{
"links": {
"html": {
"href": "https://bitbucket.org/jscoreos/another-repo/commits/5329daa0961ec968de9ef36f30024bfa0da73103"
"href": "https://bitbucket.org/jsmith/another-repo/commits/5329daa0961ec968de9ef36f30024bfa0da73103"
},
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commit/5329daa0961ec968de9ef36f30024bfa0da73103"
"href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commit/5329daa0961ec968de9ef36f30024bfa0da73103"
}
},
"type": "commit",
@ -84,28 +83,24 @@
"user": {
"links": {
"avatar": {
"href": "https://bitbucket.org/account/jscoreos/avatar/32/"
},
"html": {
"href": "https://bitbucket.org/jscoreos/"
"href": "https://bitbucket.org/account/jsmith/avatar/32/"
},
"self": {
"href": "https://api.bitbucket.org/2.0/users/jscoreos"
"href": "https://api.bitbucket.org/2.0/users/jsmith"
}
},
"uuid": "{2fa27577-f361-45bb-999a-f4450c546b73}",
"type": "user",
"display_name": "Joseph Schorr",
"username": "jscoreos"
"display_name": "John Smith",
"account_id": "jsmith"
},
"raw": "Joseph Schorr <joseph.schorr@coreos.com>"
"raw": "John Smith <j@smith.com>"
},
"links": {
"html": {
"href": "https://bitbucket.org/jscoreos/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75"
"href": "https://bitbucket.org/jsmith/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75"
},
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commit/af64ae7188685f8424040b4735ad12941b980d75"
"href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commit/af64ae7188685f8424040b4735ad12941b980d75"
}
},
"message": "Dockerfile edited online with Bitbucket",
@ -117,54 +112,50 @@
"name": "master",
"links": {
"commits": {
"href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commits/master"
"href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commits/master"
},
"html": {
"href": "https://bitbucket.org/jscoreos/another-repo/branch/master"
"href": "https://bitbucket.org/jsmith/another-repo/branch/master"
},
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/refs/branches/master"
"href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/refs/branches/master"
}
},
"type": "branch",
"target": {
"links": {
"html": {
"href": "https://bitbucket.org/jscoreos/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75"
"href": "https://bitbucket.org/jsmith/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75"
},
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commit/af64ae7188685f8424040b4735ad12941b980d75"
"href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commit/af64ae7188685f8424040b4735ad12941b980d75"
}
},
"author": {
"user": {
"links": {
"avatar": {
"href": "https://bitbucket.org/account/jscoreos/avatar/32/"
},
"html": {
"href": "https://bitbucket.org/jscoreos/"
"href": "https://bitbucket.org/account/jsmith/avatar/32/"
},
"self": {
"href": "https://api.bitbucket.org/2.0/users/jscoreos"
"href": "https://api.bitbucket.org/2.0/users/jsmith"
}
},
"uuid": "{2fa27577-f361-45bb-999a-f4450c546b73}",
"type": "user",
"display_name": "Joseph Schorr",
"username": "jscoreos"
"display_name": "John Smith",
"account_id": "jsmith"
},
"raw": "Joseph Schorr <joseph.schorr@coreos.com>"
"raw": "John Smith <j@smith.com>"
},
"date": "2015-09-10T20:40:54+00:00",
"parents": [
{
"links": {
"html": {
"href": "https://bitbucket.org/jscoreos/another-repo/commits/1784139225279a587e0afb151bed1f9ba3dd509e"
"href": "https://bitbucket.org/jsmith/another-repo/commits/1784139225279a587e0afb151bed1f9ba3dd509e"
},
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commit/1784139225279a587e0afb151bed1f9ba3dd509e"
"href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commit/1784139225279a587e0afb151bed1f9ba3dd509e"
}
},
"type": "commit",
@ -184,54 +175,45 @@
"repository": {
"links": {
"avatar": {
"href": "https://bitbucket.org/jscoreos/another-repo/avatar/16/"
"href": "https://bitbucket.org/jsmith/another-repo/avatar/16/"
},
"html": {
"href": "https://bitbucket.org/jscoreos/another-repo"
"href": "https://bitbucket.org/jsmith/another-repo"
},
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo"
"href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo"
}
},
"full_name": "jscoreos/another-repo",
"uuid": "{b3459203-3e58-497b-8059-ad087b6b01de}",
"full_name": "jsmith/another-repo",
"type": "repository",
"is_private": true,
"name": "Another Repo",
"owner": {
"links": {
"avatar": {
"href": "https://bitbucket.org/account/jscoreos/avatar/32/"
},
"html": {
"href": "https://bitbucket.org/jscoreos/"
"href": "https://bitbucket.org/account/jsmith/avatar/32/"
},
"self": {
"href": "https://api.bitbucket.org/2.0/users/jscoreos"
"href": "https://api.bitbucket.org/2.0/users/jsmith"
}
},
"uuid": "{2fa27577-f361-45bb-999a-f4450c546b73}",
"type": "user",
"display_name": "Joseph Schorr",
"username": "jscoreos"
"display_name": "John Smith",
"account_id": "jsmith"
},
"scm": "git"
},
"actor": {
"links": {
"avatar": {
"href": "https://bitbucket.org/account/jscoreos/avatar/32/"
},
"html": {
"href": "https://bitbucket.org/jscoreos/"
"href": "https://bitbucket.org/account/jsmith/avatar/32/"
},
"self": {
"href": "https://api.bitbucket.org/2.0/users/jscoreos"
"href": "https://api.bitbucket.org/2.0/users/jsmith"
}
},
"uuid": "{2fa27577-f361-45bb-999a-f4450c546b73}",
"type": "user",
"display_name": "Joseph Schorr",
"username": "jscoreos"
"display_name": "John Smith",
"account_id": "jsmith"
}
}

View File

@ -6,23 +6,23 @@
"deleted": false,
"forced": false,
"base_ref": null,
"compare": "https://github.com/josephschorr/anothertest/compare/9ea43cab4747...410f4cdf8ff0",
"compare": "https://github.com/jsmith/anothertest/compare/9ea43cab4747...410f4cdf8ff0",
"commits": [
{
"id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"distinct": true,
"message": "Update Dockerfile",
"timestamp": "2015-09-11T14:26:16-04:00",
"url": "https://github.com/josephschorr/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"author": {
"name": "josephschorr",
"email": "josephschorr@users.noreply.github.com",
"username": "josephschorr"
"name": "jsmith",
"email": "jsmith@users.noreply.github.com",
"username": "jsmith"
},
"committer": {
"name": "josephschorr",
"email": "josephschorr@users.noreply.github.com",
"username": "josephschorr"
"name": "jsmith",
"email": "jsmith@users.noreply.github.com",
"username": "jsmith"
},
"added": [],
"removed": [],
@ -36,16 +36,16 @@
"distinct": true,
"message": "Update Dockerfile",
"timestamp": "2015-09-11T14:26:16-04:00",
"url": "https://github.com/josephschorr/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"author": {
"name": "josephschorr",
"email": "josephschorr@users.noreply.github.com",
"username": "josephschorr"
"name": "jsmith",
"email": "jsmith@users.noreply.github.com",
"username": "jsmith"
},
"committer": {
"name": "josephschorr",
"email": "josephschorr@users.noreply.github.com",
"username": "josephschorr"
"name": "jsmith",
"email": "jsmith@users.noreply.github.com",
"username": "jsmith"
},
"added": [],
"removed": [],
@ -54,60 +54,60 @@
]
},
"repository": {
"id": 34876107,
"id": 1234567,
"name": "anothertest",
"full_name": "josephschorr/anothertest",
"full_name": "jsmith/anothertest",
"owner": {
"name": "josephschorr",
"email": "josephschorr@users.noreply.github.com"
"name": "jsmith",
"email": "jsmith@users.noreply.github.com"
},
"private": false,
"html_url": "https://github.com/josephschorr/anothertest",
"html_url": "https://github.com/jsmith/anothertest",
"description": "",
"fork": false,
"url": "https://github.com/josephschorr/anothertest",
"forks_url": "https://api.github.com/repos/josephschorr/anothertest/forks",
"keys_url": "https://api.github.com/repos/josephschorr/anothertest/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/josephschorr/anothertest/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/josephschorr/anothertest/teams",
"hooks_url": "https://api.github.com/repos/josephschorr/anothertest/hooks",
"issue_events_url": "https://api.github.com/repos/josephschorr/anothertest/issues/events{/number}",
"events_url": "https://api.github.com/repos/josephschorr/anothertest/events",
"assignees_url": "https://api.github.com/repos/josephschorr/anothertest/assignees{/user}",
"branches_url": "https://api.github.com/repos/josephschorr/anothertest/branches{/branch}",
"tags_url": "https://api.github.com/repos/josephschorr/anothertest/tags",
"blobs_url": "https://api.github.com/repos/josephschorr/anothertest/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/josephschorr/anothertest/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/josephschorr/anothertest/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/josephschorr/anothertest/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/josephschorr/anothertest/statuses/{sha}",
"languages_url": "https://api.github.com/repos/josephschorr/anothertest/languages",
"stargazers_url": "https://api.github.com/repos/josephschorr/anothertest/stargazers",
"contributors_url": "https://api.github.com/repos/josephschorr/anothertest/contributors",
"subscribers_url": "https://api.github.com/repos/josephschorr/anothertest/subscribers",
"subscription_url": "https://api.github.com/repos/josephschorr/anothertest/subscription",
"commits_url": "https://api.github.com/repos/josephschorr/anothertest/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/josephschorr/anothertest/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/josephschorr/anothertest/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/josephschorr/anothertest/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/josephschorr/anothertest/contents/{+path}",
"compare_url": "https://api.github.com/repos/josephschorr/anothertest/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/josephschorr/anothertest/merges",
"archive_url": "https://api.github.com/repos/josephschorr/anothertest/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/josephschorr/anothertest/downloads",
"issues_url": "https://api.github.com/repos/josephschorr/anothertest/issues{/number}",
"pulls_url": "https://api.github.com/repos/josephschorr/anothertest/pulls{/number}",
"milestones_url": "https://api.github.com/repos/josephschorr/anothertest/milestones{/number}",
"notifications_url": "https://api.github.com/repos/josephschorr/anothertest/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/josephschorr/anothertest/labels{/name}",
"releases_url": "https://api.github.com/repos/josephschorr/anothertest/releases{/id}",
"url": "https://github.com/jsmith/anothertest",
"forks_url": "https://api.github.com/repos/jsmith/anothertest/forks",
"keys_url": "https://api.github.com/repos/jsmith/anothertest/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/jsmith/anothertest/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/jsmith/anothertest/teams",
"hooks_url": "https://api.github.com/repos/jsmith/anothertest/hooks",
"issue_events_url": "https://api.github.com/repos/jsmith/anothertest/issues/events{/number}",
"events_url": "https://api.github.com/repos/jsmith/anothertest/events",
"assignees_url": "https://api.github.com/repos/jsmith/anothertest/assignees{/user}",
"branches_url": "https://api.github.com/repos/jsmith/anothertest/branches{/branch}",
"tags_url": "https://api.github.com/repos/jsmith/anothertest/tags",
"blobs_url": "https://api.github.com/repos/jsmith/anothertest/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/jsmith/anothertest/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/jsmith/anothertest/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/jsmith/anothertest/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/jsmith/anothertest/statuses/{sha}",
"languages_url": "https://api.github.com/repos/jsmith/anothertest/languages",
"stargazers_url": "https://api.github.com/repos/jsmith/anothertest/stargazers",
"contributors_url": "https://api.github.com/repos/jsmith/anothertest/contributors",
"subscribers_url": "https://api.github.com/repos/jsmith/anothertest/subscribers",
"subscription_url": "https://api.github.com/repos/jsmith/anothertest/subscription",
"commits_url": "https://api.github.com/repos/jsmith/anothertest/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/jsmith/anothertest/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/jsmith/anothertest/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/jsmith/anothertest/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/jsmith/anothertest/contents/{+path}",
"compare_url": "https://api.github.com/repos/jsmith/anothertest/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/jsmith/anothertest/merges",
"archive_url": "https://api.github.com/repos/jsmith/anothertest/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/jsmith/anothertest/downloads",
"issues_url": "https://api.github.com/repos/jsmith/anothertest/issues{/number}",
"pulls_url": "https://api.github.com/repos/jsmith/anothertest/pulls{/number}",
"milestones_url": "https://api.github.com/repos/jsmith/anothertest/milestones{/number}",
"notifications_url": "https://api.github.com/repos/jsmith/anothertest/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/jsmith/anothertest/labels{/name}",
"releases_url": "https://api.github.com/repos/jsmith/anothertest/releases{/id}",
"created_at": 1430426945,
"updated_at": "2015-04-30T20:49:05Z",
"pushed_at": 1441995976,
"git_url": "git://github.com/josephschorr/anothertest.git",
"ssh_url": "git@github.com:josephschorr/anothertest.git",
"clone_url": "https://github.com/josephschorr/anothertest.git",
"svn_url": "https://github.com/josephschorr/anothertest",
"git_url": "git://github.com/jsmith/anothertest.git",
"ssh_url": "git@github.com:jsmith/anothertest.git",
"clone_url": "https://github.com/jsmith/anothertest.git",
"svn_url": "https://github.com/jsmith/anothertest",
"homepage": null,
"size": 144,
"stargazers_count": 0,
@ -128,25 +128,25 @@
"master_branch": "master"
},
"pusher": {
"name": "josephschorr",
"email": "josephschorr@users.noreply.github.com"
"name": "jsmith",
"email": "jsmith@users.noreply.github.com"
},
"sender": {
"login": "josephschorr",
"id": 4073002,
"avatar_url": "https://avatars.githubusercontent.com/u/4073002?v=3",
"login": "jsmith",
"id": 1234567,
"avatar_url": "https://avatars.githubusercontent.com/u/1234567?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/josephschorr",
"html_url": "https://github.com/josephschorr",
"followers_url": "https://api.github.com/users/josephschorr/followers",
"following_url": "https://api.github.com/users/josephschorr/following{/other_user}",
"gists_url": "https://api.github.com/users/josephschorr/gists{/gist_id}",
"starred_url": "https://api.github.com/users/josephschorr/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/josephschorr/subscriptions",
"organizations_url": "https://api.github.com/users/josephschorr/orgs",
"repos_url": "https://api.github.com/users/josephschorr/repos",
"events_url": "https://api.github.com/users/josephschorr/events{/privacy}",
"received_events_url": "https://api.github.com/users/josephschorr/received_events",
"url": "https://api.github.com/users/jsmith",
"html_url": "https://github.com/jsmith",
"followers_url": "https://api.github.com/users/jsmith/followers",
"following_url": "https://api.github.com/users/jsmith/following{/other_user}",
"gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jsmith/subscriptions",
"organizations_url": "https://api.github.com/users/jsmith/orgs",
"repos_url": "https://api.github.com/users/jsmith/repos",
"events_url": "https://api.github.com/users/jsmith/events{/privacy}",
"received_events_url": "https://api.github.com/users/jsmith/received_events",
"type": "User",
"site_admin": false
}

View File

@ -6,23 +6,23 @@
"deleted": false,
"forced": false,
"base_ref": null,
"compare": "https://github.com/coreos-inc/quay/compare/c7fa613b99d5...456806b662cb",
"compare": "https://github.com/jsmith/somerepo/compare/c7fa613b99d5...456806b662cb",
"commits": [
{
"id": "e00365b225ad7f454982e9198756cc1ab5dc4428",
"distinct": true,
"message": "Assign the exception to a variable to log it",
"timestamp": "2015-12-08T18:03:48-05:00",
"url": "https://github.com/coreos-inc/quay/commit/e00365b225ad7f454982e9198756cc1ab5dc4428",
"url": "https://github.com/jsmith/somerepo/commit/e00365b225ad7f454982e9198756cc1ab5dc4428",
"author": {
"name": "Jake Moshenko",
"email": "jake.moshenko@coreos.com",
"username": "jakedt"
"name": "John Smith",
"email": "j@smith.com",
"username": "jsmith"
},
"committer": {
"name": "Jake Moshenko",
"email": "jake.moshenko@coreos.com",
"username": "jakedt"
"name": "John Smith",
"email": "j@smith.com",
"username": "jsmith"
},
"added": [
@ -37,18 +37,18 @@
{
"id": "456806b662cb903a0febbaed8344f3ed42f27bab",
"distinct": true,
"message": "Merge pull request #1044 from jakedt/errerror\n\nAssign the exception to a variable to log it",
"message": "Merge pull request #1044 from jsmith/errerror\n\nAssign the exception to a variable to log it",
"timestamp": "2015-12-08T18:07:03-05:00",
"url": "https://github.com/coreos-inc/quay/commit/456806b662cb903a0febbaed8344f3ed42f27bab",
"url": "https://github.com/jsmith/somerepo/commit/456806b662cb903a0febbaed8344f3ed42f27bab",
"author": {
"name": "Jake Moshenko",
"email": "jake.moshenko@coreos.com",
"username": "jakedt"
"name": "John Smith",
"email": "j@smith.com",
"username": "jsmith"
},
"committer": {
"name": "Jake Moshenko",
"email": "jake.moshenko@coreos.com",
"username": "jakedt"
"name": "John Smith",
"email": "j@smith.com",
"username": "jsmith"
},
"added": [
@ -64,18 +64,18 @@
"head_commit": {
"id": "456806b662cb903a0febbaed8344f3ed42f27bab",
"distinct": true,
"message": "Merge pull request #1044 from jakedt/errerror\n\nAssign the exception to a variable to log it",
"message": "Merge pull request #1044 from jsmith/errerror\n\nAssign the exception to a variable to log it",
"timestamp": "2015-12-08T18:07:03-05:00",
"url": "https://github.com/coreos-inc/quay/commit/456806b662cb903a0febbaed8344f3ed42f27bab",
"url": "https://github.com/jsmith/somerepo/commit/456806b662cb903a0febbaed8344f3ed42f27bab",
"author": {
"name": "Jake Moshenko",
"email": "jake.moshenko@coreos.com",
"username": "jakedt"
"name": "John Smith",
"email": "j@smith.com",
"username": "jsmith"
},
"committer": {
"name": "Jake Moshenko",
"email": "jake.moshenko@coreos.com",
"username": "jakedt"
"name": "John Smith",
"email": "j@smith.com",
"username": "jsmith"
},
"added": [
@ -88,60 +88,60 @@
]
},
"repository": {
"id": 26143156,
"name": "quay",
"full_name": "coreos-inc/quay",
"id": 12345678,
"name": "somerepo",
"full_name": "jsmith/somerepo",
"owner": {
"name": "coreos-inc",
"name": "jsmith",
"email": null
},
"private": true,
"html_url": "https://github.com/coreos-inc/quay",
"description": "Quay Registry",
"html_url": "https://github.com/jsmith/somerepo",
"description": "Some Cool Repo",
"fork": false,
"url": "https://github.com/coreos-inc/quay",
"forks_url": "https://api.github.com/repos/coreos-inc/quay/forks",
"keys_url": "https://api.github.com/repos/coreos-inc/quay/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/coreos-inc/quay/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/coreos-inc/quay/teams",
"hooks_url": "https://api.github.com/repos/coreos-inc/quay/hooks",
"issue_events_url": "https://api.github.com/repos/coreos-inc/quay/issues/events{/number}",
"events_url": "https://api.github.com/repos/coreos-inc/quay/events",
"assignees_url": "https://api.github.com/repos/coreos-inc/quay/assignees{/user}",
"branches_url": "https://api.github.com/repos/coreos-inc/quay/branches{/branch}",
"tags_url": "https://api.github.com/repos/coreos-inc/quay/tags",
"blobs_url": "https://api.github.com/repos/coreos-inc/quay/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/coreos-inc/quay/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/coreos-inc/quay/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/coreos-inc/quay/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/coreos-inc/quay/statuses/{sha}",
"languages_url": "https://api.github.com/repos/coreos-inc/quay/languages",
"stargazers_url": "https://api.github.com/repos/coreos-inc/quay/stargazers",
"contributors_url": "https://api.github.com/repos/coreos-inc/quay/contributors",
"subscribers_url": "https://api.github.com/repos/coreos-inc/quay/subscribers",
"subscription_url": "https://api.github.com/repos/coreos-inc/quay/subscription",
"commits_url": "https://api.github.com/repos/coreos-inc/quay/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/coreos-inc/quay/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/coreos-inc/quay/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/coreos-inc/quay/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/coreos-inc/quay/contents/{+path}",
"compare_url": "https://api.github.com/repos/coreos-inc/quay/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/coreos-inc/quay/merges",
"archive_url": "https://api.github.com/repos/coreos-inc/quay/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/coreos-inc/quay/downloads",
"issues_url": "https://api.github.com/repos/coreos-inc/quay/issues{/number}",
"pulls_url": "https://api.github.com/repos/coreos-inc/quay/pulls{/number}",
"milestones_url": "https://api.github.com/repos/coreos-inc/quay/milestones{/number}",
"notifications_url": "https://api.github.com/repos/coreos-inc/quay/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/coreos-inc/quay/labels{/name}",
"releases_url": "https://api.github.com/repos/coreos-inc/quay/releases{/id}",
"url": "https://github.com/jsmith/somerepo",
"forks_url": "https://api.github.com/repos/jsmith/somerepo/forks",
"keys_url": "https://api.github.com/repos/jsmith/somerepo/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/jsmith/somerepo/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/jsmith/somerepo/teams",
"hooks_url": "https://api.github.com/repos/jsmith/somerepo/hooks",
"issue_events_url": "https://api.github.com/repos/jsmith/somerepo/issues/events{/number}",
"events_url": "https://api.github.com/repos/jsmith/somerepo/events",
"assignees_url": "https://api.github.com/repos/jsmith/somerepo/assignees{/user}",
"branches_url": "https://api.github.com/repos/jsmith/somerepo/branches{/branch}",
"tags_url": "https://api.github.com/repos/jsmith/somerepo/tags",
"blobs_url": "https://api.github.com/repos/jsmith/somerepo/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/jsmith/somerepo/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/jsmith/somerepo/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/jsmith/somerepo/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/jsmith/somerepo/statuses/{sha}",
"languages_url": "https://api.github.com/repos/jsmith/somerepo/languages",
"stargazers_url": "https://api.github.com/repos/jsmith/somerepo/stargazers",
"contributors_url": "https://api.github.com/repos/jsmith/somerepo/contributors",
"subscribers_url": "https://api.github.com/repos/jsmith/somerepo/subscribers",
"subscription_url": "https://api.github.com/repos/jsmith/somerepo/subscription",
"commits_url": "https://api.github.com/repos/jsmith/somerepo/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/jsmith/somerepo/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/jsmith/somerepo/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/jsmith/somerepo/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/jsmith/somerepo/contents/{+path}",
"compare_url": "https://api.github.com/repos/jsmith/somerepo/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/jsmith/somerepo/merges",
"archive_url": "https://api.github.com/repos/jsmith/somerepo/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/jsmith/somerepo/downloads",
"issues_url": "https://api.github.com/repos/jsmith/somerepo/issues{/number}",
"pulls_url": "https://api.github.com/repos/jsmith/somerepo/pulls{/number}",
"milestones_url": "https://api.github.com/repos/jsmith/somerepo/milestones{/number}",
"notifications_url": "https://api.github.com/repos/jsmith/somerepo/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/jsmith/somerepo/labels{/name}",
"releases_url": "https://api.github.com/repos/jsmith/somerepo/releases{/id}",
"created_at": 1415056063,
"updated_at": "2015-11-12T05:16:51Z",
"pushed_at": 1449616023,
"git_url": "git://github.com/coreos-inc/quay.git",
"ssh_url": "git@github.com:coreos-inc/quay.git",
"clone_url": "https://github.com/coreos-inc/quay.git",
"svn_url": "https://github.com/coreos-inc/quay",
"git_url": "git://github.com/jsmith/somerepo.git",
"ssh_url": "git@github.com:jsmith/somerepo.git",
"clone_url": "https://github.com/jsmith/somerepo.git",
"svn_url": "https://github.com/jsmith/somerepo",
"homepage": "",
"size": 183677,
"stargazers_count": 3,
@ -160,39 +160,39 @@
"default_branch": "master",
"stargazers": 3,
"master_branch": "master",
"organization": "coreos-inc"
"organization": "jsmith"
},
"pusher": {
"name": "jakedt",
"email": "jake.moshenko@coreos.com"
"name": "jsmith",
"email": "j@smith.com"
},
"organization": {
"login": "coreos-inc",
"id": 5504624,
"url": "https://api.github.com/orgs/coreos-inc",
"repos_url": "https://api.github.com/orgs/coreos-inc/repos",
"events_url": "https://api.github.com/orgs/coreos-inc/events",
"members_url": "https://api.github.com/orgs/coreos-inc/members{/member}",
"public_members_url": "https://api.github.com/orgs/coreos-inc/public_members{/member}",
"login": "jsmith",
"id": 9876543,
"url": "https://api.github.com/orgs/jsmith",
"repos_url": "https://api.github.com/orgs/jsmith/repos",
"events_url": "https://api.github.com/orgs/jsmith/events",
"members_url": "https://api.github.com/orgs/jsmith/members{/member}",
"public_members_url": "https://api.github.com/orgs/jsmith/public_members{/member}",
"avatar_url": "https://avatars.githubusercontent.com/u/5504624?v=3",
"description": null
},
"sender": {
"login": "jakedt",
"id": 2183986,
"avatar_url": "https://avatars.githubusercontent.com/u/2183986?v=3",
"login": "jsmith",
"id": 1234567,
"avatar_url": "https://avatars.githubusercontent.com/u/000000?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/jakedt",
"html_url": "https://github.com/jakedt",
"followers_url": "https://api.github.com/users/jakedt/followers",
"following_url": "https://api.github.com/users/jakedt/following{/other_user}",
"gists_url": "https://api.github.com/users/jakedt/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jakedt/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jakedt/subscriptions",
"organizations_url": "https://api.github.com/users/jakedt/orgs",
"repos_url": "https://api.github.com/users/jakedt/repos",
"events_url": "https://api.github.com/users/jakedt/events{/privacy}",
"received_events_url": "https://api.github.com/users/jakedt/received_events",
"url": "https://api.github.com/users/jsmith",
"html_url": "https://github.com/jsmith",
"followers_url": "https://api.github.com/users/jsmith/followers",
"following_url": "https://api.github.com/users/jsmith/following{/other_user}",
"gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jsmith/subscriptions",
"organizations_url": "https://api.github.com/users/jsmith/orgs",
"repos_url": "https://api.github.com/users/jsmith/repos",
"events_url": "https://api.github.com/users/jsmith/events{/privacy}",
"received_events_url": "https://api.github.com/users/jsmith/received_events",
"type": "User",
"site_admin": false
}

View File

@ -6,67 +6,67 @@
"deleted": true,
"forced": false,
"base_ref": null,
"compare": "https://github.com/coreos/clair-jwt-docker/compare/76a309ed96c7...000000000000",
"compare": "https://github.com/jsmith/somerepo/compare/76a309ed96c7...000000000000",
"commits": [
],
"head_commit": null,
"repository": {
"id": 56698571,
"name": "clair-jwt-docker",
"full_name": "coreos/clair-jwt-docker",
"id": 12345678,
"name": "somerepo",
"full_name": "jsmith/somerepo",
"owner": {
"name": "coreos",
"email": "partners@coreos.com"
"name": "jsmith",
"email": "j@smith.com"
},
"private": true,
"html_url": "https://github.com/coreos/clair-jwt-docker",
"description": "Dockerfile for Clair running behind jwtproxy",
"html_url": "https://github.com/jsmith/somerepo",
"description": "Dockerfile for some repo",
"fork": false,
"url": "https://github.com/coreos/clair-jwt-docker",
"forks_url": "https://api.github.com/repos/coreos/clair-jwt-docker/forks",
"keys_url": "https://api.github.com/repos/coreos/clair-jwt-docker/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/coreos/clair-jwt-docker/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/coreos/clair-jwt-docker/teams",
"hooks_url": "https://api.github.com/repos/coreos/clair-jwt-docker/hooks",
"issue_events_url": "https://api.github.com/repos/coreos/clair-jwt-docker/issues/events{/number}",
"events_url": "https://api.github.com/repos/coreos/clair-jwt-docker/events",
"assignees_url": "https://api.github.com/repos/coreos/clair-jwt-docker/assignees{/user}",
"branches_url": "https://api.github.com/repos/coreos/clair-jwt-docker/branches{/branch}",
"tags_url": "https://api.github.com/repos/coreos/clair-jwt-docker/tags",
"blobs_url": "https://api.github.com/repos/coreos/clair-jwt-docker/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/coreos/clair-jwt-docker/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/coreos/clair-jwt-docker/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/coreos/clair-jwt-docker/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/coreos/clair-jwt-docker/statuses/{sha}",
"languages_url": "https://api.github.com/repos/coreos/clair-jwt-docker/languages",
"stargazers_url": "https://api.github.com/repos/coreos/clair-jwt-docker/stargazers",
"contributors_url": "https://api.github.com/repos/coreos/clair-jwt-docker/contributors",
"subscribers_url": "https://api.github.com/repos/coreos/clair-jwt-docker/subscribers",
"subscription_url": "https://api.github.com/repos/coreos/clair-jwt-docker/subscription",
"commits_url": "https://api.github.com/repos/coreos/clair-jwt-docker/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/coreos/clair-jwt-docker/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/coreos/clair-jwt-docker/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/coreos/clair-jwt-docker/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/coreos/clair-jwt-docker/contents/{+path}",
"compare_url": "https://api.github.com/repos/coreos/clair-jwt-docker/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/coreos/clair-jwt-docker/merges",
"archive_url": "https://api.github.com/repos/coreos/clair-jwt-docker/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/coreos/clair-jwt-docker/downloads",
"issues_url": "https://api.github.com/repos/coreos/clair-jwt-docker/issues{/number}",
"pulls_url": "https://api.github.com/repos/coreos/clair-jwt-docker/pulls{/number}",
"milestones_url": "https://api.github.com/repos/coreos/clair-jwt-docker/milestones{/number}",
"notifications_url": "https://api.github.com/repos/coreos/clair-jwt-docker/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/coreos/clair-jwt-docker/labels{/name}",
"releases_url": "https://api.github.com/repos/coreos/clair-jwt-docker/releases{/id}",
"deployments_url": "https://api.github.com/repos/coreos/clair-jwt-docker/deployments",
"url": "https://github.com/jsmith/somerepo",
"forks_url": "https://api.github.com/repos/jsmith/somerepo/forks",
"keys_url": "https://api.github.com/repos/jsmith/somerepo/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/jsmith/somerepo/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/jsmith/somerepo/teams",
"hooks_url": "https://api.github.com/repos/jsmith/somerepo/hooks",
"issue_events_url": "https://api.github.com/repos/jsmith/somerepo/issues/events{/number}",
"events_url": "https://api.github.com/repos/jsmith/somerepo/events",
"assignees_url": "https://api.github.com/repos/jsmith/somerepo/assignees{/user}",
"branches_url": "https://api.github.com/repos/jsmith/somerepo/branches{/branch}",
"tags_url": "https://api.github.com/repos/jsmith/somerepo/tags",
"blobs_url": "https://api.github.com/repos/jsmith/somerepo/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/jsmith/somerepo/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/jsmith/somerepo/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/jsmith/somerepo/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/jsmith/somerepo/statuses/{sha}",
"languages_url": "https://api.github.com/repos/jsmith/somerepo/languages",
"stargazers_url": "https://api.github.com/repos/jsmith/somerepo/stargazers",
"contributors_url": "https://api.github.com/repos/jsmith/somerepo/contributors",
"subscribers_url": "https://api.github.com/repos/jsmith/somerepo/subscribers",
"subscription_url": "https://api.github.com/repos/jsmith/somerepo/subscription",
"commits_url": "https://api.github.com/repos/jsmith/somerepo/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/jsmith/somerepo/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/jsmith/somerepo/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/jsmith/somerepo/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/jsmith/somerepo/contents/{+path}",
"compare_url": "https://api.github.com/repos/jsmith/somerepo/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/jsmith/somerepo/merges",
"archive_url": "https://api.github.com/repos/jsmith/somerepo/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/jsmith/somerepo/downloads",
"issues_url": "https://api.github.com/repos/jsmith/somerepo/issues{/number}",
"pulls_url": "https://api.github.com/repos/jsmith/somerepo/pulls{/number}",
"milestones_url": "https://api.github.com/repos/jsmith/somerepo/milestones{/number}",
"notifications_url": "https://api.github.com/repos/jsmith/somerepo/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/jsmith/somerepo/labels{/name}",
"releases_url": "https://api.github.com/repos/jsmith/somerepo/releases{/id}",
"deployments_url": "https://api.github.com/repos/jsmith/somerepo/deployments",
"created_at": 1461165926,
"updated_at": "2016-11-03T18:20:01Z",
"pushed_at": 1479313569,
"git_url": "git://github.com/coreos/clair-jwt-docker.git",
"ssh_url": "git@github.com:coreos/clair-jwt-docker.git",
"clone_url": "https://github.com/coreos/clair-jwt-docker.git",
"svn_url": "https://github.com/coreos/clair-jwt-docker",
"git_url": "git://github.com/jsmith/somerepo.git",
"ssh_url": "git@github.com:jsmith/somerepo.git",
"clone_url": "https://github.com/jsmith/somerepo.git",
"svn_url": "https://github.com/jsmith/somerepo",
"homepage": "",
"size": 3114,
"stargazers_count": 0,
@ -85,41 +85,41 @@
"default_branch": "master",
"stargazers": 0,
"master_branch": "master",
"organization": "coreos"
"organization": "jsmith"
},
"pusher": {
"name": "jzelinskie",
"email": "jimmy.zelinskie+git@gmail.com"
"name": "jsmith",
"email": "j@smith.com"
},
"organization": {
"login": "coreos",
"id": 3730757,
"url": "https://api.github.com/orgs/coreos",
"repos_url": "https://api.github.com/orgs/coreos/repos",
"events_url": "https://api.github.com/orgs/coreos/events",
"hooks_url": "https://api.github.com/orgs/coreos/hooks",
"issues_url": "https://api.github.com/orgs/coreos/issues",
"members_url": "https://api.github.com/orgs/coreos/members{/member}",
"public_members_url": "https://api.github.com/orgs/coreos/public_members{/member}",
"avatar_url": "https://avatars.githubusercontent.com/u/3730757?v=3",
"login": "jsmith",
"id": 9876543,
"url": "https://api.github.com/orgs/jsmith",
"repos_url": "https://api.github.com/orgs/jsmith/repos",
"events_url": "https://api.github.com/orgs/jsmith/events",
"hooks_url": "https://api.github.com/orgs/jsmith/hooks",
"issues_url": "https://api.github.com/orgs/jsmith/issues",
"members_url": "https://api.github.com/orgs/jsmith/members{/member}",
"public_members_url": "https://api.github.com/orgs/jsmith/public_members{/member}",
"avatar_url": "https://avatars.githubusercontent.com/u/1234567?v=3",
"description": "Open Source Projects for Linux Containers"
},
"sender": {
"login": "jzelinskie",
"id": 343539,
"avatar_url": "https://avatars.githubusercontent.com/u/343539?v=3",
"login": "jsmith",
"id": 12345678,
"avatar_url": "https://avatars.githubusercontent.com/u/1234567?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/jzelinskie",
"html_url": "https://github.com/jzelinskie",
"followers_url": "https://api.github.com/users/jzelinskie/followers",
"following_url": "https://api.github.com/users/jzelinskie/following{/other_user}",
"gists_url": "https://api.github.com/users/jzelinskie/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jzelinskie/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jzelinskie/subscriptions",
"organizations_url": "https://api.github.com/users/jzelinskie/orgs",
"repos_url": "https://api.github.com/users/jzelinskie/repos",
"events_url": "https://api.github.com/users/jzelinskie/events{/privacy}",
"received_events_url": "https://api.github.com/users/jzelinskie/received_events",
"url": "https://api.github.com/users/jsmith",
"html_url": "https://github.com/jsmith",
"followers_url": "https://api.github.com/users/jsmith/followers",
"following_url": "https://api.github.com/users/jsmith/following{/other_user}",
"gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jsmith/subscriptions",
"organizations_url": "https://api.github.com/users/jsmith/orgs",
"repos_url": "https://api.github.com/users/jsmith/repos",
"events_url": "https://api.github.com/users/jsmith/events{/privacy}",
"received_events_url": "https://api.github.com/users/jsmith/received_events",
"type": "User",
"site_admin": false
}

View File

@ -6,14 +6,14 @@
"deleted": false,
"forced": false,
"base_ref": null,
"compare": "https://github.com/josephschorr/anothertest/compare/9ea43cab4747...410f4cdf8ff0",
"compare": "https://github.com/jsmith/anothertest/compare/9ea43cab4747...410f4cdf8ff0",
"commits": [
{
"id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"distinct": true,
"message": "Update Dockerfile",
"timestamp": "2015-09-11T14:26:16-04:00",
"url": "https://github.com/josephschorr/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"added": [],
"removed": [],
"modified": [
@ -26,7 +26,7 @@
"distinct": true,
"message": "Update Dockerfile",
"timestamp": "2015-09-11T14:26:16-04:00",
"url": "https://github.com/josephschorr/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"added": [],
"removed": [],
"modified": [
@ -34,60 +34,60 @@
]
},
"repository": {
"id": 34876107,
"id": 12345678,
"name": "anothertest",
"full_name": "josephschorr/anothertest",
"full_name": "jsmith/anothertest",
"owner": {
"name": "josephschorr",
"email": "josephschorr@users.noreply.github.com"
"name": "jsmith",
"email": "jsmith@users.noreply.github.com"
},
"private": false,
"html_url": "https://github.com/josephschorr/anothertest",
"html_url": "https://github.com/jsmith/anothertest",
"description": "",
"fork": false,
"url": "https://github.com/josephschorr/anothertest",
"forks_url": "https://api.github.com/repos/josephschorr/anothertest/forks",
"keys_url": "https://api.github.com/repos/josephschorr/anothertest/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/josephschorr/anothertest/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/josephschorr/anothertest/teams",
"hooks_url": "https://api.github.com/repos/josephschorr/anothertest/hooks",
"issue_events_url": "https://api.github.com/repos/josephschorr/anothertest/issues/events{/number}",
"events_url": "https://api.github.com/repos/josephschorr/anothertest/events",
"assignees_url": "https://api.github.com/repos/josephschorr/anothertest/assignees{/user}",
"branches_url": "https://api.github.com/repos/josephschorr/anothertest/branches{/branch}",
"tags_url": "https://api.github.com/repos/josephschorr/anothertest/tags",
"blobs_url": "https://api.github.com/repos/josephschorr/anothertest/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/josephschorr/anothertest/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/josephschorr/anothertest/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/josephschorr/anothertest/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/josephschorr/anothertest/statuses/{sha}",
"languages_url": "https://api.github.com/repos/josephschorr/anothertest/languages",
"stargazers_url": "https://api.github.com/repos/josephschorr/anothertest/stargazers",
"contributors_url": "https://api.github.com/repos/josephschorr/anothertest/contributors",
"subscribers_url": "https://api.github.com/repos/josephschorr/anothertest/subscribers",
"subscription_url": "https://api.github.com/repos/josephschorr/anothertest/subscription",
"commits_url": "https://api.github.com/repos/josephschorr/anothertest/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/josephschorr/anothertest/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/josephschorr/anothertest/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/josephschorr/anothertest/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/josephschorr/anothertest/contents/{+path}",
"compare_url": "https://api.github.com/repos/josephschorr/anothertest/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/josephschorr/anothertest/merges",
"archive_url": "https://api.github.com/repos/josephschorr/anothertest/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/josephschorr/anothertest/downloads",
"issues_url": "https://api.github.com/repos/josephschorr/anothertest/issues{/number}",
"pulls_url": "https://api.github.com/repos/josephschorr/anothertest/pulls{/number}",
"milestones_url": "https://api.github.com/repos/josephschorr/anothertest/milestones{/number}",
"notifications_url": "https://api.github.com/repos/josephschorr/anothertest/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/josephschorr/anothertest/labels{/name}",
"releases_url": "https://api.github.com/repos/josephschorr/anothertest/releases{/id}",
"url": "https://github.com/jsmith/anothertest",
"forks_url": "https://api.github.com/repos/jsmith/anothertest/forks",
"keys_url": "https://api.github.com/repos/jsmith/anothertest/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/jsmith/anothertest/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/jsmith/anothertest/teams",
"hooks_url": "https://api.github.com/repos/jsmith/anothertest/hooks",
"issue_events_url": "https://api.github.com/repos/jsmith/anothertest/issues/events{/number}",
"events_url": "https://api.github.com/repos/jsmith/anothertest/events",
"assignees_url": "https://api.github.com/repos/jsmith/anothertest/assignees{/user}",
"branches_url": "https://api.github.com/repos/jsmith/anothertest/branches{/branch}",
"tags_url": "https://api.github.com/repos/jsmith/anothertest/tags",
"blobs_url": "https://api.github.com/repos/jsmith/anothertest/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/jsmith/anothertest/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/jsmith/anothertest/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/jsmith/anothertest/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/jsmith/anothertest/statuses/{sha}",
"languages_url": "https://api.github.com/repos/jsmith/anothertest/languages",
"stargazers_url": "https://api.github.com/repos/jsmith/anothertest/stargazers",
"contributors_url": "https://api.github.com/repos/jsmith/anothertest/contributors",
"subscribers_url": "https://api.github.com/repos/jsmith/anothertest/subscribers",
"subscription_url": "https://api.github.com/repos/jsmith/anothertest/subscription",
"commits_url": "https://api.github.com/repos/jsmith/anothertest/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/jsmith/anothertest/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/jsmith/anothertest/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/jsmith/anothertest/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/jsmith/anothertest/contents/{+path}",
"compare_url": "https://api.github.com/repos/jsmith/anothertest/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/jsmith/anothertest/merges",
"archive_url": "https://api.github.com/repos/jsmith/anothertest/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/jsmith/anothertest/downloads",
"issues_url": "https://api.github.com/repos/jsmith/anothertest/issues{/number}",
"pulls_url": "https://api.github.com/repos/jsmith/anothertest/pulls{/number}",
"milestones_url": "https://api.github.com/repos/jsmith/anothertest/milestones{/number}",
"notifications_url": "https://api.github.com/repos/jsmith/anothertest/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/jsmith/anothertest/labels{/name}",
"releases_url": "https://api.github.com/repos/jsmith/anothertest/releases{/id}",
"created_at": 1430426945,
"updated_at": "2015-04-30T20:49:05Z",
"pushed_at": 1441995976,
"git_url": "git://github.com/josephschorr/anothertest.git",
"ssh_url": "git@github.com:josephschorr/anothertest.git",
"clone_url": "https://github.com/josephschorr/anothertest.git",
"svn_url": "https://github.com/josephschorr/anothertest",
"git_url": "git://github.com/jsmith/anothertest.git",
"ssh_url": "git@github.com:jsmith/anothertest.git",
"clone_url": "https://github.com/jsmith/anothertest.git",
"svn_url": "https://github.com/jsmith/anothertest",
"homepage": null,
"size": 144,
"stargazers_count": 0,
@ -108,25 +108,25 @@
"master_branch": "master"
},
"pusher": {
"name": "josephschorr",
"email": "josephschorr@users.noreply.github.com"
"name": "jsmith",
"email": "jsmith@users.noreply.github.com"
},
"sender": {
"login": "josephschorr",
"id": 4073002,
"login": "jsmith",
"id": 1234567,
"avatar_url": "https://avatars.githubusercontent.com/u/4073002?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/josephschorr",
"html_url": "https://github.com/josephschorr",
"followers_url": "https://api.github.com/users/josephschorr/followers",
"following_url": "https://api.github.com/users/josephschorr/following{/other_user}",
"gists_url": "https://api.github.com/users/josephschorr/gists{/gist_id}",
"starred_url": "https://api.github.com/users/josephschorr/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/josephschorr/subscriptions",
"organizations_url": "https://api.github.com/users/josephschorr/orgs",
"repos_url": "https://api.github.com/users/josephschorr/repos",
"events_url": "https://api.github.com/users/josephschorr/events{/privacy}",
"received_events_url": "https://api.github.com/users/josephschorr/received_events",
"url": "https://api.github.com/users/jsmith",
"html_url": "https://github.com/jsmith",
"followers_url": "https://api.github.com/users/jsmith/followers",
"following_url": "https://api.github.com/users/jsmith/following{/other_user}",
"gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jsmith/subscriptions",
"organizations_url": "https://api.github.com/users/jsmith/orgs",
"repos_url": "https://api.github.com/users/jsmith/repos",
"events_url": "https://api.github.com/users/jsmith/events{/privacy}",
"received_events_url": "https://api.github.com/users/jsmith/received_events",
"type": "User",
"site_admin": false
}

View File

@ -6,21 +6,21 @@
"deleted": false,
"forced": false,
"base_ref": null,
"compare": "https://github.com/silas/docker-test/compare/9716b5169392...118b07121695",
"compare": "https://github.com/jsmith/docker-test/compare/9716b5169392...118b07121695",
"commits": [
{
"id": "118b07121695d9f2e40a5ff264fdcc2917680870",
"distinct": true,
"message": "Fail",
"timestamp": "2015-09-25T14:55:11-04:00",
"url": "https://github.com/silas/docker-test/commit/118b07121695d9f2e40a5ff264fdcc2917680870",
"url": "https://github.com/jsmith/docker-test/commit/118b07121695d9f2e40a5ff264fdcc2917680870",
"author": {
"name": "Silas Sewell",
"email": "silas@sewell-fail.org"
"name": "John Smith",
"email": "j@smith.com"
},
"committer": {
"name": "Silas Sewell",
"email": "silas@sewell-fail.org"
"name": "John Smith",
"email": "j@smith.com"
},
"added": [],
"removed": [],
@ -34,14 +34,14 @@
"distinct": true,
"message": "Fail",
"timestamp": "2015-09-25T14:55:11-04:00",
"url": "https://github.com/silas/docker-test/commit/118b07121695d9f2e40a5ff264fdcc2917680870",
"url": "https://github.com/jsmith/docker-test/commit/118b07121695d9f2e40a5ff264fdcc2917680870",
"author": {
"name": "Silas Sewell",
"email": "silas@sewell-fail.org"
"name": "John Smith",
"email": "j@smith.com"
},
"committer": {
"name": "Silas Sewell",
"email": "silas@sewell-fail.org"
"name": "John Smith",
"email": "j@smith.com"
},
"added": [],
"removed": [],
@ -50,60 +50,60 @@
]
},
"repository": {
"id": 42467431,
"id": 1234567,
"name": "docker-test",
"full_name": "silas/docker-test",
"full_name": "jsmith/docker-test",
"owner": {
"name": "silas",
"email": "silas@sewell.org"
"name": "jsmith",
"email": "j@smith.com"
},
"private": false,
"html_url": "https://github.com/silas/docker-test",
"html_url": "https://github.com/jsmith/docker-test",
"description": "",
"fork": false,
"url": "https://github.com/silas/docker-test",
"forks_url": "https://api.github.com/repos/silas/docker-test/forks",
"keys_url": "https://api.github.com/repos/silas/docker-test/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/silas/docker-test/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/silas/docker-test/teams",
"hooks_url": "https://api.github.com/repos/silas/docker-test/hooks",
"issue_events_url": "https://api.github.com/repos/silas/docker-test/issues/events{/number}",
"events_url": "https://api.github.com/repos/silas/docker-test/events",
"assignees_url": "https://api.github.com/repos/silas/docker-test/assignees{/user}",
"branches_url": "https://api.github.com/repos/silas/docker-test/branches{/branch}",
"tags_url": "https://api.github.com/repos/silas/docker-test/tags",
"blobs_url": "https://api.github.com/repos/silas/docker-test/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/silas/docker-test/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/silas/docker-test/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/silas/docker-test/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/silas/docker-test/statuses/{sha}",
"languages_url": "https://api.github.com/repos/silas/docker-test/languages",
"stargazers_url": "https://api.github.com/repos/silas/docker-test/stargazers",
"contributors_url": "https://api.github.com/repos/silas/docker-test/contributors",
"subscribers_url": "https://api.github.com/repos/silas/docker-test/subscribers",
"subscription_url": "https://api.github.com/repos/silas/docker-test/subscription",
"commits_url": "https://api.github.com/repos/silas/docker-test/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/silas/docker-test/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/silas/docker-test/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/silas/docker-test/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/silas/docker-test/contents/{+path}",
"compare_url": "https://api.github.com/repos/silas/docker-test/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/silas/docker-test/merges",
"archive_url": "https://api.github.com/repos/silas/docker-test/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/silas/docker-test/downloads",
"issues_url": "https://api.github.com/repos/silas/docker-test/issues{/number}",
"pulls_url": "https://api.github.com/repos/silas/docker-test/pulls{/number}",
"milestones_url": "https://api.github.com/repos/silas/docker-test/milestones{/number}",
"notifications_url": "https://api.github.com/repos/silas/docker-test/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/silas/docker-test/labels{/name}",
"releases_url": "https://api.github.com/repos/silas/docker-test/releases{/id}",
"url": "https://github.com/jsmith/docker-test",
"forks_url": "https://api.github.com/repos/jsmith/docker-test/forks",
"keys_url": "https://api.github.com/repos/jsmith/docker-test/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/jsmith/docker-test/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/jsmith/docker-test/teams",
"hooks_url": "https://api.github.com/repos/jsmith/docker-test/hooks",
"issue_events_url": "https://api.github.com/repos/jsmith/docker-test/issues/events{/number}",
"events_url": "https://api.github.com/repos/jsmith/docker-test/events",
"assignees_url": "https://api.github.com/repos/jsmith/docker-test/assignees{/user}",
"branches_url": "https://api.github.com/repos/jsmith/docker-test/branches{/branch}",
"tags_url": "https://api.github.com/repos/jsmith/docker-test/tags",
"blobs_url": "https://api.github.com/repos/jsmith/docker-test/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/jsmith/docker-test/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/jsmith/docker-test/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/jsmith/docker-test/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/jsmith/docker-test/statuses/{sha}",
"languages_url": "https://api.github.com/repos/jsmith/docker-test/languages",
"stargazers_url": "https://api.github.com/repos/jsmith/docker-test/stargazers",
"contributors_url": "https://api.github.com/repos/jsmith/docker-test/contributors",
"subscribers_url": "https://api.github.com/repos/jsmith/docker-test/subscribers",
"subscription_url": "https://api.github.com/repos/jsmith/docker-test/subscription",
"commits_url": "https://api.github.com/repos/jsmith/docker-test/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/jsmith/docker-test/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/jsmith/docker-test/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/jsmith/docker-test/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/jsmith/docker-test/contents/{+path}",
"compare_url": "https://api.github.com/repos/jsmith/docker-test/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/jsmith/docker-test/merges",
"archive_url": "https://api.github.com/repos/jsmith/docker-test/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/jsmith/docker-test/downloads",
"issues_url": "https://api.github.com/repos/jsmith/docker-test/issues{/number}",
"pulls_url": "https://api.github.com/repos/jsmith/docker-test/pulls{/number}",
"milestones_url": "https://api.github.com/repos/jsmith/docker-test/milestones{/number}",
"notifications_url": "https://api.github.com/repos/jsmith/docker-test/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/jsmith/docker-test/labels{/name}",
"releases_url": "https://api.github.com/repos/jsmith/docker-test/releases{/id}",
"created_at": 1442254053,
"updated_at": "2015-09-14T18:07:33Z",
"pushed_at": 1443207315,
"git_url": "git://github.com/silas/docker-test.git",
"ssh_url": "git@github.com:silas/docker-test.git",
"clone_url": "https://github.com/silas/docker-test.git",
"svn_url": "https://github.com/silas/docker-test",
"git_url": "git://github.com/jsmith/docker-test.git",
"ssh_url": "git@github.com:jsmith/docker-test.git",
"clone_url": "https://github.com/jsmith/docker-test.git",
"svn_url": "https://github.com/jsmith/docker-test",
"homepage": null,
"size": 108,
"stargazers_count": 0,
@ -124,25 +124,25 @@
"master_branch": "master"
},
"pusher": {
"name": "silas",
"email": "silas@sewell.org"
"name": "jsmith",
"email": "j@smith.com"
},
"sender": {
"login": "silas",
"id": 18528,
"avatar_url": "https://avatars.githubusercontent.com/u/18528?v=3",
"login": "jsmith",
"id": 1234567,
"avatar_url": "https://avatars.githubusercontent.com/u/1234567?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/silas",
"html_url": "https://github.com/silas",
"followers_url": "https://api.github.com/users/silas/followers",
"following_url": "https://api.github.com/users/silas/following{/other_user}",
"gists_url": "https://api.github.com/users/silas/gists{/gist_id}",
"starred_url": "https://api.github.com/users/silas/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/silas/subscriptions",
"organizations_url": "https://api.github.com/users/silas/orgs",
"repos_url": "https://api.github.com/users/silas/repos",
"events_url": "https://api.github.com/users/silas/events{/privacy}",
"received_events_url": "https://api.github.com/users/silas/received_events",
"url": "https://api.github.com/users/jsmith",
"html_url": "https://github.com/jsmith",
"followers_url": "https://api.github.com/users/jsmith/followers",
"following_url": "https://api.github.com/users/jsmith/following{/other_user}",
"gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jsmith/subscriptions",
"organizations_url": "https://api.github.com/users/jsmith/orgs",
"repos_url": "https://api.github.com/users/jsmith/repos",
"events_url": "https://api.github.com/users/jsmith/events{/privacy}",
"received_events_url": "https://api.github.com/users/jsmith/received_events",
"type": "User",
"site_admin": false
}

View File

@ -6,23 +6,23 @@
"deleted": false,
"forced": false,
"base_ref": null,
"compare": "https://github.com/josephschorr/anothertest/compare/9ea43cab4747...410f4cdf8ff0",
"compare": "https://github.com/jsmith/anothertest/compare/9ea43cab4747...410f4cdf8ff0",
"commits": [
{
"id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"distinct": true,
"message": "Update Dockerfile",
"timestamp": "2015-09-11T14:26:16-04:00",
"url": "https://github.com/josephschorr/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"author": {
"name": "josephschorr",
"email": "josephschorr@users.noreply.github.com",
"username": "josephschorr"
"name": "jsmith",
"email": "jsmith@users.noreply.github.com",
"username": "jsmith"
},
"committer": {
"name": "josephschorr",
"email": "josephschorr@users.noreply.github.com",
"username": "josephschorr"
"name": "jsmith",
"email": "jsmith@users.noreply.github.com",
"username": "jsmith"
},
"added": [],
"removed": [],
@ -36,16 +36,16 @@
"distinct": true,
"message": "Update Dockerfile",
"timestamp": "2015-09-11T14:26:16-04:00",
"url": "https://github.com/josephschorr/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"author": {
"name": "josephschorr",
"email": "josephschorr@users.noreply.github.com",
"username": "josephschorr"
"name": "jsmith",
"email": "jsmith@users.noreply.github.com",
"username": "jsmith"
},
"committer": {
"name": "josephschorr",
"email": "josephschorr@users.noreply.github.com",
"username": "josephschorr"
"name": "jsmith",
"email": "jsmith@users.noreply.github.com",
"username": "jsmith"
},
"added": [],
"removed": [],
@ -54,60 +54,60 @@
]
},
"repository": {
"id": 34876107,
"id": 1234567,
"name": "anothertest",
"full_name": "josephschorr/anothertest",
"full_name": "jsmith/anothertest",
"owner": {
"name": "josephschorr",
"email": "josephschorr@users.noreply.github.com"
"name": "jsmith",
"email": "jsmith@users.noreply.github.com"
},
"private": false,
"html_url": "https://github.com/josephschorr/anothertest",
"html_url": "https://github.com/jsmith/anothertest",
"description": "",
"fork": false,
"url": "https://github.com/josephschorr/anothertest",
"forks_url": "https://api.github.com/repos/josephschorr/anothertest/forks",
"keys_url": "https://api.github.com/repos/josephschorr/anothertest/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/josephschorr/anothertest/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/josephschorr/anothertest/teams",
"hooks_url": "https://api.github.com/repos/josephschorr/anothertest/hooks",
"issue_events_url": "https://api.github.com/repos/josephschorr/anothertest/issues/events{/number}",
"events_url": "https://api.github.com/repos/josephschorr/anothertest/events",
"assignees_url": "https://api.github.com/repos/josephschorr/anothertest/assignees{/user}",
"branches_url": "https://api.github.com/repos/josephschorr/anothertest/branches{/branch}",
"tags_url": "https://api.github.com/repos/josephschorr/anothertest/tags",
"blobs_url": "https://api.github.com/repos/josephschorr/anothertest/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/josephschorr/anothertest/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/josephschorr/anothertest/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/josephschorr/anothertest/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/josephschorr/anothertest/statuses/{sha}",
"languages_url": "https://api.github.com/repos/josephschorr/anothertest/languages",
"stargazers_url": "https://api.github.com/repos/josephschorr/anothertest/stargazers",
"contributors_url": "https://api.github.com/repos/josephschorr/anothertest/contributors",
"subscribers_url": "https://api.github.com/repos/josephschorr/anothertest/subscribers",
"subscription_url": "https://api.github.com/repos/josephschorr/anothertest/subscription",
"commits_url": "https://api.github.com/repos/josephschorr/anothertest/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/josephschorr/anothertest/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/josephschorr/anothertest/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/josephschorr/anothertest/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/josephschorr/anothertest/contents/{+path}",
"compare_url": "https://api.github.com/repos/josephschorr/anothertest/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/josephschorr/anothertest/merges",
"archive_url": "https://api.github.com/repos/josephschorr/anothertest/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/josephschorr/anothertest/downloads",
"issues_url": "https://api.github.com/repos/josephschorr/anothertest/issues{/number}",
"pulls_url": "https://api.github.com/repos/josephschorr/anothertest/pulls{/number}",
"milestones_url": "https://api.github.com/repos/josephschorr/anothertest/milestones{/number}",
"notifications_url": "https://api.github.com/repos/josephschorr/anothertest/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/josephschorr/anothertest/labels{/name}",
"releases_url": "https://api.github.com/repos/josephschorr/anothertest/releases{/id}",
"url": "https://github.com/jsmith/anothertest",
"forks_url": "https://api.github.com/repos/jsmith/anothertest/forks",
"keys_url": "https://api.github.com/repos/jsmith/anothertest/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/jsmith/anothertest/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/jsmith/anothertest/teams",
"hooks_url": "https://api.github.com/repos/jsmith/anothertest/hooks",
"issue_events_url": "https://api.github.com/repos/jsmith/anothertest/issues/events{/number}",
"events_url": "https://api.github.com/repos/jsmith/anothertest/events",
"assignees_url": "https://api.github.com/repos/jsmith/anothertest/assignees{/user}",
"branches_url": "https://api.github.com/repos/jsmith/anothertest/branches{/branch}",
"tags_url": "https://api.github.com/repos/jsmith/anothertest/tags",
"blobs_url": "https://api.github.com/repos/jsmith/anothertest/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/jsmith/anothertest/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/jsmith/anothertest/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/jsmith/anothertest/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/jsmith/anothertest/statuses/{sha}",
"languages_url": "https://api.github.com/repos/jsmith/anothertest/languages",
"stargazers_url": "https://api.github.com/repos/jsmith/anothertest/stargazers",
"contributors_url": "https://api.github.com/repos/jsmith/anothertest/contributors",
"subscribers_url": "https://api.github.com/repos/jsmith/anothertest/subscribers",
"subscription_url": "https://api.github.com/repos/jsmith/anothertest/subscription",
"commits_url": "https://api.github.com/repos/jsmith/anothertest/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/jsmith/anothertest/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/jsmith/anothertest/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/jsmith/anothertest/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/jsmith/anothertest/contents/{+path}",
"compare_url": "https://api.github.com/repos/jsmith/anothertest/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/jsmith/anothertest/merges",
"archive_url": "https://api.github.com/repos/jsmith/anothertest/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/jsmith/anothertest/downloads",
"issues_url": "https://api.github.com/repos/jsmith/anothertest/issues{/number}",
"pulls_url": "https://api.github.com/repos/jsmith/anothertest/pulls{/number}",
"milestones_url": "https://api.github.com/repos/jsmith/anothertest/milestones{/number}",
"notifications_url": "https://api.github.com/repos/jsmith/anothertest/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/jsmith/anothertest/labels{/name}",
"releases_url": "https://api.github.com/repos/jsmith/anothertest/releases{/id}",
"created_at": 1430426945,
"updated_at": "2015-04-30T20:49:05Z",
"pushed_at": 1441995976,
"git_url": "git://github.com/josephschorr/anothertest.git",
"ssh_url": "git@github.com:josephschorr/anothertest.git",
"clone_url": "https://github.com/josephschorr/anothertest.git",
"svn_url": "https://github.com/josephschorr/anothertest",
"git_url": "git://github.com/jsmith/anothertest.git",
"ssh_url": "git@github.com:jsmith/anothertest.git",
"clone_url": "https://github.com/jsmith/anothertest.git",
"svn_url": "https://github.com/jsmith/anothertest",
"homepage": null,
"size": 144,
"stargazers_count": 0,
@ -128,25 +128,25 @@
"master_branch": "master"
},
"pusher": {
"name": "josephschorr",
"email": "josephschorr@users.noreply.github.com"
"name": "jsmith",
"email": "jsmith@users.noreply.github.com"
},
"sender": {
"login": "josephschorr",
"id": 4073002,
"avatar_url": "https://avatars.githubusercontent.com/u/4073002?v=3",
"login": "jsmith",
"id": 1234567,
"avatar_url": "https://avatars.githubusercontent.com/u/1234567?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/josephschorr",
"html_url": "https://github.com/josephschorr",
"followers_url": "https://api.github.com/users/josephschorr/followers",
"following_url": "https://api.github.com/users/josephschorr/following{/other_user}",
"gists_url": "https://api.github.com/users/josephschorr/gists{/gist_id}",
"starred_url": "https://api.github.com/users/josephschorr/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/josephschorr/subscriptions",
"organizations_url": "https://api.github.com/users/josephschorr/orgs",
"repos_url": "https://api.github.com/users/josephschorr/repos",
"events_url": "https://api.github.com/users/josephschorr/events{/privacy}",
"received_events_url": "https://api.github.com/users/josephschorr/received_events",
"url": "https://api.github.com/users/jsmith",
"html_url": "https://github.com/jsmith",
"followers_url": "https://api.github.com/users/jsmith/followers",
"following_url": "https://api.github.com/users/jsmith/following{/other_user}",
"gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jsmith/subscriptions",
"organizations_url": "https://api.github.com/users/jsmith/orgs",
"repos_url": "https://api.github.com/users/jsmith/repos",
"events_url": "https://api.github.com/users/jsmith/events{/privacy}",
"received_events_url": "https://api.github.com/users/jsmith/received_events",
"type": "User",
"site_admin": false
}

View File

@ -5,17 +5,17 @@
"ref": "refs/heads/master",
"checkout_sha": "fb88379ee45de28a0a4590fddcbd8eff8b36026e",
"message": null,
"user_id": 95973,
"user_name": "Jimmy Zelinskie",
"user_email": "jimmyzelinskie@gmail.com",
"project_id": 406414,
"user_id": 98765,
"user_name": "John Smith",
"user_email": "j@smith.com",
"project_id": 12344567,
"repository": {
"name": "www-gitlab-com",
"url": "git@gitlab.com:jzelinskie/www-gitlab-com.git",
"name": "somerepo",
"url": "git@gitlab.com:jsmith/somerepo.git",
"description": "",
"homepage": "https://gitlab.com/jzelinskie/www-gitlab-com",
"git_http_url": "https://gitlab.com/jzelinskie/www-gitlab-com.git",
"git_ssh_url": "git@gitlab.com:jzelinskie/www-gitlab-com.git",
"homepage": "https://gitlab.com/jsmith/somerepo",
"git_http_url": "https://gitlab.com/jsmith/somerepo.git",
"git_ssh_url": "git@gitlab.com:jsmith/somerepo.git",
"visibility_level": 20
},
"commits": [
@ -23,30 +23,30 @@
"id": "fb88379ee45de28a0a4590fddcbd8eff8b36026e",
"message": "Fix link\n",
"timestamp": "2015-08-13T19:33:18+00:00",
"url": "https://gitlab.com/jzelinskie/www-gitlab-com/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e",
"url": "https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e",
"author": {
"name": "Sytse Sijbrandij",
"email": "sytse@gitlab.com"
"name": "Jane Smith",
"email": "jane@smith.com"
}
},
{
"id": "4ca166bc0b511f21fa331873f260f1a7cb38d723",
"message": "Merge branch 'git-lfs' into 'master'\n\nGit lfs\n\n@JobV @dzaporozhets @DouweM please review the tone of this\n\nSee merge request !899\n",
"message": "Do Some Cool Thing",
"timestamp": "2015-08-13T15:52:15+00:00",
"url": "https://gitlab.com/jzelinskie/www-gitlab-com/commit/4ca166bc0b511f21fa331873f260f1a7cb38d723",
"url": "https://gitlab.com/jsmith/somerepo/commit/4ca166bc0b511f21fa331873f260f1a7cb38d723",
"author": {
"name": "Sytse Sijbrandij",
"email": "sytse@gitlab.com"
"name": "Jane Smith",
"email": "jane@smith.com"
}
},
{
"id": "11fcaca195e8b17ca7e3dc47d9608d5b6b892f45",
"message": "Merge branch 'release-7-3-5' into 'master'\n\n7-13-5 Release post.\n\nSee merge request !900\n",
"message": "Merge another cool thing",
"timestamp": "2015-08-13T09:31:47+00:00",
"url": "https://gitlab.com/jzelinskie/www-gitlab-com/commit/11fcaca195e8b17ca7e3dc47d9608d5b6b892f45",
"url": "https://gitlab.com/jsmith/somerepo/commit/11fcaca195e8b17ca7e3dc47d9608d5b6b892f45",
"author": {
"name": "Valery Sizov",
"email": "valery@gitlab.com"
"name": "Kate Smith",
"email": "kate@smith.com"
}
}
],

View File

@ -0,0 +1,61 @@
{
"ref": "refs/tags/fourthtag",
"user_id": 4797254,
"object_kind": "tag_push",
"repository": {
"git_ssh_url": "git@gitlab.com:someuser/some-test-project.git",
"name": "Some test project",
"url": "git@gitlab.com:someuser/some-test-project.git",
"git_http_url": "https://gitlab.com/someuser/some-test-project.git",
"visibility_level": 0,
"homepage": "https://gitlab.com/someuser/some-test-project",
"description": "Some test project"
},
"event_name": "tag_push",
"commits": [
{
"added": [],
"author": {
"name": "Some User",
"email": "someuser@somedomain.com"
},
"url": "https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f",
"timestamp": "2019-10-17T18:07:48Z",
"message": "Update Dockerfile",
"removed": [],
"modified": [
"Dockerfile"
],
"id": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f"
}
],
"after": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f",
"project": {
"git_ssh_url": "git@gitlab.com:someuser/some-test-project.git",
"ci_config_path": null,
"web_url": "https://gitlab.com/someuser/some-test-project",
"description": "Some test project",
"url": "git@gitlab.com:someuser/some-test-project.git",
"namespace": "Some User",
"default_branch": "master",
"homepage": "https://gitlab.com/someuser/some-test-project",
"git_http_url": "https://gitlab.com/someuser/some-test-project.git",
"avatar_url": null,
"ssh_url": "git@gitlab.com:someuser/some-test-project.git",
"http_url": "https://gitlab.com/someuser/some-test-project.git",
"path_with_namespace": "someuser/some-test-project",
"visibility_level": 0,
"id": 14838571,
"name": "Some test project"
},
"user_username": "someuser",
"checkout_sha": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f",
"total_commits_count": 1,
"before": "0000000000000000000000000000000000000000",
"user_avatar": "https://secure.gravatar.com/avatar/0ea05bdf5c3f2cb8aac782a4a2ac3177?s=80&d=identicon",
"message": "",
"project_id": 14838571,
"user_name": "Some User",
"user_email": "",
"push_options": {}
}

View File

@ -7,35 +7,35 @@
"checkout_sha": "9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53",
"message": null,
"user_id": 750047,
"user_name": "Joseph Schorr",
"user_email": "joseph.schorr@coreos.com",
"user_avatar": "https:\/\/secure.gravatar.com\/avatar\/63e4521c07d2312d8e20c3a6cad36f57?s=80&d=identicon",
"user_name": "John Smith",
"user_email": "j@smith.com",
"user_avatar": "https:\/\/secure.gravatar.com\/avatar\/32784623495678234678234?s=80&d=identicon",
"project_id": 1756744,
"project": {
"name": "some-test-project",
"description": "",
"web_url": "https:\/\/gitlab.com\/joseph.schorr\/some-test-project",
"web_url": "https:\/\/gitlab.com\/jsmith\/some-test-project",
"avatar_url": null,
"git_ssh_url": "git@gitlab.com:joseph.schorr\/some-test-project.git",
"git_http_url": "https:\/\/gitlab.com\/joseph.schorr\/some-test-project.git",
"namespace": "joseph.schorr",
"git_ssh_url": "git@gitlab.com:jsmith\/some-test-project.git",
"git_http_url": "https:\/\/gitlab.com\/jsmith\/some-test-project.git",
"namespace": "jsmith",
"visibility_level": 0,
"path_with_namespace": "joseph.schorr\/some-test-project",
"path_with_namespace": "jsmith\/some-test-project",
"default_branch": "master",
"homepage": "https:\/\/gitlab.com\/joseph.schorr\/some-test-project",
"url": "git@gitlab.com:joseph.schorr\/some-test-project.git",
"ssh_url": "git@gitlab.com:joseph.schorr\/some-test-project.git",
"http_url": "https:\/\/gitlab.com\/joseph.schorr\/some-test-project.git"
"homepage": "https:\/\/gitlab.com\/jsmith\/some-test-project",
"url": "git@gitlab.com:jsmith\/some-test-project.git",
"ssh_url": "git@gitlab.com:jsmith\/some-test-project.git",
"http_url": "https:\/\/gitlab.com\/jsmith\/some-test-project.git"
},
"commits": [
{
"id": "f00a0a6a71118721ac1f586bf79650170042609f",
"message": "Add changelog",
"timestamp": "2016-09-29T14:59:23+00:00",
"url": "https:\/\/gitlab.com\/joseph.schorr\/some-test-project\/commit\/f00a0a6a71118721ac1f586bf79650170042609f",
"url": "https:\/\/gitlab.com\/jsmith\/some-test-project\/commit\/f00a0a6a71118721ac1f586bf79650170042609f",
"author": {
"name": "Joseph Schorr",
"email": "joseph.schorr@coreos.com"
"name": "John Smith",
"email": "j@smith.com"
},
"added": [
"CHANGELOG"
@ -51,10 +51,10 @@
"id": "cc66287314cb154c986665a6c29377ef42edee60",
"message": "Add new file",
"timestamp": "2016-09-29T15:02:01+00:00",
"url": "https:\/\/gitlab.com\/joseph.schorr\/some-test-project\/commit\/cc66287314cb154c986665a6c29377ef42edee60",
"url": "https:\/\/gitlab.com\/jsmith\/some-test-project\/commit\/cc66287314cb154c986665a6c29377ef42edee60",
"author": {
"name": "Joseph Schorr",
"email": "joseph.schorr@coreos.com"
"name": "John Smith",
"email": "j@smith.com"
},
"added": [
"YetAnotherFIle"
@ -70,10 +70,10 @@
"id": "9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53",
"message": "Merge branch 'foobar' into 'master'\r\n\r\nAdd changelog\r\n\r\nSome merge thing\r\n\r\nSee merge request !1",
"timestamp": "2016-09-29T15:02:41+00:00",
"url": "https:\/\/gitlab.com\/joseph.schorr\/some-test-project\/commit\/9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53",
"url": "https:\/\/gitlab.com\/jsmith\/some-test-project\/commit\/9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53",
"author": {
"name": "Joseph Schorr",
"email": "joseph.schorr@coreos.com"
"name": "John Smith",
"email": "j@smith.com"
},
"added": [
"CHANGELOG",
@ -90,11 +90,11 @@
"total_commits_count": 3,
"repository": {
"name": "some-test-project",
"url": "git@gitlab.com:joseph.schorr\/some-test-project.git",
"url": "git@gitlab.com:jsmith\/some-test-project.git",
"description": "",
"homepage": "https:\/\/gitlab.com\/joseph.schorr\/some-test-project",
"git_http_url": "https:\/\/gitlab.com\/joseph.schorr\/some-test-project.git",
"git_ssh_url": "git@gitlab.com:joseph.schorr\/some-test-project.git",
"homepage": "https:\/\/gitlab.com\/jsmith\/some-test-project",
"git_http_url": "https:\/\/gitlab.com\/jsmith\/some-test-project.git",
"git_ssh_url": "git@gitlab.com:jsmith\/some-test-project.git",
"visibility_level": 0
}
}

View File

@ -7,25 +7,25 @@
"checkout_sha": null,
"message": null,
"user_id": 750047,
"user_name": "Joseph Schorr",
"user_email": "joseph.schorr@coreos.com",
"user_avatar": "https:\/\/secure.gravatar.com\/avatar\/63e4521c07d2312d8e20c3a6cad36f57?s=80&d=identicon",
"user_name": "John Smith",
"user_email": "j@smith.com",
"user_avatar": "https:\/\/secure.gravatar.com\/avatar\/2348972348972348973?s=80&d=identicon",
"project_id": 1756744,
"project": {
"name": "some-test-project",
"description": "",
"web_url": "https:\/\/gitlab.com\/joseph.schorr\/some-test-project",
"web_url": "https:\/\/gitlab.com\/jsmith\/some-test-project",
"avatar_url": null,
"git_ssh_url": "git@gitlab.com:joseph.schorr\/some-test-project.git",
"git_http_url": "https:\/\/gitlab.com\/joseph.schorr\/some-test-project.git",
"namespace": "joseph.schorr",
"git_ssh_url": "git@gitlab.com:jsmith\/some-test-project.git",
"git_http_url": "https:\/\/gitlab.com\/jsmith\/some-test-project.git",
"namespace": "jsmith",
"visibility_level": 0,
"path_with_namespace": "joseph.schorr\/some-test-project",
"path_with_namespace": "jsmith\/some-test-project",
"default_branch": "master",
"homepage": "https:\/\/gitlab.com\/joseph.schorr\/some-test-project",
"url": "git@gitlab.com:joseph.schorr\/some-test-project.git",
"ssh_url": "git@gitlab.com:joseph.schorr\/some-test-project.git",
"http_url": "https:\/\/gitlab.com\/joseph.schorr\/some-test-project.git"
"homepage": "https:\/\/gitlab.com\/jsmith\/some-test-project",
"url": "git@gitlab.com:jsmith\/some-test-project.git",
"ssh_url": "git@gitlab.com:jsmith\/some-test-project.git",
"http_url": "https:\/\/gitlab.com\/jsmith\/some-test-project.git"
},
"commits": [
@ -33,11 +33,11 @@
"total_commits_count": 0,
"repository": {
"name": "some-test-project",
"url": "git@gitlab.com:joseph.schorr\/some-test-project.git",
"url": "git@gitlab.com:jsmith\/some-test-project.git",
"description": "",
"homepage": "https:\/\/gitlab.com\/joseph.schorr\/some-test-project",
"git_http_url": "https:\/\/gitlab.com\/joseph.schorr\/some-test-project.git",
"git_ssh_url": "git@gitlab.com:joseph.schorr\/some-test-project.git",
"homepage": "https:\/\/gitlab.com\/jsmith\/some-test-project",
"git_http_url": "https:\/\/gitlab.com\/jsmith\/some-test-project.git",
"git_ssh_url": "git@gitlab.com:jsmith\/some-test-project.git",
"visibility_level": 0
}
}

View File

@ -0,0 +1,61 @@
{
"after": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f",
"before": "0000000000000000000000000000000000000000",
"checkout_sha": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f",
"commits": [
{
"added": [],
"author": {
"name": "Some User",
"email": "some.user@someplace.com"
},
"id": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f",
"message": "Update Dockerfile",
"modified": [
"Dockerfile"
],
"removed": [],
"timestamp": "2019-10-17T18:07:48Z",
"url": "https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f"
}
],
"event_name": "tag_push",
"message": "",
"object_kind": "tag_push",
"project": {
"avatar_url": null,
"ci_config_path": null,
"default_branch": "master",
"description": "Some test project",
"git_http_url": "https://gitlab.com/someuser/some-test-project.git",
"git_ssh_url": "git@gitlab.com:someuser/some-test-project.git",
"homepage": "https://gitlab.com/someuser/some-test-project",
"http_url": "https://gitlab.com/someuser/some-test-project.git",
"id": 14838571,
"name": "Some test project",
"namespace": "Joey Schorr",
"path_with_namespace": "someuser/some-test-project",
"ssh_url": "git@gitlab.com:someuser/some-test-project.git",
"url": "git@gitlab.com:someuser/some-test-project.git",
"visibility_level": 0,
"web_url": "https://gitlab.com/someuser/some-test-project"
},
"project_id": 14838571,
"push_options": {},
"ref": "refs/tags/thirdtag",
"repository": {
"description": "Some test project",
"git_http_url": "https://gitlab.com/someuser/some-test-project.git",
"git_ssh_url": "git@gitlab.com:someuser/some-test-project.git",
"homepage": "https://gitlab.com/someuser/some-test-project",
"name": "Some test project",
"url": "git@gitlab.com:someuser/some-test-project.git",
"visibility_level": 0
},
"total_commits_count": 1,
"user_avatar": "https://secure.gravatar.com/avatar/someavatar?s=80&d=identicon",
"user_email": "",
"user_id": 4797254,
"user_name": "Some User",
"user_username": "someuser"
}

View File

@ -0,0 +1,38 @@
{
"object_kind": "tag_push",
"before": "0000000000000000000000000000000000000000",
"after": "82b3d5ae55f7080f1e6022629cdb57bfae7cccc7",
"ref": "refs/tags/v1.0.0",
"checkout_sha": null,
"user_id": 1,
"user_name": "John Smith",
"user_avatar": "https://s.gravatar.com/avatar/d4c74594d841139328695756648b6bd6?s=8://s.gravatar.com/avatar/d4c74594d841139328695756648b6bd6?s=80",
"project_id": 1,
"project":{
"name":"Example",
"description":"",
"web_url":"http://example.com/jsmith/example",
"avatar_url":null,
"git_ssh_url":"git@example.com:jsmith/example.git",
"git_http_url":"http://example.com/jsmith/example.git",
"namespace":"Jsmith",
"visibility_level":0,
"path_with_namespace":"jsmith/example",
"default_branch":"master",
"homepage":"http://example.com/jsmith/example",
"url":"git@example.com:jsmith/example.git",
"ssh_url":"git@example.com:jsmith/example.git",
"http_url":"http://example.com/jsmith/example.git"
},
"repository":{
"name": "Example",
"url": "ssh://git@example.com/jsmith/example.git",
"description": "",
"homepage": "http://example.com/jsmith/example",
"git_http_url":"http://example.com/jsmith/example.git",
"git_ssh_url":"git@example.com:jsmith/example.git",
"visibility_level":0
},
"commits": [],
"total_commits_count": 0
}

3
cirun.config.yaml Normal file
View File

@ -0,0 +1,3 @@
SETUP_COMPLETE: true
V3_UPGRADE_MODE: complete
DATABASE_SECRET_KEY: anothercrazykey!

View File

@ -1,3 +0,0 @@
Evan Cordell <evan.cordell@coreos.com> (@ecordell)
Jake Moshenko <jake.moshenko@coreos.com> (@jakedt)
Jimmy Zelinskie <jimmy.zelinskie@coreos.com> (@jzelinskie)

View File

@ -1,10 +0,0 @@
#! /bin/sh
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
cd ${QUAYDIR:-"/"}
if [ -e $QUAYCONF/stack/syslog-ng-extra.conf ]
then
cp $QUAYCONF/stack/syslog-ng-extra.conf /etc/syslog-ng/conf.d/
fi

View File

@ -8,5 +8,5 @@ cd $QUAYDIR
if [[ "$KUBERNETES_SERVICE_HOST" != "" ]];then
echo "Running on kubernetes, attempting to retrieve extra certs from secret"
venv/bin/python $QUAYCONF/init/02_get_kube_certs.py
fi
python $QUAYCONF/init/02_get_kube_certs.py
fi

0
conf/init/__init__.py Normal file
View File

View File

@ -3,8 +3,13 @@ set -e
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
cd ${QUAYDIR:-"/"}
SYSTEM_CERTDIR=${SYSTEM_CERTDIR:-"/etc/pki/ca-trust/source/anchors"}
# Create certs for jwtproxy to mitm outgoing TLS connections
echo '{"CN":"CA","key":{"algo":"rsa","size":2048}}' | cfssl gencert -initca - | cfssljson -bare mitm
cp mitm-key.pem $QUAYCONF/mitm.key
cp mitm.pem $QUAYCONF/mitm.cert
cp mitm.pem /usr/local/share/ca-certificates/mitm.crt
# echo '{"CN":"CA","key":{"algo":"rsa","size":2048}}' | cfssl gencert -initca - | cfssljson -bare mitm
mkdir -p /certificates; cd /certificates
openssl req -new -newkey rsa:4096 -days 3650 -nodes -x509 \
-subj "/C=US/ST=NY/L=NYC/O=Dis/CN=self-signed" \
-keyout mitm-key.pem -out mitm.pem
cp /certificates/mitm-key.pem $QUAYCONF/mitm.key
cp /certificates/mitm.pem $QUAYCONF/mitm.cert
cp /certificates/mitm.pem $SYSTEM_CERTDIR/mitm.crt

View File

@ -4,6 +4,9 @@ QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
QUAYCONFIG=${QUAYCONFIG:-"$QUAYCONF/stack"}
CERTDIR=${CERTDIR:-"$QUAYCONFIG/extra_ca_certs"}
SYSTEM_CERTDIR=${SYSTEM_CERTDIR:-"/etc/pki/ca-trust/source/anchors"}
PYTHON_ROOT=${PYTHON_ROOT:-"/opt/rh/python27/root/usr/lib/python2.7"}
# If we're running under kube, the previous script (02_get_kube_certs.sh) will put the certs in a different location
if [[ "$KUBERNETES_SERVICE_HOST" != "" ]];then
@ -15,35 +18,35 @@ cd ${QUAYDIR:-"/quay-registry"}
# Add the custom LDAP certificate
if [ -e $QUAYCONFIG/ldap.crt ]
then
cp $QUAYCONFIG/ldap.crt /usr/local/share/ca-certificates/ldap.crt
cp $QUAYCONFIG/ldap.crt ${SYSTEM_CERTDIR}/ldap.crt
fi
# Add extra trusted certificates (as a directory)
if [ -d $CERTDIR ]; then
if test "$(ls -A "$CERTDIR")"; then
echo "Installing extra certificates found in $CERTDIR directory"
cp $CERTDIR/* /usr/local/share/ca-certificates/
cat $CERTDIR/* >> venv/lib/python2.7/site-packages/requests/cacert.pem
cat $CERTDIR/* >> venv/lib/python2.7/site-packages/certifi/cacert.pem
cp $CERTDIR/* ${SYSTEM_CERTDIR}
cat $CERTDIR/* >> $PYTHON_ROOT/site-packages/requests/cacert.pem
cat $CERTDIR/* >> $PYTHON_ROOT/site-packages/certifi/cacert.pem
fi
fi
# Add extra trusted certificates (as a file)
if [ -f $CERTDIR ]; then
echo "Installing extra certificates found in $CERTDIR file"
csplit -z -f /usr/local/share/ca-certificates/extra-ca- $CERTDIR '/-----BEGIN CERTIFICATE-----/' '{*}'
cat $CERTDIR >> venv/lib/python2.7/site-packages/requests/cacert.pem
cat $CERTDIR >> venv/lib/python2.7/site-packages/certifi/cacert.pem
csplit -z -f ${SYSTEM_CERTDIR}/extra-ca- $CERTDIR '/-----BEGIN CERTIFICATE-----/' '{*}'
cat $CERTDIR >> $PYTHON_ROOT/site-packages/requests/cacert.pem
cat $CERTDIR >> $PYTHON_ROOT/site-packages/certifi/cacert.pem
fi
# Add extra trusted certificates (prefixed)
for f in $(find $QUAYCONFIG/ -maxdepth 1 -type f -name "extra_ca*")
do
echo "Installing extra cert $f"
cp "$f" /usr/local/share/ca-certificates/
cat "$f" >> venv/lib/python2.7/site-packages/requests/cacert.pem
cat "$f" >> venv/lib/python2.7/site-packages/certifi/cacert.pem
cp "$f" ${SYSTEM_CERTDIR}
cat "$f" >> $PYTHON_ROOT/site-packages/requests/cacert.pem
cat "$f" >> $PYTHON_ROOT/site-packages/certifi/cacert.pem
done
# Update all CA certificates.
update-ca-certificates
update-ca-trust extract

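For reference, once the script above has copied extra certificates into $SYSTEM_CERTDIR and appended them to the requests/certifi bundles, a Python process in the container can exercise that bundle directly. A minimal sketch only, assuming requests and certifi are installed in the image and using internal-registry.example.com as a placeholder host:
# Sketch (not part of this changeset): exercise the cacert.pem the script above appends to.
# The hostname below is a placeholder, not a real Quay endpoint.
import certifi
import requests
print(certifi.where())  # path of the certifi bundle extended by 03_install_extra_certs
resp = requests.get("https://internal-registry.example.com/v2/", verify=certifi.where())
print(resp.status_code)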
View File

@ -1,5 +0,0 @@
#! /bin/bash
set -e
# Update the connection limit
sysctl -w net.core.somaxconn=1024 || true

View File

@ -9,6 +9,51 @@ QUAYDIR = os.getenv("QUAYDIR", "/")
QUAYCONF_DIR = os.getenv("QUAYCONF", os.path.join(QUAYDIR, QUAYPATH, "conf"))
STATIC_DIR = os.path.join(QUAYDIR, 'static')
SSL_PROTOCOL_DEFAULTS = ['TLSv1', 'TLSv1.1', 'TLSv1.2']
SSL_CIPHER_DEFAULTS = [
'ECDHE-RSA-AES128-GCM-SHA256',
'ECDHE-ECDSA-AES128-GCM-SHA256',
'ECDHE-RSA-AES256-GCM-SHA384',
'ECDHE-ECDSA-AES256-GCM-SHA384',
'DHE-RSA-AES128-GCM-SHA256',
'DHE-DSS-AES128-GCM-SHA256',
'kEDH+AESGCM',
'ECDHE-RSA-AES128-SHA256',
'ECDHE-ECDSA-AES128-SHA256',
'ECDHE-RSA-AES128-SHA',
'ECDHE-ECDSA-AES128-SHA',
'ECDHE-RSA-AES256-SHA384',
'ECDHE-ECDSA-AES256-SHA384',
'ECDHE-RSA-AES256-SHA',
'ECDHE-ECDSA-AES256-SHA',
'DHE-RSA-AES128-SHA256',
'DHE-RSA-AES128-SHA',
'DHE-DSS-AES128-SHA256',
'DHE-RSA-AES256-SHA256',
'DHE-DSS-AES256-SHA',
'DHE-RSA-AES256-SHA',
'AES128-GCM-SHA256',
'AES256-GCM-SHA384',
'AES128-SHA256',
'AES256-SHA256',
'AES128-SHA',
'AES256-SHA',
'AES',
'CAMELLIA',
'!3DES',
'!aNULL',
'!eNULL',
'!EXPORT',
'!DES',
'!RC4',
'!MD5',
'!PSK',
'!aECDH',
'!EDH-DSS-DES-CBC3-SHA',
'!EDH-RSA-DES-CBC3-SHA',
'!KRB5-DES-CBC3-SHA',
]
def write_config(filename, **kwargs):
with open(filename + ".jnj") as f:
template = jinja2.Template(f.read())
@ -26,10 +71,16 @@ def generate_nginx_config(config):
use_https = os.path.exists(os.path.join(QUAYCONF_DIR, 'stack/ssl.key'))
use_old_certs = os.path.exists(os.path.join(QUAYCONF_DIR, 'stack/ssl.old.key'))
v1_only_domain = config.get('V1_ONLY_DOMAIN', None)
enable_rate_limits = config.get('FEATURE_RATE_LIMITS', False)
ssl_protocols = config.get('SSL_PROTOCOLS', SSL_PROTOCOL_DEFAULTS)
ssl_ciphers = config.get('SSL_CIPHERS', SSL_CIPHER_DEFAULTS)
write_config(os.path.join(QUAYCONF_DIR, 'nginx/nginx.conf'), use_https=use_https,
use_old_certs=use_old_certs,
v1_only_domain=v1_only_domain)
enable_rate_limits=enable_rate_limits,
v1_only_domain=v1_only_domain,
ssl_protocols=ssl_protocols,
ssl_ciphers=':'.join(ssl_ciphers))
def generate_server_config(config):
@ -41,17 +92,35 @@ def generate_server_config(config):
tuf_host = config.get('TUF_HOST', None)
signing_enabled = config.get('FEATURE_SIGNING', False)
maximum_layer_size = config.get('MAXIMUM_LAYER_SIZE', '20G')
enable_rate_limits = config.get('FEATURE_RATE_LIMITS', False)
write_config(
os.path.join(QUAYCONF_DIR, 'nginx/server-base.conf'), tuf_server=tuf_server, tuf_host=tuf_host,
signing_enabled=signing_enabled, maximum_layer_size=maximum_layer_size, static_dir=STATIC_DIR)
signing_enabled=signing_enabled, maximum_layer_size=maximum_layer_size,
enable_rate_limits=enable_rate_limits,
static_dir=STATIC_DIR)
def generate_rate_limiting_config(config):
"""
Generates rate limiting config from the app config
"""
config = config or {}
non_rate_limited_namespaces = config.get('NON_RATE_LIMITED_NAMESPACES') or set()
enable_rate_limits = config.get('FEATURE_RATE_LIMITS', False)
write_config(
os.path.join(QUAYCONF_DIR, 'nginx/rate-limiting.conf'),
non_rate_limited_namespaces=non_rate_limited_namespaces,
enable_rate_limits=enable_rate_limits,
static_dir=STATIC_DIR)
if __name__ == "__main__":
if os.path.exists(os.path.join(QUAYCONF_DIR, 'stack/config.yaml')):
with open(os.path.join(QUAYCONF_DIR, 'stack/config.yaml'), 'r') as f:
config = yaml.load(f)
else:
config = None
generate_rate_limiting_config(config)
generate_server_config(config)
generate_nginx_config(config)

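To illustrate how the new FEATURE_RATE_LIMITS, SSL_PROTOCOLS, and SSL_CIPHERS keys flow through nginx_conf_create.py above, here is a minimal sketch run in the context of that module (the override values are illustrative, not shipped defaults):
# Sketch only: shows how stack/config.yaml overrides are resolved by
# generate_nginx_config() above before being rendered into the nginx templates.
config = {
    'FEATURE_RATE_LIMITS': True,
    'SSL_PROTOCOLS': ['TLSv1.2'],
    'SSL_CIPHERS': ['ECDHE-RSA-AES128-GCM-SHA256', 'ECDHE-ECDSA-AES128-GCM-SHA256'],
}
ssl_protocols = config.get('SSL_PROTOCOLS', SSL_PROTOCOL_DEFAULTS)
ssl_ciphers = ':'.join(config.get('SSL_CIPHERS', SSL_CIPHER_DEFAULTS))
print(ssl_ciphers)  # ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256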
View File

@ -5,4 +5,4 @@ QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
cd $QUAYDIR
venv/bin/python $QUAYCONF/init/nginx_conf_create.py
python $QUAYCONF/init/nginx_conf_create.py

View File

@ -1,6 +1,10 @@
#!/bin/bash
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
set -e
cd ${QUAYDIR:-"/"}
# Run the database migration
PYTHONPATH=${QUAYPATH:-"."} venv/bin/alembic upgrade head
PYTHONPATH=${QUAYPATH:-"."} python $QUAYCONF/init/v3_migration.py > revision_head
PYTHONPATH=${QUAYPATH:-"."} alembic upgrade `cat revision_head`

View File

@ -1,4 +0,0 @@
#!/bin/sh
# Start the logger
exec logger -i -t ipresolverupdateworker

View File

@ -1,9 +0,0 @@
#! /bin/bash
echo 'Starting ip resolver update worker'
QUAYPATH=${QUAYPATH:-"."}
cd ${QUAYDIR:-"/"}
PYTHONPATH=$QUAYPATH venv/bin/python -m workers.ipresolverupdateworker 2>&1
echo 'IP resolver update worker exited'

View File

@ -0,0 +1,147 @@
import os
import os.path
import jinja2
QUAYPATH = os.getenv("QUAYPATH", ".")
QUAYDIR = os.getenv("QUAYDIR", "/")
QUAYCONF_DIR = os.getenv("QUAYCONF", os.path.join(QUAYDIR, QUAYPATH, "conf"))
QUAY_SERVICES = os.getenv("QUAY_SERVICES", [])
QUAY_OVERRIDE_SERVICES = os.getenv("QUAY_OVERRIDE_SERVICES", [])
def default_services():
return {
"blobuploadcleanupworker": {
"autostart": "true"
},
"buildlogsarchiver": {
"autostart": "true"
},
"builder": {
"autostart": "true"
},
"chunkcleanupworker": {
"autostart": "true"
},
"expiredappspecifictokenworker": {
"autostart": "true"
},
"exportactionlogsworker": {
"autostart": "true"
},
"gcworker": {
"autostart": "true"
},
"globalpromstats": {
"autostart": "true"
},
"labelbackfillworker": {
"autostart": "true"
},
"logrotateworker": {
"autostart": "true"
},
"namespacegcworker": {
"autostart": "true"
},
"notificationworker": {
"autostart": "true"
},
"queuecleanupworker": {
"autostart": "true"
},
"repositoryactioncounter": {
"autostart": "true"
},
"security_notification_worker": {
"autostart": "true"
},
"securityworker": {
"autostart": "true"
},
"storagereplication": {
"autostart": "true"
},
"tagbackfillworker": {
"autostart": "true"
},
"teamsyncworker": {
"autostart": "true"
},
"dnsmasq": {
"autostart": "true"
},
"gunicorn-registry": {
"autostart": "true"
},
"gunicorn-secscan": {
"autostart": "true"
},
"gunicorn-verbs": {
"autostart": "true"
},
"gunicorn-web": {
"autostart": "true"
},
"ip-resolver-update-worker": {
"autostart": "true"
},
"jwtproxy": {
"autostart": "true"
},
"memcache": {
"autostart": "true"
},
"nginx": {
"autostart": "true"
},
"prometheus-aggregator": {
"autostart": "true"
},
"servicekey": {
"autostart": "true"
},
"repomirrorworker": {
"autostart": "false"
}
}
def generate_supervisord_config(filename, config):
with open(filename + ".jnj") as f:
template = jinja2.Template(f.read())
rendered = template.render(config=config)
with open(filename, 'w') as f:
f.write(rendered)
def limit_services(config, enabled_services):
if enabled_services == []:
return
for service in config.keys():
if service in enabled_services:
config[service]["autostart"] = "true"
else:
config[service]["autostart"] = "false"
def override_services(config, override_services):
if override_services == []:
return
for service in config.keys():
if service + "=true" in override_services:
config[service]["autostart"] = "true"
elif service + "=false" in override_services:
config[service]["autostart"] = "false"
if __name__ == "__main__":
config = default_services()
limit_services(config, QUAY_SERVICES)
override_services(config, QUAY_OVERRIDE_SERVICES)
generate_supervisord_config(os.path.join(QUAYCONF_DIR, 'supervisord.conf'), config)
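
For illustration, a hedged sketch of how QUAY_SERVICES and QUAY_OVERRIDE_SERVICES interact when set as comma-separated strings (the values below are hypothetical, and the helpers are assumed to be the ones defined above); note that both checks are substring tests against the raw env string:

# Hedged sketch, not repository code: exercise the helpers above with
# hypothetical env values.
import os

os.environ["QUAY_SERVICES"] = "nginx,gunicorn-web"               # hypothetical value
os.environ["QUAY_OVERRIDE_SERVICES"] = "repomirrorworker=true"   # hypothetical value

config = default_services()
limit_services(config, os.environ["QUAY_SERVICES"])              # only nginx and gunicorn-web stay autostarted
override_services(config, os.environ["QUAY_OVERRIDE_SERVICES"])  # then re-enable the mirror worker
assert config["nginx"]["autostart"] == "true"
assert config["gcworker"]["autostart"] == "false"
assert config["repomirrorworker"]["autostart"] == "true"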

View File

@ -0,0 +1,8 @@
#!/bin/bash
QUAYDIR=${QUAYDIR:-"/"}
QUAYPATH=${QUAYPATH:-"."}
QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"}
cd $QUAYDIR
python $QUAYCONF/init/supervisord_conf_create.py

View File

@ -1,143 +0,0 @@
@version: 3.5
@include "scl.conf"
@include "`scl-root`/system/tty10.conf"
# Syslog-ng configuration file, compatible with default Debian syslogd
# installation.
# First, set some global options.
options { chain_hostnames(off); flush_lines(0); use_dns(no); use_fqdn(no);
owner("root"); group("adm"); perm(0640); stats_freq(0);
bad_hostname("^gconfd$"); keep-timestamp(no);
};
########################
# Sources
########################
# This is the default behavior of the sysklogd package
# Logs may come from unix stream, but not from another machine.
#
source s_src {
unix-stream("/dev/log");
internal();
};
# If you wish to get logs from a remote machine you should uncomment
# this and comment the above source line.
#
#source s_net { tcp(ip(127.0.0.1) port(1000)); };
########################
# Destinations
########################
# First some standard logfile
#
destination d_auth { file("/var/log/auth.log"); };
destination d_cron { file("/var/log/cron.log"); };
destination d_daemon { file("/var/log/daemon.log"); };
destination d_kern { file("/var/log/kern.log"); };
destination d_lpr { file("/var/log/lpr.log"); };
destination d_mail { file("/var/log/mail.log"); };
destination d_syslog { file("/var/log/syslog"); };
destination d_user { file("/var/log/user.log"); };
destination d_uucp { file("/var/log/uucp.log"); };
# These files are the logs that come from the mail subsystem.
#
destination d_mailinfo { file("/var/log/mail.info"); };
destination d_mailwarn { file("/var/log/mail.warn"); };
destination d_mailerr { file("/var/log/mail.err"); };
# Logging for INN news system
#
destination d_newscrit { file("/var/log/news/news.crit"); };
destination d_newserr { file("/var/log/news/news.err"); };
destination d_newsnotice { file("/var/log/news/news.notice"); };
# Some `catch-all' logfiles.
#
destination d_debug { file("/var/log/debug"); };
destination d_error { file("/var/log/error"); };
destination d_messages { file("/var/log/messages"); };
# The named pipe /dev/xconsole is for the `xconsole' utility. To use it,
# you must invoke `xconsole' with the `-file' option:
#
# $ xconsole -file /dev/xconsole [...]
#
destination d_xconsole { pipe("/dev/xconsole"); };
# Send the messages to another host
#
#destination d_net { tcp("127.0.0.1" port(1000) log_fifo_size(1000)); };
# Debian only
destination d_ppp { file("/var/log/ppp.log"); };
########################
# Filters
########################
# Here come the filter options. With these rules, we can set which
# messages go where.
filter f_dbg { level(debug); };
filter f_info { level(info); };
filter f_notice { level(notice); };
filter f_warn { level(warn); };
filter f_err { level(err); };
filter f_crit { level(crit .. emerg); };
filter f_debug { level(debug) and not facility(auth, authpriv, news, mail); };
filter f_error { level(err .. emerg) ; };
filter f_auth { facility(auth, authpriv) and not filter(f_debug); };
filter f_cron { facility(cron) and not filter(f_debug); };
filter f_daemon { facility(daemon) and not filter(f_debug); };
filter f_kern { facility(kern) and not filter(f_debug); };
filter f_lpr { facility(lpr) and not filter(f_debug); };
filter f_local { facility(local0, local1, local3, local4, local5,
local6, local7) and not filter(f_debug); };
filter f_mail { facility(mail) and not filter(f_debug); };
filter f_news { facility(news) and not filter(f_debug); };
filter f_syslog3 { not facility(auth, authpriv, mail) and not filter(f_debug); };
filter f_uucp { facility(uucp) and not filter(f_debug); };
filter f_cnews { level(notice, err, crit) and facility(news); };
filter f_cother { level(debug, info, notice, warn) or facility(daemon, mail); };
filter f_ppp { facility(local2) and not filter(f_debug); };
filter f_console { level(warn .. emerg); };
########################
# Log paths
########################
log { source(s_src); filter(f_auth); destination(d_auth); };
log { source(s_src); filter(f_cron); destination(d_cron); };
log { source(s_src); filter(f_daemon); destination(d_daemon); };
log { source(s_src); filter(f_kern); destination(d_kern); };
log { source(s_src); filter(f_lpr); destination(d_lpr); };
log { source(s_src); filter(f_syslog3); destination(d_syslog); };
log { source(s_src); filter(f_uucp); destination(d_uucp); };
log { source(s_src); filter(f_mail); destination(d_mail); };
#log { source(s_src); filter(f_mail); filter(f_info); destination(d_mailinfo); };
#log { source(s_src); filter(f_mail); filter(f_warn); destination(d_mailwarn); };
#log { source(s_src); filter(f_mail); filter(f_err); destination(d_mailerr); };
log { source(s_src); filter(f_news); filter(f_crit); destination(d_newscrit); };
log { source(s_src); filter(f_news); filter(f_err); destination(d_newserr); };
log { source(s_src); filter(f_news); filter(f_notice); destination(d_newsnotice); };
#log { source(s_src); filter(f_ppp); destination(d_ppp); };
log { source(s_src); filter(f_debug); destination(d_debug); };
log { source(s_src); filter(f_error); destination(d_error); };
# All messages send to a remote site
#
#log { source(s_src); destination(d_net); };
###
# Include all config files in /etc/syslog-ng/conf.d/
###
@include "/etc/syslog-ng/conf.d/*.conf"

View File

View File

@ -0,0 +1,778 @@
import os
import pytest
import json
import yaml
import jinja2
from ..supervisord_conf_create import QUAYCONF_DIR, default_services, limit_services
def render_supervisord_conf(config):
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../supervisord.conf.jnj")) as f:
template = jinja2.Template(f.read())
return template.render(config=config)
def test_supervisord_conf_create_defaults():
config = default_services()
limit_services(config, [])
rendered = render_supervisord_conf(config)
expected = """[supervisord]
nodaemon=true
[unix_http_server]
file=%(ENV_QUAYCONF)s/supervisord.sock
user=root
[supervisorctl]
serverurl=unix:///%(ENV_QUAYCONF)s/supervisord.sock
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[eventlistener:stdout]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command = supervisor_stdout
buffer_size = 1024
events = PROCESS_LOG
result_handler = supervisor_stdout:event_handler
;;; Run batch scripts
[program:blobuploadcleanupworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.blobuploadcleanupworker.blobuploadcleanupworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:buildlogsarchiver]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.buildlogsarchiver.buildlogsarchiver
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:builder]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m buildman.builder
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:chunkcleanupworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.chunkcleanupworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:expiredappspecifictokenworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.expiredappspecifictokenworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:exportactionlogsworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.exportactionlogsworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gcworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.gc.gcworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:globalpromstats]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.globalpromstats.globalpromstats
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:labelbackfillworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.labelbackfillworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:logrotateworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.logrotateworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:namespacegcworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.namespacegcworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:notificationworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.notificationworker.notificationworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:queuecleanupworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.queuecleanupworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:repositoryactioncounter]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.repositoryactioncounter
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:security_notification_worker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.security_notification_worker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:securityworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.securityworker.securityworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:storagereplication]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.storagereplication
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:tagbackfillworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.tagbackfillworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:teamsyncworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.teamsyncworker.teamsyncworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
;;; Run interactive scripts
[program:dnsmasq]
command=/usr/sbin/dnsmasq --no-daemon --user=root --listen-address=127.0.0.1 --port=8053
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gunicorn-registry]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s,
DB_CONNECTION_POOLING=%(ENV_DB_CONNECTION_POOLING_REGISTRY)s
command=nice -n 10 gunicorn -c %(ENV_QUAYCONF)s/gunicorn_registry.py registry:application
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gunicorn-secscan]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -c %(ENV_QUAYCONF)s/gunicorn_secscan.py secscan:application
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gunicorn-verbs]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=nice -n 10 gunicorn -c %(ENV_QUAYCONF)s/gunicorn_verbs.py verbs:application
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gunicorn-web]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -c %(ENV_QUAYCONF)s/gunicorn_web.py web:application
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:jwtproxy]
command=/usr/local/bin/jwtproxy --config %(ENV_QUAYCONF)s/jwtproxy_conf.yaml
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:memcache]
command=memcached -u memcached -m 64 -l 127.0.0.1 -p 18080
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:nginx]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=nginx -c %(ENV_QUAYCONF)s/nginx/nginx.conf
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:prometheus-aggregator]
command=/usr/local/bin/prometheus-aggregator
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:servicekey]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.servicekeyworker.servicekeyworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:repomirrorworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.repomirrorworker.repomirrorworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
# EOF NO NEWLINE"""
assert rendered == expected
def test_supervisord_conf_create_all_overrides():
config = default_services()
limit_services(config, "servicekey,prometheus-aggregator")
rendered = render_supervisord_conf(config)
expected = """[supervisord]
nodaemon=true
[unix_http_server]
file=%(ENV_QUAYCONF)s/supervisord.sock
user=root
[supervisorctl]
serverurl=unix:///%(ENV_QUAYCONF)s/supervisord.sock
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[eventlistener:stdout]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command = supervisor_stdout
buffer_size = 1024
events = PROCESS_LOG
result_handler = supervisor_stdout:event_handler
;;; Run batch scripts
[program:blobuploadcleanupworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.blobuploadcleanupworker.blobuploadcleanupworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:buildlogsarchiver]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.buildlogsarchiver.buildlogsarchiver
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:builder]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m buildman.builder
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:chunkcleanupworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.chunkcleanupworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:expiredappspecifictokenworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.expiredappspecifictokenworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:exportactionlogsworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.exportactionlogsworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gcworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.gc.gcworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:globalpromstats]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.globalpromstats.globalpromstats
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:labelbackfillworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.labelbackfillworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:logrotateworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.logrotateworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:namespacegcworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.namespacegcworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:notificationworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.notificationworker.notificationworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:queuecleanupworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.queuecleanupworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:repositoryactioncounter]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.repositoryactioncounter
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:security_notification_worker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.security_notification_worker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:securityworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.securityworker.securityworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:storagereplication]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.storagereplication
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:tagbackfillworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.tagbackfillworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:teamsyncworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.teamsyncworker.teamsyncworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
;;; Run interactive scripts
[program:dnsmasq]
command=/usr/sbin/dnsmasq --no-daemon --user=root --listen-address=127.0.0.1 --port=8053
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gunicorn-registry]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s,
DB_CONNECTION_POOLING=%(ENV_DB_CONNECTION_POOLING_REGISTRY)s
command=nice -n 10 gunicorn -c %(ENV_QUAYCONF)s/gunicorn_registry.py registry:application
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gunicorn-secscan]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -c %(ENV_QUAYCONF)s/gunicorn_secscan.py secscan:application
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gunicorn-verbs]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=nice -n 10 gunicorn -c %(ENV_QUAYCONF)s/gunicorn_verbs.py verbs:application
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:gunicorn-web]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=gunicorn -c %(ENV_QUAYCONF)s/gunicorn_web.py web:application
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:jwtproxy]
command=/usr/local/bin/jwtproxy --config %(ENV_QUAYCONF)s/jwtproxy_conf.yaml
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:memcache]
command=memcached -u memcached -m 64 -l 127.0.0.1 -p 18080
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:nginx]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=nginx -c %(ENV_QUAYCONF)s/nginx/nginx.conf
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:prometheus-aggregator]
command=/usr/local/bin/prometheus-aggregator
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:servicekey]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.servicekeyworker.servicekeyworker
autostart = true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
[program:repomirrorworker]
environment=
PYTHONPATH=%(ENV_QUAYDIR)s
command=python -m workers.repomirrorworker.repomirrorworker
autostart = false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
stdout_events_enabled = true
stderr_events_enabled = true
# EOF NO NEWLINE"""
assert rendered == expected
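
A further hedged check (not part of the diff) of the mirror-worker default, assuming the same relative import used by the tests above:

def test_repomirrorworker_disabled_by_default():
    # Hedged sketch: repomirrorworker ships with autostart=false and is only
    # enabled through a QUAY_OVERRIDE_SERVICES-style override.
    from ..supervisord_conf_create import default_services, override_services
    config = default_services()
    assert config["repomirrorworker"]["autostart"] == "false"
    override_services(config, "repomirrorworker=true")
    assert config["repomirrorworker"]["autostart"] == "true"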

18
conf/init/v3_migration.py Normal file
View File

@ -0,0 +1,18 @@
from app import app
from active_migration import ActiveDataMigration
if not app.config.get('SETUP_COMPLETE', False):
print 'head'
else:
v3_upgrade_mode = app.config.get('V3_UPGRADE_MODE')
if v3_upgrade_mode == 'background':
raise Exception('V3_UPGRADE_MODE must be "complete". This requires a full upgrade to Quay:v3.0. See https://access.qa.redhat.com/documentation/en-us/red_hat_quay/3/html/upgrade_quay/index')
elif v3_upgrade_mode == 'production-transition':
print '481623ba00ba'
elif v3_upgrade_mode == 'post-oci-rollout' or v3_upgrade_mode == 'post-oci-roll-back-compat' or v3_upgrade_mode == 'complete':
if ActiveDataMigration is not None:
print ActiveDataMigration.alembic_migration_revision
else:
print 'head'
else:
raise Exception('Unknown V3_UPGRADE_MODE: %s' % v3_upgrade_mode)
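
A hedged restatement of the branching above as a plain function (names are illustrative, not repository code); the entrypoint shown earlier in this diff captures this script's stdout into revision_head and runs alembic upgrade against it:

def target_revision(v3_upgrade_mode, active_migration_revision=None, setup_complete=True):
    # Hedged sketch restating the mode -> alembic revision mapping above.
    if not setup_complete:
        return 'head'
    if v3_upgrade_mode == 'background':
        raise Exception('V3_UPGRADE_MODE must be "complete"')
    if v3_upgrade_mode == 'production-transition':
        return '481623ba00ba'
    if v3_upgrade_mode in ('post-oci-rollout', 'post-oci-roll-back-compat', 'complete'):
        return active_migration_revision or 'head'
    raise Exception('Unknown V3_UPGRADE_MODE: %s' % v3_upgrade_mode)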

View File

@ -1,4 +1,4 @@
#!/bin/bash
cd ${QUAYDIR:-"/"}
venv/bin/python ${QUAYPATH:-"."}/boot.py
python ${QUAYPATH:-"."}/boot.py

View File

@ -1,7 +1,7 @@
jwtproxy:
signer_proxy:
enabled: true
listen_addr: :8080
listen_addr: :8081
ca_key_file: {{ conf_dir }}/mitm.key
ca_crt_file: {{ conf_dir }}/mitm.cert
@ -13,10 +13,11 @@ jwtproxy:
type: preshared
options:
key_id: {{ key_id }}
private_key_path: {{ conf_dir }}/quay.pem
private_key_path: {{ service_key_location }}
verifier_proxies:
- enabled: true
listen_addr: unix:/tmp/jwtproxy_secscan.sock
socket_permission: 0777
verifier:
upstream: unix:/tmp/gunicorn_secscan.sock
audience: {{ audience }}

View File

@ -1,7 +1,7 @@
# vim: ft=nginx
server {
listen 80 default_server;
listen 8080 default_server;
server_name _;
rewrite ^ https://$host$request_uri? permanent;
}

View File

@ -9,10 +9,18 @@ log_format lb_logs '$remote_addr ($proxy_protocol_addr) '
'($request_time $request_length $upstream_response_time)';
types_hash_max_size 2048;
include /etc/nginx/mime.types;
include /etc/opt/rh/rh-nginx112/nginx/mime.types;
default_type application/octet-stream;
access_log /dev/stdout;
access_log /var/log/nginx/access.log;
error_log /var/log/nginx/error.log;
client_body_temp_path /tmp/nginx 1 2;
proxy_temp_path /tmp/nginx-proxy;
fastcgi_temp_path /tmp/nginx-fastcgi;
uwsgi_temp_path /tmp/nginx-uwsgi;
scgi_temp_path /tmp/nginx-scgi;
sendfile on;
gzip on;
@ -48,6 +56,13 @@ upstream registry_app_server {
server unix:/tmp/gunicorn_registry.sock fail_timeout=0;
}
# NOTE: Exposed for the _internal_ping *only*. All other secscan routes *MUST* go through
# the jwtproxy.
upstream secscan_app_server {
server unix:/tmp/gunicorn_secscan.sock fail_timeout=0;
}
upstream build_manager_controller_server {
server localhost:8686;
}

View File

@ -11,21 +11,16 @@ http {
server_names_hash_bucket_size 64;
resolver 127.0.0.1 valid=10s;
resolver 127.0.0.1:8053 valid=10s;
ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:!3DES:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers '{{ ssl_ciphers }}';
ssl_protocols {% for ssl_protocol in ssl_protocols %}{{ ssl_protocol }} {% endfor %};
ssl_session_cache shared:SSL:60m;
ssl_session_timeout 2h;
ssl_session_tickets on;
ssl_prefer_server_ciphers on;
ssl_dhparam dhparams.pem;
# TODO: learn OCSP stapling and do it the right way
#ssl_stapling on;
#ssl_stapling_verify off;
#ssl_trusted_certificate ../stack/ssl.key;
server {
server_name _;
@ -34,14 +29,14 @@ http {
include server-base.conf;
listen 443 ssl http2 default;
listen 8443 ssl http2 default;
ssl on;
# This header must be set only for HTTPS
add_header Strict-Transport-Security "max-age=63072000; preload";
access_log /dev/stdout lb_logs;
access_log /var/log/nginx/access.log lb_logs;
}
server {
@ -52,7 +47,7 @@ http {
include server-base.conf;
listen 8443 ssl http2 default proxy_protocol;
listen 7443 ssl http2 default proxy_protocol;
ssl on;
# This header must be set only for HTTPS
@ -60,7 +55,7 @@ http {
real_ip_header proxy_protocol;
access_log /dev/stdout lb_logs;
access_log /var/log/nginx/access.log lb_logs;
}
{% if v1_only_domain %}
@ -77,14 +72,14 @@ http {
ssl_certificate_key ../stack/ssl.key;
{% endif %}
listen 443 ssl;
listen 8443 ssl;
ssl on;
# This header must be set only for HTTPS
add_header Strict-Transport-Security "max-age=63072000; preload";
access_log /dev/stdout lb_logs;
access_log /var/log/nginx/access.log lb_logs;
}
server {
@ -100,7 +95,7 @@ http {
include server-base.conf;
listen 8443 ssl proxy_protocol;
listen 7443 ssl proxy_protocol;
ssl on;
# This header must be set only for HTTPS
@ -108,7 +103,7 @@ http {
real_ip_header proxy_protocol;
access_log /dev/stdout lb_logs;
access_log /var/log/nginx/access.log lb_logs;
}
{% endif %}
@ -120,14 +115,14 @@ http {
include http-base.conf;
include rate-limiting.conf;
resolver 127.0.0.1 valid=10s;
resolver 127.0.0.1:8053 valid=10s;
server {
include server-base.conf;
listen 80 default;
listen 8080 default;
access_log /dev/stdout lb_logs;
access_log /var/log/nginx/access.log lb_logs;
}
}

Some files were not shown because too many files have changed in this diff.