Compare commits


No commits in common. "master" and "v1.18.1" have entirely different histories.

1873 changed files with 45512 additions and 135968 deletions


@@ -1,8 +1,4 @@
-./ci/
conf/stack
-conf/stack/**
-conf/stack/config.yaml
-config.yaml
screenshots
tools
test/data/registry
@@ -10,26 +6,10 @@ venv
.git
!.git/HEAD
.gitignore
-.github
Bobfile
README.md
ROADMAP.md
requirements-nover.txt
run-local.sh
.DS_Store
-**/*.pyc
+*.pyc
-.tox
-htmlcov
-.coverage
-coverage
-.cache
-.npm-debug.log
-test/__pycache__
-__pycache__
-**/__pycache__
-static/build/**
-.gitlab-ci/*
-.gitlab-ci.*
-docker-compose.yaml
-test/dockerclients/**
-node_modules


@@ -1,29 +0,0 @@
### Description of Changes
* details about the implementation of the changes
* motivation for the change (broken code, new feature, etc)
* contrast with previous behavior
#### Changes:
* ..
* ..
#### Issue: <link to story or task>
**TESTING** ->
**BREAKING CHANGE** ->
---
## Reviewer Checklist
- [ ] It works!
- [ ] Comments provide sufficient explanations for the next contributor
- [ ] Tests cover changes and corner cases
- [ ] Follows Quay syntax patterns and format

.gitignore

@@ -1,30 +1,14 @@
*.pyc
venv
screenshots/screenshots/
-conf/stack
+stack
-*/node_modules
+grunt/node_modules
dist
dest
node_modules
static/ldn
static/fonts
-static/build
stack_local
test/data/registry/
GIT_HEAD
.idea
-.python-version
-.pylintrc
-.coverage
-coverage
-htmlcov
-.tox
-.cache
-.npm-debug.log
-Dockerfile-e
-.vscode
-*.iml
-.DS_Store
-.pytest_cache/*
-test/dockerclients/Vagrantfile
-test/dockerclients/.*


@@ -1,37 +0,0 @@
[style]
based_on_style = chromium
COLUMN_LIMIT=99
INDENT_WIDTH=2
BLANK_LINE_BEFORE_CLASS_DOCSTRING=False
#True
ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT=True
# False
ALLOW_MULTILINE_DICTIONARY_KEYS=True
# False
BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF=False
# False
COALESCE_BRACKETS=True
DEDENT_CLOSING_BRACKETS=False
CONTINUATION_INDENT_WIDTH=2
# False
INDENT_DICTIONARY_VALUE=True
JOIN_MULTIPLE_LINES=False
# True
SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET=False
# True
SPLIT_BEFORE_NAMED_ASSIGNS=False
SPLIT_PENALTY_AFTER_OPENING_BRACKET=30
SPLIT_PENALTY_AFTER_UNARY_OPERATOR=10000
SPLIT_PENALTY_BEFORE_IF_EXPR=0
SPLIT_PENALTY_BITWISE_OPERATOR=300
SPLIT_PENALTY_EXCESS_CHARACTER=10000
SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT=30
SPLIT_PENALTY_IMPORT_NAMES=450
SPLIT_PENALTY_LOGICAL_OPERATOR=300
USE_TABS=False
SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED=False
# Align closing bracket with visual indentation.
align_closing_bracket_with_visual_indent=True
# Allow lambdas to be formatted on more than one line.
allow_multiline_lambdas=True
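The block above is a yapf configuration. A minimal usage sketch, assuming the file is saved as .style.yapf at the repository root (the filename is not shown in this diff), yapf is installed, and the target path is only a placeholder:
# Reformat a file in place with the project style; yapf also picks up .style.yapf
# automatically when run from the repository root.
yapf -i --style=.style.yapf path/to/module.py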


@@ -1,103 +0,0 @@
---
language: python
python: 2.7
sudo: required
services:
- docker
install: true
branches:
only:
- master
# Stop default database instances here to avoid port conflicts.
before_script:
- sudo service mysql stop
- sudo service postgresql stop
# Clean the cache if any step fails.
before_cache:
- scripts/ci fail-clean
cache:
timeout: 1000
directories:
- $HOME/docker
stages:
- build
- test
- clean
# We should label the steps if Travis ever supports it:
# https://github.com/travis-ci/travis-ci/issues/5898
jobs:
include:
- stage: build
name: Build
script: scripts/ci build
# To further shard, change the script to shard_X_of_XS and add new steps
- stage: test
name: Unit tests (shard 1)
script: scripts/ci unit shard_1_of_2
- stage: test
name: Unit tests (shard 2)
script: scripts/ci unit shard_2_of_2
- stage: test
name: Registry tests (shard 1)
script: scripts/ci registry shard_1_of_5
- stage: test
name: Registry tests (shard 2)
script: scripts/ci registry shard_2_of_5
- stage: test
name: Registry tests (shard 3)
script: scripts/ci registry shard_3_of_5
- stage: test
name: Registry tests (shard 4)
script: scripts/ci registry shard_4_of_5
- stage: test
name: Registry tests (shard 5)
script: scripts/ci registry shard_5_of_5
- stage: test
name: Legacy registry tests
script: scripts/ci registry_old
- stage: test
name: Custom TLS certs test
script: scripts/ci certs_test
- stage: test
name: Gunicorn worker test
script: scripts/ci gunicorn_test
- stage: test
name: MySQL unit tests (shard 1)
script: scripts/ci mysql shard_1_of_2
- stage: test
name: MySQL unit tests (shard 2)
script: scripts/ci mysql shard_2_of_2
- stage: test
name: Postgres unit tests (shard 1)
script: scripts/ci postgres shard_1_of_2
- stage: test
name: Postgres unit tests (shard 2)
script: scripts/ci postgres shard_2_of_2
- stage: clean
name: Cleanup
script: scripts/ci clean
notifications:
slack:
rooms:
- secure: "fBR3YMXaOkoX2Iz7oSJVAw9zrcDoqwadiMEWTWhx7Ic0zoM8IieD2EWIcDHAoGpqf3ixHkc1v/iLBpbWHgvK7TkrSrGEbFyEmu/uomuHU8oGTiazWCbMWg9T2mhWYFyVaKtt8bzMbFo8k72kYK/NWV8bR4W/Qe/opkH2GGzfhZA="
on_success: change
on_failure: always
on_pull_requests: false


@@ -1,457 +1,6 @@
+### v1.18.1
+- Fixed: Exception when using RADOS GW Storage driver (#2057)
### v3.1.2
- Fixed: Repository mirroring properly updates status
- Fixed: Application repositories in public namespaces shown in UI
- Fixed: Description of log operations in UI
- Fixed: Quay V3 upgrade fails with "id field missing from v1Compatibility JSON"
- Fixed: Security token for storage proxy properly URL encoded
### v3.1.1
- Fixed: Quoting of username/password for repository mirror
- Fixed: Changing next sync date in repository mirror UI
- Fixed: Enable cancel button in repository mirror UI
### v3.1.0
- Added: New Repository Mirror functionality to continuously synchronize repositories from external source registries into Quay
- Added: New Repository Mode setting (Normal, Mirrored, Read-Only) to indicate how a repository is updated
- Added: New Quay Setup Operator (Dev Preview) to automate configuring Quay on OpenShift
- Added: Support for using Red Hat OpenShift Container Storage 3 as a Quay storage backend
- Added: Support for using the Crunchy Data Operator to deploy Postgresql as Quay database
- Added: Ability to use build ARGS as first line in Dockerfiles in Quay builds
- Added: New Red Hat color scheme in Quay web UI
- Fixed: Display of repo_verb logs in logs panel
- Fixed: Ensure robot accounts being granted access actually belong in the same namespace
- Fixed: Numerous documentation improvements
### v3.0.5
- Fixed: LDAP config error when user search results exceed 1000 objects ([QUAY-1736](https://jira.coreos.com/browse/QUAY-1736))
- Fixed: Remove obsolete 01_copy_syslog_config.sh ([QUAY-1768](https://jira.coreos.com/browse/QUAY-1768))
- Fixed: Config tool fails to set up database when password string contains "$" ([QUAY-1510](https://jira.coreos.com/browse/QUAY-1510))
- Added: Config flag to disable TLSv1.0 support ([QUAY-1726](https://jira.coreos.com/browse/QUAY-1726))
### v3.0.4
- Fixed: Package vulnerability notifications now shown in UI
- Fixed: Error deleting manifest after pushing new tag
- Fixed: Manifest now shown in UI for all types
- Fixed: CSRF rotation corrected
- Fixed: nginx access and error logs now to stdout
### v3.0.3
- Fixed: Security scan notifications endpoint not working (part #2) (#3472)
- Fixed: Exception raised during parallel pushes of same manifest on Postgres (#3478)
- Fixed: Connection pooling was ignoring environment variable (#3480)
- Fixed: Exception when in OAuth approval flow (#3491)
### v3.0.2
- Fixed: Configuration tool now operates in disconnected environments (#3468)
- Fixed: Security scan notifications endpoint not working (#3472)
### v3.0.1
- Fixed: Instance health endpoint (`/health/instance`) (#3467)
### v3.0.0
**IMPORTANT NOTE:** This release is a **major** release and has special upgrade instructions. Please see the upgrade instructions documentation.
- Added: Full support for Docker Manifest Version 2, Schema 2, including support for manifest lists and Windows images
- Added: New, distinct configuration tool for Quay that can be run outside of Quay itself and perform in-place configuration changes
- Added: Disabling of V1 push support by default and support for whitelist-enabling specific namespaces for this legacy protocol (#3398)
- Added: Full support for blob mounting via the Docker protocol (#3057)
- Added: Have all registry operations be disabled if a namespace is disabled (#3091)
- Added: Allow syncing of team members from LDAP/Keystone groups, even if user creation is disabled (#3089)
- Added: Add a feature flag to allow username confirmation to be disabled (#3099)
- Added: New indexes which should result in significantly improved database performance when accessing lists of tags
- Added: Add support for POST on OIDC endpoints, to support those providers that POST back (#3246)
- Added: Add support for configuration of the claims required for OIDC authentication (#3246)
- Added: Have the instance health check verify the disk space available to ensure it doesn't run out and cause problems for nginx (#3241)
- Added: Support for basic auth on security scanner API endpoints (#3255)
- Added: Support for geo-blocking pulls in a namespace from a country (#3300)
- Fixed: Ensure that starred public repositories appear in the starred repositories list (#3098)
- Fixed: Add rate limiting to the catalog endpoint (#3106)
- Fixed: Have the catalog endpoint return empty for a namespace if it is disabled (#3106)
- Fixed: Have user logs start writing to a new LogEntry3 table, which has a BigInteger ID column, to ensure no overflow
- Fixed: Improve loading of action logs to be less jumpy (#3299)
- Fixed: Ensure that all upload segments are deleted in Swift storage engine once no longer necessary (#3260)
- Fixed: Handling of unicode in manifests (#3325)
- Fixed: Unauthorized request handling under podman for public repositories when anonymous access is disabled (#3365)
### v2.9.2
**IMPORTANT NOTE:** This release fixes a bug in which the deletion of namespaces did not result in the deletion of robot accounts under that namespace. While this is not a security issue (no permissions or credentials are leaked), it can appear unusual to users, so an upgrade is highly recommended. This change also includes a migration that cleans up the aforementioned robot accounts, so the migration step can take **several minutes**. Please plan accordingly.
- Added: Support for custom query parameters on OIDC endpoints (#3050)
- Added: Configurable options for search page length and maximum number of pages (#3060)
- Added: Better messaging for when the maximum search page is reached (#3060)
- Added: Support for browser notifications (#3068)
- Fixed: Robot accounts were not being immediately deleted under namespaces (#3071)
- Fixed: Setup under latest versions of Kubernetes (#3051)
- Fixed: Viewing of logs in repositories with many, many logs (#3082)
- Fixed: Filtering of deleting users and organizations in superuser panel (#3080)
- Fixed: Incorrect information displayed for builds triggered by deleted build triggers (#3078)
- Fixed: Robots could not be created with empty descriptions (#3073)
- Fixed: Inability to find Dockerfile in certain archives (#3072)
- Fixed: Display of empty tab in credentials dialog under certain circumstances (#3061)
- Fixed: Overflow of robot names when extremely long (#3062)
- Fixed: Respect CPU affinity when determining number of workers to run (#3064)
- Fixed: Breakage in RECAPTCHA support (#3065)
### v2.9.1
**IMPORTANT NOTE:** This release fixes the 2.9.0 migration. If you experienced an error during the 2.9.0 migration, manually rollback and then upgrade your quay instance to 2.9.1.
- Fixed: Specify default server value for new integer fields added (#3052)
- Fixed: Overflow of repository grid UI (#3049)
### v2.9.0
- Added: Automatic cleanup of expired external application tokens (#3002)
- Added: Make deletions of namespaces occur in the background (#3014)
- Added: Ability to disable build triggers (#2892)
- Added: Have repeatedly failing build triggers be automatically disabled (#2892)
- Added: Automatic caching of registry Blob data for faster pull operations (#3022)
- Added: Creation date/time, last usage date/time and other metadata for robot accounts (#3024)
- Added: Collaborators view under organizations, for viewing non-members (#3025)
- Fixed: Make superusers APIs for users and organizations visible in the API browser (#3017)
- Fixed: Better messaging when attempting to create a team that already exists (#3006)
- Fixed: Prevent possible reflected text attacks by limiting API access (#2987)
- Fixed: Have checkable menus in UI respect filters (#3013)
- Fixed: Users being invited to a new organization must always be invited (#3029)
- Fixed: Removed all license requirements in Quay (#3031)
- Fixed: Squashed images with hard links pointing to deleted files no longer fail (#3032)
- Fixed: 500 error when trying to pull certain images via torrent (#3036)
### v2.8.0
- Added: Support for Azure Blob Storage (#2902)
- Added: Ability to filter out disabled users in users list API (#2954)
- Added: Image ID in expanded tags view (#2965)
- Added: Processes auto-scale based on CPU count (#2971, 2978)
- Added: Health checks for all workers (#2977)
- Added: Health checks and auto-rotation for service keys (#2909)
- Added: Ability to back GitHub or Google login with LDAP/Keystone (#2983)
- Added: Configurable page size for Docker Registry V2 API pagination (#2993)
- Fixed: Anonymous calls to API discovery endpoint (#2953)
- Fixed: Optimized creation of repositories
- Fixed: Optimized manifest pushing
- Fixed: LDAP password input is now password field (#2970)
- Fixed: 500 raised when sending an invalid release name for app repos (#2979)
- Fixed: Deletion of expired external app tokens (#2981)
- Fixed: Sizing of OIDC login buttons (#2990)
- Fixed: Hide build-related UI when builds are not enabled (#2991)
- Fixed: Incorrect caching of external application token expiration (#2996)
- Fixed: Warning bar should not be displayed for already expired application tokens (#3003)
### v2.7.0
**NOTE:** This release *removes* support for the OIDC token internal authentication mechanism and replaces it with support for a new app-specific token system. All customers using the old OIDC token auth mechanism must manually update their configuration in `config.yaml` after upgrading.
- Added: Support for external application tokens to be used on the Docker CLI (#2942)
- Added: Explore tab for browsing visible repositories (#2921)
- Added: Ability to view and copy full manifest SHAs in tags view (#2898)
- Added: Support for robot tokens in App Registry pushes and pulls (#2899)
- Fixed: Failure when attempting to use Skopeo tool to access the registry (#2950)
- Fixed: Ordering of segments in Swift to match spec (#2920)
- Fixed: Squashed image downloading when using Postgres DB (#2930)
- Fixed: Hide "Start Build" button if the action is not allowed (#2916)
- Fixed: Exception when pushing certain labels with JSON-like contents (#2912)
- Fixed: Don't add password required notification for non-database auth (#2910)
- Fixed: Tags UI spacing on small displays (#2904)
- Fixed: Push updated notification now shows correct tags (#2897)
- Fixed: "Restart Container" button in superuser config panel (#2928)
- Fixed: Various small JavaScript security fixes
### v2.6.2
- Fixed: Failure to register uploaded TLS certificates (#2946)
### v2.6.1
- Added: Optimized overhead for direct downloads from Swift storage (#2889)
- Fixed: Immediately expire image builds that fail to start (#2887)
- Fixed: Failure to list all GitHub Enterprise namespaces (#2894)
- Fixed: Incorrect links to builds in notifications (#2895)
- Fixed: Failure to delete certain app repositories (#2893)
- Fixed: Inability to display Tag Signing status (#2890)
- Fixed: Broken health check for OIDC authentication (#2888)
### v2.6.0
- Added: Ability to use OIDC token for CLI login (#2695)
- Added: Documentation for OIDC callback URLs in setup tool
- Added: Ability for users to change their family and given name and company info (#2870)
- Added: Support for invite-only user sign up (#2867)
- Added: Option to disable partial autocompletion of users (#2864)
- Added: Georeplication support in Swift storage (#2874)
- Fixed: Namespace links ending in slashes (#2871)
- Fixed: Contact info setup in setup tool (#2866)
- Fixed: Lazy loading of teams and robots (#2883)
- Fixed: OIDC auth headers (#2695)
### v2.5.0
- Added: Better TLS caching (#2860)
- Added: Feature flag to allow read-only users to see build logs (#2850)
- Added: Feature flag to enable team sync setup when not a superuser (#2813)
- Added: Preferred public organizations list (#2850)
- Added: OIDC support for OIDC implementations without user info endpoint (#2817)
- Added: Support for tag expiration, in the UI and via a special `quay.expires-after` label (#2718)
- Added: Health checks report failure reasons (#2636)
- Added: Enable database connection pooling (#2834)
- Fixed: setting of team resync option
- Fixed: Purge repository on very large repositories
### v2.4.0
- Added: Kubernetes Applications Support
- Added: Full-page search UI (#2529)
- Added: Always generate V2 manifests for tag operations in UI (#2608)
- Added: Option to enable public repositories in v2 catalog API (#2654)
- Added: Disable repository notifications after 3 failures (#2652)
- Added: Remove requirement for flash for copy button in UI (#2667)
- Fixed: Upgrade support for Markdown (#2624)
- Fixed: Kubernetes secret generation with secrets with CAPITAL names (#2640)
- Fixed: Content-Length reporting on HEAD requests (#2616)
- Fixed: Use configured email address as the sender in email notifications (#2635)
- Fixed: Better performance on permissions lookup (#2628)
- Fixed: Disable federated login for new users if user creation is disabled (#2623)
- Fixed: Show build logs timestamps by default (#2647)
- Fixed: Custom TLS certificates tooling in superuser panel under Kubernetes (#2646, #2663)
- Fixed: Disable debug logs in superuser panel when under multiple instances (#2663)
- Fixed: External Notification Modal UI bug (#2650)
- Fixed: Security worker thrashing when security scanner not available
- Fixed: Torrent validation in superuser config panel (#2694)
- Fixed: Expensive database call in build badges (#2688)
### v2.3.4
- Added: Always show tag expiration options in superuser panel
### v2.3.3
- Added: Prometheus metric for queued builds (#2596)
- Fixed: Allow selection of Gitlab repository when Gitlab sends no permissions (#2601)
- Fixed: Failure when viewing Gitlab repository with unexpected schema (#2599)
- Fixed: LDAP stability fixes (#2598, #2584, #2595)
- Fixed: Viewing of repositories with trust enabled caused a 500 (#2594, #2593)
- Fixed: Failure in setup tool when time machine config is not set (#2589)
### v2.3.2
- Added: Configuration of time machine in UI (#2516)
- Fixed: Auth header in OIDC login UserInfo call (#2585)
- Fixed: Flash of red error box on loading (#2562)
- Fixed: Search under postgres (#2568)
- Fixed: Gitlab namespaces with null avatars (#2570)
- Fixed: Build log archiver race condition which results in missing logs (#2575)
- Fixed: Team synchronization when encountering a user with a shared email address (#2580)
- Fixed: Create New tooltip hiding dropdown menu (#2579)
- Fixed: Ensure build logs archive lookup URL checks build permissions (#2578)
### v2.3.1
**IMPORTANT NOTE:** This release fixes the 2.3.0 migration. If you experienced an error during the 2.3.0 migration, manually rollback and then upgrade your quay instance to 2.3.1.
- Fixed: Specify default server value for new bool field added to the repository table
### v2.3.0
- Added: LDAP Team Sync support (#2387, #2527)
- Added: Improved search performance through pre-computed scores (#2441, #2531, #2533, #2539)
- Added: Ability to allow pulls even if audit logging fails (#2306)
- Added: Full error information for build errors in Superuser panel (#2505)
- Added: Better error messages passed to the Docker client (#2499)
- Added: Custom git triggers can specify separate build context directory (#2517, #2509)
- Added: Improved performance on repository list API (#2542, #2544, #2546)
- Fixed: Handle undefined case in build message (#2501)
- Fixed: OIDC configuration in Superuser panel (#2520)
- Fixed: Ability to invite team members by email address (#2522)
- Fixed: Avatars for non-owner namespaces in GitLab (#2507, #2532)
- Fixed: Update dependencies and remove warnings (#2518, #2511, #2535, #2545, #2553)
- Fixed: Remove link to blog (#2523)
- Fixed: Better handling for unavailable frontend dependencies (#2503)
- Fixed: Top level redirect logic for missing repositories (#2540)
- Fixed: Remove extra slash from missing base image permissions error in build logs (#2548)
- Fixed: Backfill replication script when adjusting replication destinations (#2555)
- Fixed: Errors when deleting repositories without security scanning enabled (#2554)
### v2.2.0
**IMPORTANT NOTE:** This release contains a migration which adds a new feature to the build system. This requires shutting down the entire cluster _including builders_ and running one instance to migrate the database forward. You _must_ use a v2.2.0 builder with a v2.2.0 Quay cluster.
- Added: Separate build contexts from Dockerfile locations (#2398, #2410, #2438, #2449, #2480, #2481)
- Added: Configuration and enforcement of maximum layer size (#2388)
- Added: OIDC configuration in the Super User Panel (#2393)
- Added: Batching of Security Scanner notifications (#2397)
- Added: Auth Failures now display messages on the docker client (#2428, #2474)
- Added: Redesigned Tags page to include Labels, Image ID Type, and more informative Security Scanner information (#2416)
- Fixed: Parsing new docker client version format (#2378)
- Fixed: Improved repository search performance (#2392, #2440)
- Fixed: Miscellaneous Build Trigger page issues (#2405, #2406, #2407, #2408, #2409, #2414, #2418, #2445)
- Fixed: Remove all actionable CVEs from the docker image (#2422, #2468)
- Fixed: Minor bugs in Repository views (#2423, #2430, #2431)
- Fixed: Improve performance by deleting keys in redis rather than expiring (#2439)
- Fixed: Better error messages when configuring cloud storage (#2444)
- Fixed: Validation and installation of custom TLS certificates (#2473)
- Fixed: Garbage Collection corner case (#2404)
### v2.1.0
**IMPORTANT NOTE FOR POSTGRES USERS:** This release contains a migration which adds full-text searching capabilities to Red Hat Quay. In order to support this feature, the migration will attempt to create the `pg_trgm` extension in the database. This operation requires **superuser access** to run and requires the extension to be installed. See https://coreos.com/quay-enterprise/docs/latest/postgres-additional-modules.html for more information on installing the extension.
If the user given to Red Hat Quay is not a superuser, please temporarily grant superuser access to the Red Hat Quay user in the database (or change the user in config) **before** upgrading.
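One way to perform that temporary grant is sketched below; the database name and user name (`quay`) are illustrative assumptions, so substitute the values from your own deployment, and run the commands as the Postgres superuser.
# Temporarily promote the Quay database user so the migration can run CREATE EXTENSION (hypothetical names).
psql -U postgres -d quay -c "ALTER USER quay WITH SUPERUSER;"
# Optionally create the extension up front; the pg_trgm module must already be installed on the server (postgresql-contrib).
psql -U postgres -d quay -c "CREATE EXTENSION IF NOT EXISTS pg_trgm;"
# Revoke the grant once the upgrade has finished.
psql -U postgres -d quay -c "ALTER USER quay WITH NOSUPERUSER;"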
- Added: Full text search support (#2272)
- Added: OIDC support (#2300, #2348)
- Added: API for lookup of security status of a manifest (#2334)
- Added: More descriptive logs (#2358)
- Fixed: Datetime bug in logs view (#2318)
- Fixed: Display bug in logs view (#2345)
- Fixed: Display of expiration date for licenses with multiple entries (#2354)
- Fixed: V1 search compatibility (#2344)
### v2.0.5
- Added: Build logs viewer in superuser panel
- Fixed: Support for wildcard certs in the superuser config panel
### v2.0.4
- Added: Expand allowed length of namespaces to be between 2 and 255 characters (#2291)
- Added: Better messaging for namespaces (#2283)
- Added: More customization of Message Of The Day (MOTD) (#2282)
- Added: Configurable and default timeout for LDAP (#2247)
- Added: Custom SSL certificate panel in superuser panel (#2271, #2274)
- Added: User and Organization list pagination on superuser panel (#2250)
- Added: Performance improvements for georeplication queuing (#2254)
- Added: Automatic garbage collection in security scanner (#2257)
- Added: RECAPTCHA support during create account flow (#2245)
- Added: Always display full git error in build logs (#2277)
- Added: Superuser config clarification warnings (#2279)
- Added: Performance improvements around queues (#2276, #2286, #2287)
- Added: Automatic retry for security scanning (#2242)
- Added: Better error messaging on security scanner lookup failure (#2235)
- Added: Ensure robot accounts show at top of entity autocomplete (#2243)
- Fixed: Exception when autocompleting users in teams (#2255)
- Fixed: Port mapping in ACI conversion (#2251, #2273)
- Fixed: Error messaging for attempting to join a team with invalid email (#2240)
- Fixed: Prometheus metrics for scale (#2237)
- Fixed: Security scanner notification pagination (#2233, #2249)
- Regressed: Support for wildcard certs in the superuser config panel
### v2.0.3
- Added: Allow extra_ca_certs to be a folder or a file (#2180)
- Fixed: Cancelling builds (#2203)
- Fixed: Allow license to be set in setup tool (#2200)
- Fixed: Improve queue performance (#2207, #2211)
- Fixed: Improve security scan performance (#2209)
- Fixed: Fix user lookup for external auth engines (#2206)
### v2.0.2
- Added: Ability to cancel builds that are already building. (#2041, #2127, #2186, #2189, #2190)
- Added: Notifications when a build is canceled (#2173, #2184)
- Added: Remove deprecated email flag from generated `docker login` commands (#2146)
- Added: Upgrade nginx to v1.11.5 (#2140)
- Added: Improve performance of robots management UI (#2145)
- Added: Add data about specific manifest or tag pulled in audit logs (#2152)
- Added: Debug nginx logs from non-proxy protocol connection (#2167)
- Added: Accept multiple team invitations simultaneously (#2169)
- Added: Password recovery defaults to resetting password (#2170)
- Added: Gzip javascript and svg assets (#2171)
- Added: Add support for custom ports in RADOS and S3 storage engines (#2185)
- Added: Prometheus metric for number of unscanned images (#2183)
- Fixed: Fix entity search under Postgres (regression in v2.0.0) (#2172)
- Fixed: Error displayed for OAuth if an existing token already matches scopes (#2139)
- Fixed: Reduce timeouts of the build manager when under heavy load (#2143, #2157)
- Fixed: Fix gauge metrics on prometheus endpoint (#2153)
- Fixed: Disable CoreOS update-engine on ephemeral Kubernetes builders (#2159)
- Fixed: Fix notifications generated by the build manager (#2163)
- Fixed: JSON encoding for chunk cleanup in Swift storage engine (#2162)
- Fixed: Fix configuration validator when setting up storage engine (#2176)
- Fixed: Multiline message of the day to not cover the search box (#2181)
- Regressed: User lookup for external auth engines broken
### v2.0.1
- Added: A defined timeout on all HTTP calls in notification methods
- Added: Customized Build start timeouts and better debug logs
- Added: A warning bar when the license will become invalid in a week
- Added: Collection of user metadata: name and company
- Added: New Prometheus metrics
- Added: Support for temp usernames and an interstitial to confirm username
- Added: Missing parameter on RADOS storage
- Added: Stagger worker startup
- Added: Make email addresses optional in external auth if email feature is turned off
- Added: External auth emails to entity search
- Added: Banner bar message when license has expired or is invalid
- Fixed: Make sure to check for user before redirecting in update user
- Fixed: 500 on get label endpoint and add a test
- Fixed: KeyError in Github trigger setup
- Fixed: Change LDAP errors into debug statements to reduce log clutter
- Fixed: Bugs due to conflicting operation names in the API
- Fixed: Cannot-use-robot for private base image bug in build dialog
- Fixed: Swift exception reporting on deletion and add async chunk cleanup
- Fixed: Logs view for dates that start in zero
- Fixed: Small JS error fixes
- Fixed: A bug with accessing the su config panel without a license
- Fixed: Buildcomponent: raise heartbeat timeout to 60s
- Fixed: KeyError in config when not present in BitBucket trigger
- Fixed: Namespace lookup in V1 registry search
- Fixed: Build notification ref filtering setup in UI
- Fixed: Entity search API to not IndexError
- Fixed: Remove setup and superuser routes when SUPER_USERS is not enabled
- Fixed: TypeError in Gitlab trigger when user not found
- Regressed: Superuser config panel cannot save
### v2.0.0
This release is a **required release** and must be run before attempting an upgrade to v2.0.0+.
In order to upgrade to this version, your cluster must contain a valid license, which can be found and downloaded at: [tectonic.com](https://account.tectonic.com)
- Added: Require valid license to enable registry actions (#2009, #2018)
- Added: The ability to delete users and organizations (#1698)
- Added: Add option to properly handle TLS terminated outside of the container (#1986)
- Added: Updated run trigger/build dialog (#1895)
- Added: Update dependencies to latest versions (#2012)
- Added: Ability to use dots and dashes in namespaces intended for use with newer Docker clients (#1852)
- Added: Changed dead queue item cleanup from 7 days to 1 day (#2019)
- Added: Add a default database timeout to prevent failed DB connections from hanging registry and API operations (#1764)
- Fixed: Fix error if a vulnerability notification doesn't have a level filter (#1995)
- Fixed: Registry WWW-Authenticate and Link headers are now Registry API compliant (#2004)
- Fixed: Small fixes for Message of the Day feature (#2005, #2006)
- Fixed: Disallow underscores at the beginning of namespaces (#1852)
- Fixed: Installation tool liveness checks during container restarts (#2023)
- Regressed: Entity search broken under Postgres
### v1.18.0
@@ -459,7 +8,6 @@ In order to upgrade to this version, your cluster must contain a valid license,
- Added: Add repository list pagination (#1858)
- Added: Better 404 (and 403) pages (#1857)
-- Fixed: Always use absolute URLs in Location headers to fix blob uploads on nonstandard ports (#1957)
- Fixed: Improved reliability of several JS functions (#1959) (#1980) (#1981)
- Fixed: Handle unicode in entity search (#1939)
- Fixed: Fix tags API pagination (#1926)
@@ -723,7 +271,7 @@ In order to upgrade to this version, your cluster must contain a valid license,
### v1.13.0
-- Added new Red Hat Quay rebranding (#723, #738, #735, #745, #746, #748, #747, #751)
+- Added new Quay Enterprise rebranding (#723, #738, #735, #745, #746, #748, #747, #751)
- Added a styled 404 page (#683)
- Hid the run button from users that haven't created a trigger (#727)
- Added timeouts to calls to GitLab, Bitbucket, GitHub APIs (#636, #633, #631, #722)


@@ -1,128 +1,130 @@
-FROM centos:7
+# vim:ft=dockerfile
-LABEL maintainer "thomasmckay@redhat.com"
-ENV PYTHON_VERSION=2.7 \
+FROM phusion/baseimage:0.9.19
-PATH=$HOME/.local/bin/:$PATH \
-PYTHONUNBUFFERED=1 \
-PYTHONIOENCODING=UTF-8 \
-LC_ALL=en_US.UTF-8 \
-LANG=en_US.UTF-8 \
-PIP_NO_CACHE_DIR=off
-ENV QUAYDIR /quay-registry
+ENV DEBIAN_FRONTEND noninteractive
-ENV QUAYCONF /quay-registry/conf
+ENV HOME /root
-ENV QUAYPATH "."
-RUN mkdir $QUAYDIR
+# Install system packages
-WORKDIR $QUAYDIR
+RUN apt-get update # 07SEP2016
+RUN apt-get install -y \
+g++ \
+gdebi-core \
+git \
+libevent-2.0.5 \
+libevent-dev \
+libffi-dev \
+libfreetype6-dev \
+libgpgme11 \
+libgpgme11-dev \
+libjpeg62 \
+libjpeg62-dev \
+libjpeg8 \
+libldap-2.4-2 \
+libldap2-dev \
+libmagic1 \
+libpq-dev \
+libpq5 \
+libsasl2-dev \
+libsasl2-modules \
+nginx \
+nodejs \
+npm \
+python-dev \
+python-pip \
+python-virtualenv
-RUN INSTALL_PKGS="\
+# Install python dependencies
-python27 \
+ADD requirements.txt requirements.txt
-python27-python-pip \
+RUN virtualenv --distribute venv
-rh-nginx112 rh-nginx112-nginx \
+RUN venv/bin/pip install -r requirements.txt # 07SEP2016
-openldap \
+RUN venv/bin/pip freeze
-scl-utils \
-gcc-c++ git \
-openldap-devel \
-gpgme-devel \
-dnsmasq \
-memcached \
-openssl \
-skopeo \
-" && \
-yum install -y yum-utils && \
-yum install -y epel-release centos-release-scl && \
-yum -y --setopt=tsflags=nodocs --setopt=skip_missing_names_on_install=False install $INSTALL_PKGS && \
-yum -y update && \
-yum -y clean all
-COPY . .
+# Check python dependencies for the GPL
-RUN scl enable python27 "\
-pip install --upgrade setuptools pip && \
-pip install -r requirements.txt --no-cache && \
-pip install -r requirements-tests.txt --no-cache && \
-pip freeze && \
-mkdir -p $QUAYDIR/static/webfonts && \
-mkdir -p $QUAYDIR/static/fonts && \
-mkdir -p $QUAYDIR/static/ldn && \
-PYTHONPATH=$QUAYPATH python -m external_libraries \
-"
-RUN cp -r $QUAYDIR/static/ldn $QUAYDIR/config_app/static/ldn && \
-cp -r $QUAYDIR/static/fonts $QUAYDIR/config_app/static/fonts && \
-cp -r $QUAYDIR/static/webfonts $QUAYDIR/config_app/static/webfonts
-# Check python dependencies for GPL
# Due to the following bug, pip results must be piped to a file before grepping:
# https://github.com/pypa/pip/pull/3304
-# 'docutils' is a setup dependency of botocore required by s3transfer. It's under
+RUN cat requirements.txt | grep -v "^-e" | awk -F'==' '{print $1}' | xargs venv/bin/pip --disable-pip-version-check show > pipinfo.txt && \
-# GPLv3, and so is manually removed.
+test -z $(cat pipinfo.txt | grep GPL | grep -v LGPL) && \
-RUN rm -Rf /opt/rh/python27/root/usr/lib/python2.7/site-packages/docutils && \
+rm pipinfo.txt
-scl enable python27 "pip freeze" | grep -v '^-e' | awk -F == '{print $1}' | grep -v docutils > piplist.txt && \
-scl enable python27 "xargs -a piplist.txt pip --disable-pip-version-check show" > pipinfo.txt && \
-test -z "$(cat pipinfo.txt | grep GPL | grep -v LGPL)" && \
-rm -f piplist.txt pipinfo.txt
-# # Front-end
+# Install cfssl
-RUN curl --silent --location https://rpm.nodesource.com/setup_8.x | bash - && \
+RUN mkdir /gocode
-yum install -y nodejs && \
+ENV GOPATH /gocode
-curl --silent --location https://dl.yarnpkg.com/rpm/yarn.repo | tee /etc/yum.repos.d/yarn.repo && \
+RUN curl -O https://storage.googleapis.com/golang/go1.6.linux-amd64.tar.gz && \
-rpm --import https://dl.yarnpkg.com/rpm/pubkey.gpg && \
+tar -xvf go1.6.linux-amd64.tar.gz && \
-yum install -y yarn && \
+mv go /usr/local && \
-yarn install --ignore-engines && \
+rm -rf go1.6.linux-amd64.tar.gz && \
-yarn build && \
+/usr/local/go/bin/go get -u github.com/cloudflare/cfssl/cmd/cfssl && \
-yarn build-config-app
+/usr/local/go/bin/go get -u github.com/cloudflare/cfssl/cmd/cfssljson && \
+cp /gocode/bin/cfssljson /bin/cfssljson && \
+cp /gocode/bin/cfssl /bin/cfssl && \
+rm -rf /gocode && rm -rf /usr/local/go
-# TODO: Build jwtproxy in dist-git
+# Install jwtproxy
-# https://jira.coreos.com/browse/QUAY-1315
+RUN curl -L -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.1/jwtproxy-linux-x64
-RUN curl -fsSL -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.3/jwtproxy-linux-x64 && \
+RUN chmod +x /usr/local/bin/jwtproxy
-chmod +x /usr/local/bin/jwtproxy
-# TODO: Build prometheus-aggregator in dist-git
+# Install prometheus-aggregator
-# https://jira.coreos.com/browse/QUAY-1324
+RUN curl -L -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator
-RUN curl -fsSL -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator &&\
+RUN chmod +x /usr/local/bin/prometheus-aggregator
-chmod +x /usr/local/bin/prometheus-aggregator
-# Update local copy of AWS IP Ranges.
+# Install Grunt
-RUN curl -fsSL https://ip-ranges.amazonaws.com/ip-ranges.json -o util/ipresolver/aws-ip-ranges.json
+RUN ln -s /usr/bin/nodejs /usr/bin/node
+RUN npm install -g grunt-cli
-RUN ln -s $QUAYCONF /conf && \
+# Install Grunt dependencies
-mkdir /var/log/nginx && \
+ADD grunt grunt
-ln -sf /dev/stdout /var/log/nginx/access.log && \
+RUN cd grunt && npm install
-ln -sf /dev/stdout /var/log/nginx/error.log && \
-chmod -R a+rwx /var/log/nginx
-# Cleanup
+# Run grunt
-RUN UNINSTALL_PKGS="\
+ADD static static
-gcc-c++ \
+RUN cd grunt && grunt
-openldap-devel \
-gpgme-devel \
-optipng \
-kernel-headers \
-" && \
-yum remove -y $UNINSTALL_PKGS && \
-yum clean all && \
-rm -rf /var/cache/yum /tmp/* /var/tmp/* /root/.cache
-EXPOSE 8080 8443 7443
+RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev libgpgme11-dev nodejs npm
+RUN apt-get autoremove -y
+RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+RUN rm -rf grunt
-RUN chgrp -R 0 $QUAYDIR && \
+# Set up the init system
-chmod -R g=u $QUAYDIR
+ADD conf/init/copy_config_files.sh /etc/my_init.d/
+ADD conf/init/doupdatelimits.sh /etc/my_init.d/
+ADD conf/init/copy_syslog_config.sh /etc/my_init.d/
+ADD conf/init/certs_create.sh /etc/my_init.d/
+ADD conf/init/certs_install.sh /etc/my_init.d/
+ADD conf/init/runmigration.sh /etc/my_init.d/
+ADD conf/init/syslog-ng.conf /etc/syslog-ng/
+ADD conf/init/zz_boot.sh /etc/my_init.d/
+ADD conf/init/service/ /etc/service/
+RUN rm -rf /etc/service/syslog-forwarder
-RUN mkdir /datastorage && chgrp 0 /datastorage && chmod g=u /datastorage && \
+# remove after phusion/baseimage-docker#338 is fixed
-mkdir -p /var/log/nginx && chgrp 0 /var/log/nginx && chmod g=u /var/log/nginx && \
+ADD conf/init/logrotate.conf /etc/logrotate.conf
-mkdir -p /conf/stack && chgrp 0 /conf/stack && chmod g=u /conf/stack && \
-mkdir -p /tmp && chgrp 0 /tmp && chmod g=u /tmp && \
-chmod g=u /etc/passwd
-RUN chgrp 0 /var/opt/rh/rh-nginx112/log/nginx && chmod g=u /var/opt/rh/rh-nginx112/log/nginx
+# Download any external libs.
+RUN mkdir static/fonts static/ldn
+ADD external_libraries.py external_libraries.py
+RUN venv/bin/python -m external_libraries
+RUN mkdir -p /usr/local/nginx/logs/
-VOLUME ["/var/log", "/datastorage", "/tmp", "/conf/stack"]
+# TODO(ssewell): only works on a detached head, make work with ref
+ADD .git/HEAD GIT_HEAD
-ENTRYPOINT ["/quay-registry/quay-entrypoint.sh"]
+# Add all of the files!
-CMD ["registry"]
+ADD . .
-# root required to create and install certs
+# Run the tests
-# https://jira.coreos.com/browse/QUAY-1468
+ARG RUN_TESTS=true
-# USER 1001
+ENV RUN_TESTS ${RUN_TESTS}
+ENV RUN_ACI_TESTS False
+RUN if [ "$RUN_TESTS" = true ]; then \
+TEST=true venv/bin/python -m unittest discover -f; \
+fi
+RUN if [ "$RUN_TESTS" = true ]; then \
+TEST=true venv/bin/python -m test.registry_tests -f; \
+fi
+RUN PYTHONPATH=. venv/bin/alembic heads | grep -E '^[0-9a-f]+ \(head\)$' > ALEMBIC_HEAD
+VOLUME ["/conf/stack", "/var/log", "/datastorage", "/tmp", "/conf/etcd"]
+EXPOSE 443 8443 80


@@ -1,8 +0,0 @@
FROM quay-ci-base
RUN mkdir -p conf/stack
RUN rm -rf test/data/test.db
ENV ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE remove-old-fields
ADD cirun.config.yaml conf/stack/config.yaml
RUN /usr/bin/scl enable python27 rh-nginx112 "LOGGING_LEVEL=INFO python initdb.py"
ENTRYPOINT ["/quay-registry/quay-entrypoint.sh"]
CMD ["registry"]


@@ -1,19 +0,0 @@
# -*- mode: dockerfile -*-
# vi: set ft=dockerfile :
FROM quay.io/quay/quay-base:latest
WORKDIR $QUAYDIR
COPY requirements.txt requirements-tests.txt ./
# Put the virtualenv outside the source directory. This lets us mount
# the Quay source as a volume for local development.
RUN virtualenv --distribute /venv \
&& /venv/bin/pip install -r requirements.txt \
&& /venv/bin/pip install -r requirements-tests.txt \
&& /venv/bin/pip freeze
ENV PATH /venv/bin:${PATH}
RUN ln -s $QUAYCONF /conf
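The comment above explains the layout choice: keeping the virtualenv at /venv rather than inside the source tree means a local checkout can be bind-mounted over the image's source directory without hiding the installed dependencies. A hedged usage sketch; the image tag `quay-dev`, the Dockerfile name, and the mount target are assumptions, not taken from this diff:
# Build a development image from this Dockerfile, then mount the local checkout over the source directory.
docker build -t quay-dev -f Dockerfile.dev .
docker run -it --rm -v "$(pwd)":/quay-registry quay-dev bash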


@@ -1,142 +0,0 @@
FROM registry.redhat.io/rhel7:7.7
LABEL maintainer "thomasmckay@redhat.com"
ENV PYTHON_VERSION=2.7 \
PATH=$HOME/.local/bin/:$PATH \
PYTHONUNBUFFERED=1 \
PYTHONIOENCODING=UTF-8 \
LC_ALL=en_US.UTF-8 \
LANG=en_US.UTF-8 \
PIP_NO_CACHE_DIR=off
ENV QUAYDIR /quay-registry
ENV QUAYCONF /quay-registry/conf
ENV QUAYPATH "."
RUN mkdir $QUAYDIR
WORKDIR $QUAYDIR
RUN INSTALL_PKGS="\
python27 \
python27-python-pip \
rh-nginx112 rh-nginx112-nginx \
openldap \
scl-utils \
gcc-c++ git \
openldap-devel \
gpgme-devel \
dnsmasq \
memcached \
openssl \
skopeo \
" && \
yum install -y yum-utils && \
yum-config-manager --quiet --disable "*" >/dev/null && \
yum-config-manager --quiet --enable \
rhel-7-server-rpms \
rhel-server-rhscl-7-rpms \
rhel-7-server-optional-rpms \
rhel-7-server-extras-rpms \
--save >/dev/null && \
yum -y --setopt=tsflags=nodocs --setopt=skip_missing_names_on_install=False install $INSTALL_PKGS && \
yum -y update && \
yum -y clean all
COPY . .
RUN scl enable python27 "\
pip install --upgrade setuptools pip && \
pip install -r requirements.txt --no-cache && \
pip freeze && \
mkdir -p $QUAYDIR/static/webfonts && \
mkdir -p $QUAYDIR/static/fonts && \
mkdir -p $QUAYDIR/static/ldn && \
PYTHONPATH=$QUAYPATH python -m external_libraries \
"
RUN cp -r $QUAYDIR/static/ldn $QUAYDIR/config_app/static/ldn && \
cp -r $QUAYDIR/static/fonts $QUAYDIR/config_app/static/fonts && \
cp -r $QUAYDIR/static/webfonts $QUAYDIR/config_app/static/webfonts
# Check python dependencies for GPL
# Due to the following bug, pip results must be piped to a file before grepping:
# https://github.com/pypa/pip/pull/3304
# 'docutils' is a setup dependency of botocore required by s3transfer. It's under
# GPLv3, and so is manually removed.
RUN rm -Rf /opt/rh/python27/root/usr/lib/python2.7/site-packages/docutils && \
scl enable python27 "pip freeze" | grep -v '^-e' | awk -F == '{print $1}' | grep -v docutils > piplist.txt && \
scl enable python27 "xargs -a piplist.txt pip --disable-pip-version-check show" > pipinfo.txt && \
test -z "$(cat pipinfo.txt | grep GPL | grep -v LGPL)" && \
rm -f piplist.txt pipinfo.txt
# Front-end
RUN curl --silent --location https://rpm.nodesource.com/setup_8.x | bash - && \
yum install -y nodejs && \
curl --silent --location https://dl.yarnpkg.com/rpm/yarn.repo | tee /etc/yum.repos.d/yarn.repo && \
rpm --import https://dl.yarnpkg.com/rpm/pubkey.gpg && \
yum install -y yarn && \
yarn install --ignore-engines && \
yarn build && \
yarn build-config-app
# TODO: Build jwtproxy in dist-git
# https://jira.coreos.com/browse/QUAY-1315
RUN curl -fsSL -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.3/jwtproxy-linux-x64 && \
chmod +x /usr/local/bin/jwtproxy
# TODO: Build prometheus-aggregator in dist-git
# https://jira.coreos.com/browse/QUAY-1324
RUN curl -fsSL -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator &&\
chmod +x /usr/local/bin/prometheus-aggregator
# Update local copy of AWS IP Ranges.
RUN curl -fsSL https://ip-ranges.amazonaws.com/ip-ranges.json -o util/ipresolver/aws-ip-ranges.json
RUN ln -s $QUAYCONF /conf && \
mkdir /var/log/nginx && \
ln -sf /dev/stdout /var/log/nginx/access.log && \
ln -sf /dev/stdout /var/log/nginx/error.log && \
chmod -R a+rwx /var/log/nginx
# Cleanup
RUN UNINSTALL_PKGS="\
gcc-c++ git \
openldap-devel \
gpgme-devel \
optipng \
kernel-headers \
" && \
yum remove -y $UNINSTALL_PKGS && \
yum clean all && \
rm -rf /var/cache/yum /tmp/* /var/tmp/* /root/.cache
EXPOSE 8080 8443 7443
RUN chgrp -R 0 $QUAYDIR && \
chmod -R g=u $QUAYDIR
RUN mkdir /datastorage && chgrp 0 /datastorage && chmod g=u /datastorage && \
mkdir -p /var/log/nginx && chgrp 0 /var/log/nginx && chmod g=u /var/log/nginx && \
mkdir -p /conf/stack && chgrp 0 /conf/stack && chmod g=u /conf/stack && \
mkdir -p /tmp && chgrp 0 /tmp && chmod g=u /tmp && \
chmod g=u /etc/passwd
RUN chgrp 0 /var/opt/rh/rh-nginx112/log/nginx && chmod g=u /var/opt/rh/rh-nginx112/log/nginx
# Allow TLS certs to be created and installed as non-root user
RUN chgrp -R 0 /etc/pki/ca-trust/extracted && \
chmod -R g=u /etc/pki/ca-trust/extracted && \
chgrp -R 0 /etc/pki/ca-trust/source/anchors && \
chmod -R g=u /etc/pki/ca-trust/source/anchors && \
chgrp -R 0 /opt/rh/python27/root/usr/lib/python2.7/site-packages/requests && \
chmod -R g=u /opt/rh/python27/root/usr/lib/python2.7/site-packages/requests && \
chgrp -R 0 /opt/rh/python27/root/usr/lib/python2.7/site-packages/certifi && \
chmod -R g=u /opt/rh/python27/root/usr/lib/python2.7/site-packages/certifi
VOLUME ["/var/log", "/datastorage", "/tmp", "/conf/stack"]
USER 1001
ENTRYPOINT ["/quay-registry/quay-entrypoint.sh"]
CMD ["registry"]


@@ -1,133 +0,0 @@
FROM registry.redhat.io/rhel7:7.7
LABEL maintainer "thomasmckay@redhat.com"
ENV PYTHON_VERSION=2.7 \
PATH=$HOME/.local/bin/:$PATH \
PYTHONUNBUFFERED=1 \
PYTHONIOENCODING=UTF-8 \
LC_ALL=en_US.UTF-8 \
LANG=en_US.UTF-8 \
PIP_NO_CACHE_DIR=off
ENV QUAYDIR /quay-registry
ENV QUAYCONF /quay-registry/conf
ENV QUAYPATH "."
RUN mkdir $QUAYDIR
WORKDIR $QUAYDIR
RUN INSTALL_PKGS="\
python27 \
python27-python-pip \
rh-nginx112 rh-nginx112-nginx \
openldap \
scl-utils \
gcc-c++ git \
openldap-devel \
gpgme-devel \
dnsmasq \
memcached \
openssl \
skopeo \
" && \
yum install -y yum-utils && \
yum-config-manager --quiet --disable "*" >/dev/null && \
yum-config-manager --quiet --enable \
rhel-7-server-rpms \
rhel-server-rhscl-7-rpms \
rhel-7-server-optional-rpms \
rhel-7-server-extras-rpms \
--save >/dev/null && \
yum -y --setopt=tsflags=nodocs --setopt=skip_missing_names_on_install=False install $INSTALL_PKGS && \
yum -y update && \
yum -y clean all
COPY . .
RUN scl enable python27 "\
pip install --upgrade setuptools pip && \
pip install -r requirements.txt --no-cache && \
pip freeze && \
mkdir -p $QUAYDIR/static/webfonts && \
mkdir -p $QUAYDIR/static/fonts && \
mkdir -p $QUAYDIR/static/ldn && \
PYTHONPATH=$QUAYPATH python -m external_libraries \
"
RUN cp -r $QUAYDIR/static/ldn $QUAYDIR/config_app/static/ldn && \
cp -r $QUAYDIR/static/fonts $QUAYDIR/config_app/static/fonts && \
cp -r $QUAYDIR/static/webfonts $QUAYDIR/config_app/static/webfonts
# Check python dependencies for GPL
# Due to the following bug, pip results must be piped to a file before grepping:
# https://github.com/pypa/pip/pull/3304
# 'docutils' is a setup dependency of botocore required by s3transfer. It's under
# GPLv3, and so is manually removed.
RUN rm -Rf /opt/rh/python27/root/usr/lib/python2.7/site-packages/docutils && \
scl enable python27 "pip freeze" | grep -v '^-e' | awk -F == '{print $1}' | grep -v docutils > piplist.txt && \
scl enable python27 "xargs -a piplist.txt pip --disable-pip-version-check show" > pipinfo.txt && \
test -z "$(cat pipinfo.txt | grep GPL | grep -v LGPL)" && \
rm -f piplist.txt pipinfo.txt
# Front-end
RUN curl --silent --location https://rpm.nodesource.com/setup_8.x | bash - && \
yum install -y nodejs && \
curl --silent --location https://dl.yarnpkg.com/rpm/yarn.repo | tee /etc/yum.repos.d/yarn.repo && \
rpm --import https://dl.yarnpkg.com/rpm/pubkey.gpg && \
yum install -y yarn && \
yarn install --ignore-engines && \
yarn build && \
yarn build-config-app
# TODO: Build jwtproxy in dist-git
# https://jira.coreos.com/browse/QUAY-1315
RUN curl -fsSL -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.3/jwtproxy-linux-x64 && \
chmod +x /usr/local/bin/jwtproxy
# TODO: Build prometheus-aggregator in dist-git
# https://jira.coreos.com/browse/QUAY-1324
RUN curl -fsSL -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator &&\
chmod +x /usr/local/bin/prometheus-aggregator
# Update local copy of AWS IP Ranges.
RUN curl -fsSL https://ip-ranges.amazonaws.com/ip-ranges.json -o util/ipresolver/aws-ip-ranges.json
RUN ln -s $QUAYCONF /conf && \
mkdir /var/log/nginx && \
ln -sf /dev/stdout /var/log/nginx/access.log && \
ln -sf /dev/stdout /var/log/nginx/error.log && \
chmod -R a+rwx /var/log/nginx
# Cleanup
RUN UNINSTALL_PKGS="\
gcc-c++ git \
openldap-devel \
gpgme-devel \
optipng \
kernel-headers \
" && \
yum remove -y $UNINSTALL_PKGS && \
yum clean all && \
rm -rf /var/cache/yum /tmp/* /var/tmp/* /root/.cache
EXPOSE 8080 8443 7443
RUN chgrp -R 0 $QUAYDIR && \
chmod -R g=u $QUAYDIR
RUN mkdir /datastorage && chgrp 0 /datastorage && chmod g=u /datastorage && \
mkdir -p /var/log/nginx && chgrp 0 /var/log/nginx && chmod g=u /var/log/nginx && \
mkdir -p /conf/stack && chgrp 0 /conf/stack && chmod g=u /conf/stack && \
mkdir -p /tmp && chgrp 0 /tmp && chmod g=u /tmp && \
chmod g=u /etc/passwd
RUN chgrp 0 /var/opt/rh/rh-nginx112/log/nginx && chmod g=u /var/opt/rh/rh-nginx112/log/nginx
VOLUME ["/var/log", "/datastorage", "/tmp", "/conf/stack"]
ENTRYPOINT ["/quay-registry/quay-entrypoint.sh"]
CMD ["registry"]
# root required to create and install certs
# https://jira.coreos.com/browse/QUAY-1468
# USER 1001


@@ -1,66 +0,0 @@
# Project Quay Governance
Project Quay is run according to the guidelines specified below. This is a living document and is expected to evolve along with Project Quay itself.
## Principles
Project Quay strives to follow these principles at all times:
* Openness - Quay evolves and improves out in the open, with transparent work and decision making that is clear and well understood.
* Respectfulness - Quay is a project for a diverse community where different points of view are welcomed. Healthy and respectful discussions help us meet our goals and deliver a better end product.
* Meritocracy - In the Quay community all ideas are heard but only the best ideas help drive the project forward. As an open, respectful community we will judge all ideas on their technical merit and alignment with Quay's design principles.
* Accountability - The Quay community is accountable
* to our users to deliver the best software possible
* to the project to ensure each Contributor and Maintainer carries out their duties to the best of their abilities
* to itself to ensure that Quay remains a project where individuals can be passionate about contributing their time and energy
## Maintainers
Maintainers play a special role to ensure that contributions align with the expected quality, consistency and long term vision for Project Quay. Each Maintainer is vital to the success of Project Quay and has decided to make the commitment to that cause. Being a Maintainer is difficult work and not for everyone. Therefore Project Quay will have a small group of Maintainers: as many as deemed necessary to handle the pipeline of contributions being made to the project.
### Becoming a Maintainer
Each Maintainer must also be a Contributor. Candidates for the Maintainer role are individuals who have made recent, substantial and recurring contributions to the project. The existing Maintainers will periodically identify Contributors and make recommendations to the community that those individuals become Maintainers. The Maintainers will then vote on the candidate and, if agreed, the candidate will be invited to raise a PR to add their name to the MAINTAINERS.md file. Approval of that PR signals the Contributor is now a Maintainer.
### Responsibilities of a Maintainer
Project Quay's success depends on how well Maintainers perform their duties. Maintainers are responsible for monitoring Slack and e-mail lists, helping triage issues on the Project Quay JIRA board, reviewing PRs and ensuring responses are provided to Contributors, and assisting with regular Project Quay releases. If Contributors are the lifeblood of an open source community, the Maintainers act as the heart, hands, eyes and ears, helping to keep the project moving and viable.
### Stepping Down as a Maintainer
A Maintainer may decide they are no longer interested in or able to carry out the role. In such a situation the Maintainer should notify the other Maintainers of their intentions to step down and help identify a replacement from existing Contributors. Ideally the outgoing Maintainer will ensure that any outstanding work has been transitioned to another Maintainer. To carry out the actual removal the outgoing Maintainer raises a PR against MAINTAINERS.md file to remove their name.
## Contributors
Anyone can be a Contributor to Project Quay. No special approval is required: simply go through our Getting Started guide, fork one of our repositories, and submit a PR. All types of contributions are welcome, whether through bug reports via JIRA, code, or documentation.
## Sub-Projects
Project Quay will be primarily focused on the delivery of Quay itself but also contains various sub-projects such as Clair and Quay-Builders. Each sub-project must have its own dedicated repository containing a MAINTAINERS.md file. Each sub-project will abide by this Governance model.
Requests for new sub-projects under Project Quay should be raised to the Maintainers.
## Code of Conduct
Project Quay abides by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
## How Decisions Are Made
Most of the decision making for Project Quay will happen through the regular PR approval process. We stand by the notion that what exists in the Project Quay repositories is the end result of countless community-driven decisions.
When a more complex decision is required, for example a technical issue related to a PR, it is expected that involved parties will resolve the dispute in a respectful and efficient manner. If the dispute cannot be resolved between the involved parties then the Maintainers will review the dispute and come to an agreement via majority vote amongst themselves. All decision making should be tracked via a JIRA issue and performed transparently via the Project Quay communications channels.
## Project Quay Releases
On a regular basis, Project Quay will issue a release. The release cadence will not be strictly defined but should happen approximately every 3 months. Maintainers will be part of a rotating "Release Nanny" role whereby each Maintainer shares the responsibility of creating a Quay release.
Release duties include:
* Creating the Release Notes
* Verifying the automated tests have passed
* Building the necessary Quay, Clair-JWT, and Quay-Builder container images
* Publishing the container images to quay.io
* Updating the github release pages
* Notifying the community of the new release
## DCO and Licenses
Project Quay uses the [Apache 2.0](https://opensource.org/licenses/Apache-2.0) license.

LICENSE

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

3
MAINTAINERS Normal file
View file

@ -0,0 +1,3 @@
Joseph Schorr <joseph.schorr@coreos.com> (@josephschorr)
Jimmy Zelinskie <jimmy.zelinskie@coreos.com> (@jzelinskie)
Jake Moshenko <jake.moshenko@coreos.com> (@jakedt)

180
Makefile
View file

@ -1,180 +0,0 @@
SHELL := /bin/bash
export PATH := ./venv/bin:$(PATH)
SHA := $(shell git rev-parse --short HEAD )
REPO := quay.io/quay/quay
TAG := $(REPO):$(SHA)
MODIFIED_FILES_COUNT = $(shell git diff --name-only origin/master | grep -E .+\.py$ | wc -l)
GIT_MERGE_BASED = $(shell git merge-base origin/master HEAD)
MODIFIED_FILES = $(shell git diff --name-only $(GIT_MERGE_BASED) | grep -E .+\.py$ | paste -sd ' ')
show-modified:
echo $(MODIFIED_FILES)
.PHONY: all unit-test registry-test registry-test-old buildman-test test pkgs build run clean
all: clean pkgs test build
pkgs: requirements.txt requirements-dev.txt requirements-tests.txt
pip install -r $<
requirements.txt: requirements-nover.txt
# Create a new virtualenv and activate it
pyenv virtualenv 2.7.12 quay-deps
pyenv activate quay-deps
# Install unversioned dependencies with your changes
pip install -r requirements-nover.txt
# Run the unit test suite
$(MAKE) unit
# Freeze the versions of all of the dependencies
pip freeze > requirements.txt
# Delete the virtualenv
pyenv uninstall quay-deps
QUAY_CONFIG ?= ../quay-config
conf/stack/license: $(QUAY_CONFIG)/local/license
mkdir -p conf/stack
ln -s $(QUAY_CONFIG)/local/license conf/stack/license
unit-test:
ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields TEST=true PYTHONPATH="." py.test \
--cov="." --cov-report=html --cov-report=term-missing \
--timeout=3600 --verbose -x \
./
registry-test:
TEST=true ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields PYTHONPATH="." py.test \
--cov="." --cov-report=html --cov-report=term-missing \
--timeout=3600 --verbose --show-count -x \
test/registry/registry_tests.py
registry-test-old:
TEST=true PYTHONPATH="." ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields py.test \
--cov="." --cov-report=html --cov-report=term-missing \
--timeout=3600 --verbose --show-count -x \
./test/registry_tests.py
buildman-test:
TEST=true PYTHONPATH="." ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields py.test \
--cov="." --cov-report=html --cov-report=term-missing \
--timeout=3600 --verbose --show-count -x \
./buildman/
certs-test:
./test/test_certs_install.sh
full-db-test: ensure-test-db
TEST=true PYTHONPATH=. QUAY_OVERRIDE_CONFIG='{"DATABASE_SECRET_KEY": "anothercrazykey!"}' \
ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields alembic upgrade head
TEST=true PYTHONPATH=. ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields \
SKIP_DB_SCHEMA=true py.test --timeout=7200 \
--verbose --show-count -x --ignore=endpoints/appr/test/ \
./
clients-test:
cd test/clients; python clients_test.py
test: unit-test registry-test registry-test-old certs-test
ensure-test-db:
@if [ -z $(TEST_DATABASE_URI) ]; then \
echo "TEST_DATABASE_URI is undefined"; \
exit 1; \
fi
PG_PASSWORD := quay
PG_USER := quay
PG_HOST := postgresql://$(PG_USER):$(PG_PASSWORD)@localhost/quay
test_postgres : TEST_ENV := SKIP_DB_SCHEMA=true TEST=true \
TEST_DATABASE_URI=$(PG_HOST) PYTHONPATH=.
test_postgres:
docker rm -f postgres-testrunner-postgres || true
docker run --name postgres-testrunner-postgres \
-e POSTGRES_PASSWORD=$(PG_PASSWORD) -e POSTGRES_USER=${PG_USER} \
-p 5432:5432 -d postgres:9.2
until pg_isready -d $(PG_HOST); do sleep 1; echo "Waiting for postgres"; done
$(TEST_ENV) alembic upgrade head
$(TEST_ENV) py.test --timeout=7200 --verbose --show-count ./ --color=no \
--ignore=endpoints/appr/test/ -x
docker rm -f postgres-testrunner-postgres || true
WEBPACK := node_modules/.bin/webpack
$(WEBPACK): package.json
npm install webpack
npm install
BUNDLE := static/js/build/bundle.js
$(BUNDLE): $(WEBPACK) tsconfig.json webpack.config.js typings.json
$(WEBPACK)
GRUNT := grunt/node_modules/.bin/grunt
$(GRUNT): grunt/package.json
cd grunt && npm install
JS := quay-frontend.js quay-frontend.min.js template-cache.js
CSS := quay-frontend.css
DIST := $(addprefix static/dist/, $(JS) $(CSS) cachebusters.json)
$(DIST): $(GRUNT)
cd grunt && ../$(GRUNT)
build: $(WEBPACK) $(GRUNT)
docker-build: pkgs build
ifneq (0,$(shell git status --porcelain | awk 'BEGIN {print $N}'))
echo 'dirty build not supported - run `FORCE=true make clean` to remove'
exit 1
endif
# get named head (ex: branch, tag, etc..)
NAME = $(shell git rev-parse --abbrev-ref HEAD)
# checkout commit so .git/HEAD points to full sha (used in Dockerfile)
git checkout $(SHA)
docker build -t $(TAG) .
git checkout $(NAME)
echo $(TAG)
app-sre-docker-build:
# get named head (ex: branch, tag, etc..)
export NAME=$(shell git rev-parse --abbrev-ref HEAD)
# checkout commit so .git/HEAD points to full sha (used in Dockerfile)
echo "$(SHA)"
git checkout $(SHA)
$(BUILD_CMD) -t ${IMG} .
git checkout $(NAME)
run: license
goreman start
clean:
find . -name "*.pyc" -exec rm -rf {} \;
rm -rf node_modules 2> /dev/null
rm -rf grunt/node_modules 2> /dev/null
rm -rf dest 2> /dev/null
rm -rf dist 2> /dev/null
rm -rf .cache 2> /dev/null
rm -rf static/js/build
rm -rf static/build
rm -rf static/dist
rm -rf build
rm -rf conf/stack
rm -rf screenshots
yapf-all:
yapf -r . -p -i
yapf-diff:
if [ $(MODIFIED_FILES_COUNT) -ne 0 ]; then yapf -d -p $(MODIFIED_FILES) ; fi
yapf-test:
if [ `yapf -d -p $(MODIFIED_FILES) | wc -l` -gt 0 ] ; then false ; else true ;fi

View file

@ -1,69 +0,0 @@
SHELL := /bin/bash
PYTEST_MARK ?= shard_1_of_1
export PATH := ./venv/bin:$(PATH)
.PHONY: all unit-test registry-test registry-test-old test
all: test
unit-test:
TEST=true PYTHONPATH="." ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields py.test \
-m $(PYTEST_MARK) \
--cov="." --cov-report=html --cov-report=term-missing \
--timeout=3600 --verbose -x \
./
registry-test:
TEST=true PYTHONPATH="." ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields py.test \
-m $(PYTEST_MARK) \
--cov="." --cov-report=html --cov-report=term-missing \
--timeout=3600 --verbose --show-count -x \
test/registry/registry_tests.py
registry-test-old:
TEST=true PYTHONPATH="." ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields py.test \
--cov="." --cov-report=html --cov-report=term-missing \
--timeout=3600 --verbose --show-count -x \
./test/registry_tests.py
certs-test:
./test/test_certs_install.sh
gunicorn-tests:
./test/test_gunicorn_running.sh
full-db-test: ensure-test-db
TEST=true PYTHONPATH=. QUAY_OVERRIDE_CONFIG='{"DATABASE_SECRET_KEY": "anothercrazykey!"}' \
ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields alembic upgrade head
TEST=true PYTHONPATH=. ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields \
SKIP_DB_SCHEMA=true py.test --timeout=7200 \
-m $(PYTEST_MARK) \
--verbose --show-count -x --ignore=endpoints/appr/test/ \
./
test: unit-test registry-test
ensure-test-db:
@if [ -z $(TEST_DATABASE_URI) ]; then \
echo "TEST_DATABASE_URI is undefined"; \
exit 1; \
fi
PG_PASSWORD := quay
PG_USER := quay
PG_HOST := postgresql://$(PG_USER):$(PG_PASSWORD)@localhost/quay
test_postgres : TEST_ENV := SKIP_DB_SCHEMA=true TEST=true \
TEST_DATABASE_URI=$(PG_HOST) PYTHONPATH=.
test_postgres:
docker rm -f postgres-testrunner-postgres || true
docker run --name postgres-testrunner-postgres \
-e POSTGRES_PASSWORD=$(PG_PASSWORD) -e POSTGRES_USER=${PG_USER} \
-p 5432:5432 -d postgres:9.2
until pg_isready -d $(PG_HOST); do sleep 1; echo "Waiting for postgres"; done
$(TEST_ENV) alembic upgrade head
$(TEST_ENV) py.test --timeout=7200 --verbose --show-count ./ --color=no \
--ignore=endpoints/appr/test/ -x
docker rm -f postgres-testrunner-postgres || true

View file

@ -1,4 +0,0 @@
app: gunicorn -c conf/gunicorn_local.py application:application
webpack: npm run watch
builder: python -m buildman.builder

232
README.md
View file

@ -1,36 +1,35 @@
# Project Quay # quay
[![Build Status](https://travis-ci.com/quay/quay.svg?token=pWvEz2TeyDsVn69Hkiwq&branch=master)](https://travis-ci.com/quay/quay) ![Docker Repository on Quay](https://quay.io/repository/quay/quay/status?token=7bffbc13-8bb0-4fb4-8a70-684a0cf485d3 "Docker Repository on Quay")
:warning: The `master` branch may be in an *unstable or even broken state* during development. **Note**: The `master` branch may be in an *unstable or even broken state* during development.
Please use [releases] instead of the `master` branch in order to get stable software. Please use [releases] instead of the `master` branch in order to get stable binaries.
[releases]: https://github.com/quay/quay/releases ![Quay Logo](static/img/quay_preview.png)
![Project Quay Logo](project_quay_logo.png) Quay is project to build, store, and distribute container images.
Project Quay builds, stores, and distributes your container images.
High-level features include: High-level features include:
- Docker Registry Protocol [v2] - Docker Registry Protocol [v1], [v2]
- Docker Manifest Schema [v2.1], [v2.2] - Docker Manifest Schema [v2.1]
- [AppC Image Discovery] via on-demand transcoding - [AppC Image Discovery] via on-demand transcoding
- Image Squashing via on-demand transcoding - Image Squashing via on-demand transcoding
- Authentication provided by [LDAP], [Keystone], [OIDC], [Google], and [GitHub] - Authentication provided by [LDAP], [Keystone], [Dex], [Google], [GitHub]
- ACLs, team management, and auditability logs - ACLs, team management, and auditability logs
- Geo-replicated storage provided by local filesystems, [S3], [GCS], [Swift], and [Ceph] - Geo-replicated storage provided by local filesystems, [S3], [GCS], [Swift], [Ceph]
- Continuous Integration integrated with [GitHub], [Bitbucket], [GitLab], and [git] - Continuous Integration integrated with [GitHub], [Bitbucket], [GitLab], and [git]
- Security Vulnerability Analysis via [Clair] - Security Vulnerability Analysis via [Clair]
- [Swagger]-compliant HTTP API - [Swagger]-compliant HTTP API
[v2]: https://docs.docker.com/registry/spec/api/ [releases]: https://github.com/coreos-inc/quay/releases
[v1]: https://docs.docker.com/v1.6/reference/api/registry_api/
[v2]: https://docs.docker.com/v1.6/registry/
[v2.1]: https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md [v2.1]: https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md
[v2.2]: https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md
[AppC Image Discovery]: https://github.com/appc/spec/blob/master/spec/discovery.md [AppC Image Discovery]: https://github.com/appc/spec/blob/master/spec/discovery.md
[LDAP]: https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol [LDAP]: https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol
[Keystone]: http://docs.openstack.org/developer/keystone [Keystone]: http://docs.openstack.org/developer/keystone
[OIDC]: https://en.wikipedia.org/wiki/OpenID_Connect [Dex]: https://github.com/coreos/dex
[Google]: https://developers.google.com/identity/sign-in/web/sign-in [Google]: https://developers.google.com/identity/sign-in/web/sign-in
[GitHub]: https://developer.github.com/v3/oauth [GitHub]: https://developer.github.com/v3/oauth
[S3]: https://aws.amazon.com/s3 [S3]: https://aws.amazon.com/s3
@ -41,34 +40,195 @@ High-level features include:
[Bitbucket]: https://bitbucket.com [Bitbucket]: https://bitbucket.com
[GitLab]: https://gitlab.com [GitLab]: https://gitlab.com
[git]: https://git-scm.com [git]: https://git-scm.com
[Clair]: https://github.com/quay/clair [Clair]: https://github.com/coreos/clair
[Swagger]: http://swagger.io [Swagger]: http://swagger.io
## Getting Started ## Getting Started
* Explore a live instance of Project Quay hosted at [Quay.io] ### macOS
* Watch [talks] given about Project Quay
* Review the [documentation] for Red Hat Quay
* Get up and running with a containerized [development environment]
[Quay.io]: https://quay.io macOS developers will need:
[talks]: /docs/talks.md
[documentation]: https://access.redhat.com/documentation/en-us/red_hat_quay
[development environment]: /docs/development-container.md
## Community * [command line tools] or [xcode]
* [brew]
* Mailing List: [quay-dev@googlegroups.com] [command line tools]: https://developer.apple.com/downloads
* IRC: #quay on [freenode.net] [xcode]: https://developer.apple.com/downloads
* Bug tracking: [JBoss JIRA] [brew]: https://github.com/Homebrew/brew
* Security Issues: [security@redhat.com]
[quay-dev@googlegroups.com]: https://groups.google.com/forum/#!forum/quay-dev ```
[freenode.net]: https://webchat.freenode.net # Download the code
[JBoss JIRA]: https://issues.jboss.org/projects/PROJQUAY git clone git@github.com:coreos-inc/quay.git && cd quay
[security@redhat.com]: mailto:security@redhat.com
## License # Install the system dependencies
brew install libevent libmagic postgresql gpgme pyenv pyenv-virtualenv docker docker-machine
Project Quay is under the Apache 2.0 license. # create a default virtualmachine for docker
See the LICENSE file for details. docker-machine create -d virtualbox default
# setup brew dependencies
# note you may want to add these to your bashrc or zshrc file
eval "$(pyenv virtualenv-init -)"
eval "$(pyenv init -)"
eval $(/usr/local/bin/docker-machine env default)
# Some installs don't have /usr/include, required for finding SASL header files
# http://apple.stackexchange.com/questions/196224/unix-ln-s-command-not-permitted-in-osx-el-capitan-beta3
# Note: this command might fail because of newer macOS write protections; the link above explains
# how to fix that
if [ ! -e /usr/include ]; then sudo ln -s `xcrun --show-sdk-path`/usr/include /usr/include; fi
# Install the Python dependencies
pyenv install 2.7.11
pyenv virtualenv 2.7.11 quay
pyenv activate quay
# Some packages may fail to build with clang (which now defaults to C11).
CFLAGS='-std=c99' pip install -r requirements.txt
pip install -r requirements-dev.txt
# Setup a local config
git clone git@github.com:coreos-inc/quay-config.git ../quay-config
ln -s ../../quay-config/local conf/stack
```
### Useful docs
[docker](https://beta.docker.com/docs/mac/getting-started)
[docker-machine](https://docs.docker.com/machine/install-machine)
[pyenv](https://github.com/yyuu/pyenv)
[pyenv-virtualenv](https://github.com/yyuu/pyenv-virtualenv)
### Linux
TODO
## Running and Testing
### Test Data
A SQLite database full of test data is committed to this git repository at `test/data/test.db`.
This database is generated by executing `python initdb.py`.
The username and password of the admin test account are `devtable` and `password`, respectively.
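For example, a minimal sketch of regenerating the committed test database (assuming the virtualenv from the setup steps above is active):
```
# Rebuild the SQLite test database from scratch
rm test/data/test.db
python initdb.py
```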
### Local Scripts
* `local-run` runs the web server for testing
* `local-test` runs the unit test suite
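A sketch of the usual loop, run from the repository root (the exact wrapper filenames are an assumption; `./local-test.sh` also appears in the dependency workflow below):
```
# Run the unit test suite
./local-test.sh
# Start the web server for manual testing
./local-run.sh
```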
### Development inside Docker
To build and run a development container, pass one argument to local-docker.sh (a usage sketch follows this list):
- `buildman`: run the buildmanager
- `dev`: run web server on port 5000
- `initdb`: clear and initialize the test database
- `notifications`: run the notification worker
- `test`: run the unit test suite
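For example (a sketch; the argument values come from the list above, and invoking the script from the repository root is an assumption):
```
# Clear and seed the test database, then run the web server on port 5000
./local-docker.sh initdb
./local-docker.sh dev
```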
### Adding a Python Dependency
```
# Create a new virtualenv and activate it
pyenv virtualenv 2.7.11 quay-deps
pyenv activate quay-deps
# Install unversioned dependencies with your changes
pip install -r requirements-nover.txt
# Run the unit test suite
./local-test.sh
# Freeze the versions of all of the dependencies
pip freeze > requirements.txt
```
### Running the Build System
TODO
```
# Run an instance of redis
docker run -d -p 6379:6379 quay.io/quay/redis
```
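With redis available, the Procfile in this repository starts the build manager via `python -m buildman.builder`; a minimal sketch, assuming `conf/stack` is already linked to a local config as in the setup steps above:
```
# Start the build manager against the local config
python -m buildman.builder
```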
### To run individual tests
```
# To run a specific suite
TEST=true python -m test.test_api_usage -f
# To run a specific test in a suite
TEST=true python -m test.test_api_usage -f SuiteName
```
### Running migrations
```
# To create a new migration with this description.
# Note: there might be some errors about the unique id being too long
# That's okay as long as the migration file is created
./data/migrations/migration.sh "Description goes here"
# To test the up and down of the migration
./data/migrations/migration.sh # without params
# Migrations get run when you create a docker image or you can run them
# manually with the following command.
PYTHONPATH=. alembic upgrade head
# You can also rebuild your local sqlite db from initdb.py. Once you have a
# migration, rebuild the db and check in the changes to share your migration
# with others.
rm test/data/test.db
python initdb.py
```
## Documentation
* [Quay Enterprise Documentation](https://tectonic.com/quay-enterprise/docs/latest)
* [Quay.io Documentation](https://docs.quay.io)
### [Architecture at a Glance](https://docs.google.com/a/coreos.com/drawings/d/1J-YZs7aun1lLy-1wFwIZcBma5IJmZQ8WfgtEftHCKJ0/edit?usp=sharing)
### Terminology
#### Organizations
- **AppC**: a standards body responsible for a _Runtime_ and _Image Format_ superseded by the _Open Container Initiative_
- **Open Container Initiative**: a standards body responsible for a _Runtime_ specification and an _Image Format_
- **Docker**: a company that builds a platform that has its own _Image Formats_, _Build System_, _Container Runtime_, and _Container Orchestration_
#### Concepts
- **Image**: an archive containing all of the contents necessary to execute a container
- **Image Format**: a specification for the structure of an _Image_
- **Image Layer**: an _Image_ that may depend on being applied to other _Images_ to generate a final _Image_
- **Image Squashing**: the process of compressing an _Image_ into a single _Layer_
- **Manifest**: a text file containing metadata for a particular _Image_
- **Tag**: a human-friendly named, mutable pointer to a particular set of _Images_
- **Build System**: a program used to generate _Images_
- **Registry**: a program that speaks one or more standard protocols to store and receive _Images_
- **Repository**: a collection of related _Tags_ organized by a _Registry_
- **Push**: the act of uploading an _Image_ to a _Registry_
- **Pull**: the act of downloading an _Image_ from a _Registry_
- **Container**: an _Image_ and its execution environment
- **Container Runtime**: a program that can transform an _Image_ into a _Container_ by executing it
- **Container Orchestration**: a program or set of programs that provides a framework for deploying _Containers_
#### Software
- **Quay.io**: CoreOS's hosted _Registry_
- **Quay**: CoreOS's enterprise-grade _Registry_ product
- **quayctl**: an open source program that implements alternative methods for _pulling_ _Images_ from _Quay_
- **Clair**: an open source static analysis tool used to detect vulnerability in _Images_
- **Quay Security Scanning**: the integration between _Clair_ and _Quay_
- **Kubernetes**: an open source program implementing _Container Orchestration_
- **Docker Hub**: Docker's hosted _Registry_
- **Docker Trusted Registry**: Docker's enterprise-grade _Registry_ product
- **Notary**: an open source implementation of the TUF protocol used in _Docker Content Trust_
- **Docker Content Trust**: the integration between _Notary_ and _Docker Trusted Registry_
- **Docker Engine**: a program used to interact with all aspects of the Docker platform
- **Swarm**: a program implementing _Container Orchestration_ for the Docker platform

101
ROADMAP.md Normal file
View file

@ -0,0 +1,101 @@
# Quay Roadmap
| Abbreviation | Feature |
|---|---|
| **(H)** | Hosted Quay.io |
| **(ER)**| Enterprise Registry Only |
| **(B)** | Builders |
### Sprint 3/2 - 3/16
- **(H)** Launch Clair 1.0
- Tasks
- Backfill DB
- Provide timely logo feedback
- Quay blog post
- Clair blog post
- Screencast
- **(H)** Test and launch torrent GA
- Have a use case which shows improved performance
- Tasks
- Docs detailing reference use cases
- Publish quayctl
- Quayctl man page README
- Notify marketing when the above is done
- **(ER)** Figure out how to handle client cert generation
- Auto approval rules
- Auto generation
- UI for approving
- Tasks
- See if there is anything from Ed's tool that we can re-use
- Test assumptions around nginx client cert auth
- Figure out if we can verify certs in python if nginx approves
- Have a hangout with gtank w.r.t. client certs vs hmac vs jwt
- **(ER)** Clair in ER
- Tasks
- Integrate Clair with cert generation tool
- Blog post for Clair in ER
- Add Clair config to the setup tool
- Bugs
- Fix Quay permission loading performance for Clair
- OR: Make the Clair API on Quay batch
- Fix Clair readme
- Address Huawei PR for new Clair endpoint
### Unallocated
- **(ER)** Torrent support in ER
- Setup tool support
- Docs on how to get Chihaya running
- **(ER)** Online upgrade tool
- Migrations while site is live
- Nag people to upgrade
- **(B)** Dockerfile flag support
- Requires quay.yaml
- **(B)** Move build traffic to Packet
- Preliminary tests reduce build start latency from 2 minutes to 20 seconds
- **(B)** Multi-step builds
- build artifact
- bundle artifact
- test bundle
- **(H)** Docker Notary
- Support signed images with a known key
- **(H/ER)** Labels
- Support for Midas Package Manager-like distribution
- Integrated with Docker labels
- Mutable and immutable
- Searchable and fleshed out API
- **(H)** Integrate with tectonic.com sales pipeline
- Mirror Quay customers in tectonic (SVOC)?
- Callbacks to inform tectonic about quay events
- Accept and apply QE licenses to the stack
- **(ER)** Tectonic care and feeding
- Build tools to give us a concrete/declarative cluster deploy story
- Build a tool to migrate an app between tectonic clusters
- Assess the feasibility of upgrading a running cluster
- **(H)** Geo distribution through tectonic
- Spin up a tectonic cluster in another region
- Modify registry to run standalone on a tectonic cluster
- **(H)** Read available Quay.io
- Ability to choose uptime of data-plane auditability
- **(H)** Launch our API GA
- Versioned and backward compatible
- Adequate documentation
- **(B)** Builds as top level concept
- Multiple Quay.io repos from a single git push
- **(H)** Become the Tectonic app store
- Pods/apps as top level concept
- **(H)** Distribution tool
- Help people to get their apps from quay to Tectonic
- Requires App manifest or adequate flexibility
- **(H)** AppC support
- rkt push
- discovery
- **(H/ER)** Mirroring from another registry (pull)
### Speculative
- **(H)** Immediately consistent multi-region data availability
- Cockroach?
- **(H)** 2 factor auth
- How to integrate with Docker CLI?
- **(H)** Mirroring to a dependent registry (push)

View file

@ -1,49 +0,0 @@
# Testing quay
## Unit tests (run in CI automatically)
Basic unit tests for testing all the functionality of Quay:
```sh
make unit-test
```
## Registry tests (run in CI automatically)
Quay has two sets of registry tests (current and legacy), which simulate Docker clients by executing
REST operations against a spawned Quay instance.
```sh
make registry-test
make registry-test-old
```
## Certs tests (run in CI automatically)
Ensures that custom TLS certificates are correctly loaded into the Quay container on startup.
```sh
make certs-test
```
## Full database tests (run in CI automatically)
The full database test runs the entire suite of Quay unit tests against a real, running database
instance.
NOTE: The database *must be running* on the local machine before this test can be run.
```sh
TEST_DATABASE_URI=database-connection-string make full-db-test
```
## Clients tests (must be manually run)
The clients test spawns CoreOS virtual machines via Vagrant and VirtualBox and runs real Docker/podman
commands against a *running Quay*.
NOTE: A Quay *must be running* on the local machine before this test can be run.
```sh
make clients-test 10.0.2.2:5000 # IP+Port of the Quay on the host machine.
```

View file

@ -1,47 +0,0 @@
import os
import re
import subprocess
from util.config.provider import get_config_provider
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
CONF_DIR = os.getenv("QUAYCONF", os.path.join(ROOT_DIR, "conf/"))
STATIC_DIR = os.path.join(ROOT_DIR, 'static/')
STATIC_LDN_DIR = os.path.join(STATIC_DIR, 'ldn/')
STATIC_FONTS_DIR = os.path.join(STATIC_DIR, 'fonts/')
STATIC_WEBFONTS_DIR = os.path.join(STATIC_DIR, 'webfonts/')
TEMPLATE_DIR = os.path.join(ROOT_DIR, 'templates/')
IS_TESTING = 'TEST' in os.environ
IS_BUILDING = 'BUILDING' in os.environ
IS_KUBERNETES = 'KUBERNETES_SERVICE_HOST' in os.environ
OVERRIDE_CONFIG_DIRECTORY = os.path.join(CONF_DIR, 'stack/')
config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py',
testing=IS_TESTING, kubernetes=IS_KUBERNETES)
def _get_version_number_changelog():
try:
with open(os.path.join(ROOT_DIR, 'CHANGELOG.md')) as f:
return re.search(r'(v[0-9]+\.[0-9]+\.[0-9]+)', f.readline()).group(0)
except IOError:
return ''
def _get_git_sha():
if os.path.exists("GIT_HEAD"):
with open(os.path.join(ROOT_DIR, "GIT_HEAD")) as f:
return f.read()
else:
try:
return subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()[0:8]
except (OSError, subprocess.CalledProcessError, Exception):
pass
return "unknown"
__version__ = _get_version_number_changelog()
__gitrev__ = _get_git_sha()

View file

@ -1,22 +0,0 @@
from enum import Enum, unique
from data.migrationutil import DefinedDataMigration, MigrationPhase
@unique
class ERTMigrationFlags(Enum):
""" Flags for the encrypted robot token migration. """
READ_OLD_FIELDS = 'read-old'
WRITE_OLD_FIELDS = 'write-old'
ActiveDataMigration = DefinedDataMigration(
'encrypted_robot_tokens',
'ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE',
[
MigrationPhase('add-new-fields', 'c13c8052f7a6', [ERTMigrationFlags.READ_OLD_FIELDS,
ERTMigrationFlags.WRITE_OLD_FIELDS]),
MigrationPhase('backfill-then-read-only-new',
'703298a825c2', [ERTMigrationFlags.WRITE_OLD_FIELDS]),
MigrationPhase('stop-writing-both', '703298a825c2', []),
MigrationPhase('remove-old-fields', 'c059b952ed76', []),
]
)

283
app.py
View file

@ -1,82 +1,63 @@
import hashlib
import json
import logging import logging
import os import os
import json
from functools import partial from functools import partial
from flask import Flask, request, Request, _request_ctx_stack
from Crypto.PublicKey import RSA
from flask import Flask, request, Request
from flask_login import LoginManager
from flask_mail import Mail
from flask_principal import Principal from flask_principal import Principal
from flask_login import LoginManager, UserMixin
from flask_mail import Mail
from werkzeug.routing import BaseConverter
from jwkest.jwk import RSAKey from jwkest.jwk import RSAKey
from werkzeug.contrib.fixers import ProxyFix from Crypto.PublicKey import RSA
from werkzeug.exceptions import HTTPException
import features import features
from _init import (config_provider, CONF_DIR, IS_KUBERNETES, IS_TESTING, OVERRIDE_CONFIG_DIRECTORY,
IS_BUILDING)
from auth.auth_context import get_authenticated_user
from avatars.avatars import Avatar from avatars.avatars import Avatar
from buildman.manager.buildcanceller import BuildCanceller from storage import Storage
from data import database
from data import model from data import model
from data import logs_model from data import database
from data.archivedlogs import LogArchive
from data.billing import Billing
from data.buildlogs import BuildLogs
from data.cache import get_model_cache
from data.model.user import LoginWrappedDBUser
from data.queue import WorkQueue, BuildMetricQueueReporter
from data.userevent import UserEventsBuilderModule
from data.userfiles import Userfiles from data.userfiles import Userfiles
from data.users import UserAuthentication from data.users import UserAuthentication
from data.registry_model import registry_model from data.billing import Billing
from path_converters import RegexConverter, RepositoryPathConverter, APIRepositoryPathConverter from data.buildlogs import BuildLogs
from oauth.services.github import GithubOAuthService from data.archivedlogs import LogArchive
from oauth.services.gitlab import GitLabOAuthService from data.userevent import UserEventsBuilderModule
from oauth.loginmanager import OAuthLoginManager from data.queue import WorkQueue, BuildMetricQueueReporter
from storage import Storage
from util.config import URLSchemeAndHostname
from util.log import filter_logs
from util import get_app_url from util import get_app_url
from util.secscan.secscan_util import get_blob_download_uri_getter
from util.ipresolver import IPResolver
from util.saas.analytics import Analytics from util.saas.analytics import Analytics
from util.saas.useranalytics import UserAnalytics
from util.saas.exceptionlog import Sentry from util.saas.exceptionlog import Sentry
from util.names import urn_generator from util.names import urn_generator
from util.config.oauth import (GoogleOAuthConfig, GithubOAuthConfig, GitLabOAuthConfig,
DexOAuthConfig)
from util.security.signing import Signer
from util.security.instancekeys import InstanceKeys
from util.saas.cloudwatch import start_cloudwatch_sender
from util.config.provider import get_config_provider
from util.config.configutil import generate_secret_key from util.config.configutil import generate_secret_key
from util.config.superusermanager import SuperUserManager from util.config.superusermanager import SuperUserManager
from util.label_validator import LabelValidator from util.secscan.api import SecurityScannerAPI
from util.metrics.metricqueue import MetricQueue from util.metrics.metricqueue import MetricQueue
from util.metrics.prometheus import PrometheusPlugin from util.metrics.prometheus import PrometheusPlugin
from util.saas.cloudwatch import start_cloudwatch_sender from util.label_validator import LabelValidator
from util.secscan.api import SecurityScannerAPI
from util.repomirror.api import RepoMirrorAPI
from util.tufmetadata.api import TUFMetadataAPI
from util.security.instancekeys import InstanceKeys
from util.security.signing import Signer
OVERRIDE_CONFIG_DIRECTORY = 'conf/stack/'
OVERRIDE_CONFIG_YAML_FILENAME = os.path.join(CONF_DIR, 'stack/config.yaml') OVERRIDE_CONFIG_YAML_FILENAME = 'conf/stack/config.yaml'
OVERRIDE_CONFIG_PY_FILENAME = os.path.join(CONF_DIR, 'stack/config.py') OVERRIDE_CONFIG_PY_FILENAME = 'conf/stack/config.py'
OVERRIDE_CONFIG_KEY = 'QUAY_OVERRIDE_CONFIG' OVERRIDE_CONFIG_KEY = 'QUAY_OVERRIDE_CONFIG'
DOCKER_V2_SIGNINGKEY_FILENAME = 'docker_v2.pem' DOCKER_V2_SIGNINGKEY_FILENAME = 'docker_v2.pem'
INIT_SCRIPTS_LOCATION = '/conf/init/'
app = Flask(__name__) app = Flask(__name__)
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
# Instantiate the configuration. # Instantiate the configuration.
is_testing = IS_TESTING is_testing = 'TEST' in os.environ
is_kubernetes = IS_KUBERNETES is_kubernetes = 'KUBERNETES_SERVICE_HOST' in os.environ
is_building = IS_BUILDING config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py',
testing=is_testing, kubernetes=is_kubernetes)
if is_testing: if is_testing:
from test.testconfig import TestConfig from test.testconfig import TestConfig
@ -95,31 +76,6 @@ config_provider.update_app_config(app.config)
environ_config = json.loads(os.environ.get(OVERRIDE_CONFIG_KEY, '{}')) environ_config = json.loads(os.environ.get(OVERRIDE_CONFIG_KEY, '{}'))
app.config.update(environ_config) app.config.update(environ_config)
# Fix remote address handling for Flask.
if app.config.get('PROXY_COUNT', 1):
app.wsgi_app = ProxyFix(app.wsgi_app, num_proxies=app.config.get('PROXY_COUNT', 1))
# Ensure the V3 upgrade key is specified correctly. If not, simply fail.
# TODO: Remove for V3.1.
if not is_testing and not is_building and app.config.get('SETUP_COMPLETE', False):
v3_upgrade_mode = app.config.get('V3_UPGRADE_MODE')
if v3_upgrade_mode is None:
raise Exception('Configuration flag `V3_UPGRADE_MODE` must be set. Please check the upgrade docs')
if (v3_upgrade_mode != 'background'
and v3_upgrade_mode != 'complete'
and v3_upgrade_mode != 'production-transition'
and v3_upgrade_mode != 'post-oci-rollout'
and v3_upgrade_mode != 'post-oci-roll-back-compat'):
raise Exception('Invalid value for config `V3_UPGRADE_MODE`. Please check the upgrade docs')
# Split the registry model based on config.
# TODO: Remove once we are fully on the OCI data model.
registry_model.setup_split(app.config.get('OCI_NAMESPACE_PROPORTION') or 0,
app.config.get('OCI_NAMESPACE_WHITELIST') or set(),
app.config.get('V22_NAMESPACE_WHITELIST') or set(),
app.config.get('V3_UPGRADE_MODE'))
# Allow user to define a custom storage preference for the local instance. # Allow user to define a custom storage preference for the local instance.
_distributed_storage_preference = os.environ.get('QUAY_DISTRIBUTED_STORAGE_PREFERENCE', '').split() _distributed_storage_preference = os.environ.get('QUAY_DISTRIBUTED_STORAGE_PREFERENCE', '').split()
if _distributed_storage_preference: if _distributed_storage_preference:
@ -139,10 +95,6 @@ if (app.config['PREFERRED_URL_SCHEME'] == 'https' and
# Load features from config. # Load features from config.
features.import_features(app.config) features.import_features(app.config)
CONFIG_DIGEST = hashlib.sha256(json.dumps(app.config, default=str)).hexdigest()[0:8]
logger.debug("Loaded config", extra={"config": app.config})
class RequestWithId(Request): class RequestWithId(Request):
request_gen = staticmethod(urn_generator(['request'])) request_gen = staticmethod(urn_generator(['request']))
@ -154,85 +106,77 @@ class RequestWithId(Request):
@app.before_request @app.before_request
def _request_start(): def _request_start():
if os.getenv('PYDEV_DEBUG', None): logger.debug('Starting request: %s', request.path)
import pydevd
host, port = os.getenv('PYDEV_DEBUG').split(':')
pydevd.settrace(host, port=int(port), stdoutToServer=True, stderrToServer=True, suspend=False)
logger.debug('Starting request: %s (%s)', request.request_id, request.path,
extra={"request_id": request.request_id})
DEFAULT_FILTER = lambda x: '[FILTERED]'
FILTERED_VALUES = [
{'key': ['password'], 'fn': DEFAULT_FILTER},
{'key': ['user', 'password'], 'fn': DEFAULT_FILTER},
{'key': ['blob'], 'fn': lambda x: x[0:8]}
]
@app.after_request @app.after_request
def _request_end(resp): def _request_end(r):
try: logger.debug('Ending request: %s', request.path)
jsonbody = request.get_json(force=True, silent=True) return r
except HTTPException:
jsonbody = None
values = request.values.to_dict()
if jsonbody and not isinstance(jsonbody, dict):
jsonbody = {'_parsererror': jsonbody}
if isinstance(values, dict):
filter_logs(values, FILTERED_VALUES)
extra = {
"endpoint": request.endpoint,
"request_id" : request.request_id,
"remote_addr": request.remote_addr,
"http_method": request.method,
"original_url": request.url,
"path": request.path,
"parameters": values,
"json_body": jsonbody,
"confsha": CONFIG_DIGEST,
}
if request.user_agent is not None:
extra["user-agent"] = request.user_agent.string
logger.debug('Ending request: %s (%s)', request.request_id, request.path, extra=extra)
return resp
class InjectingFilter(logging.Filter):
def filter(self, record):
if _request_ctx_stack.top is not None:
record.msg = '[%s] %s' % (request.request_id, record.msg)
return True
root_logger = logging.getLogger() root_logger = logging.getLogger()
# Add the request id filter to all handlers of the root logger
for handler in root_logger.handlers:
handler.addFilter(InjectingFilter())
app.request_class = RequestWithId app.request_class = RequestWithId
# Register custom converters. # Register custom converters.
class RegexConverter(BaseConverter):
""" Converter for handling custom regular expression patterns in paths. """
def __init__(self, url_map, regex_value):
super(RegexConverter, self).__init__(url_map)
self.regex = regex_value
class RepositoryPathConverter(BaseConverter):
""" Converter for handling repository paths. Handles both library and non-library paths (if
configured).
"""
def __init__(self, url_map):
super(RepositoryPathConverter, self).__init__(url_map)
self.weight = 200
if features.LIBRARY_SUPPORT:
# Allow names without namespaces.
self.regex = r'[^/]+(/[^/]+)?'
else:
self.regex = r'([^/]+/[^/]+)'
class APIRepositoryPathConverter(BaseConverter):
""" Converter for handling repository paths. Does not handle library paths.
"""
def __init__(self, url_map):
super(APIRepositoryPathConverter, self).__init__(url_map)
self.weight = 200
self.regex = r'([^/]+/[^/]+)'
app.url_map.converters['regex'] = RegexConverter app.url_map.converters['regex'] = RegexConverter
app.url_map.converters['repopath'] = RepositoryPathConverter app.url_map.converters['repopath'] = RepositoryPathConverter
app.url_map.converters['apirepopath'] = APIRepositoryPathConverter app.url_map.converters['apirepopath'] = APIRepositoryPathConverter
Principal(app, use_sessions=False) Principal(app, use_sessions=False)
tf = app.config['DB_TRANSACTION_FACTORY']
model_cache = get_model_cache(app.config)
avatar = Avatar(app) avatar = Avatar(app)
login_manager = LoginManager(app) login_manager = LoginManager(app)
mail = Mail(app) mail = Mail(app)
prometheus = PrometheusPlugin(app) prometheus = PrometheusPlugin(app)
metric_queue = MetricQueue(prometheus) metric_queue = MetricQueue(prometheus)
chunk_cleanup_queue = WorkQueue(app.config['CHUNK_CLEANUP_QUEUE_NAME'], tf, metric_queue=metric_queue)
instance_keys = InstanceKeys(app) instance_keys = InstanceKeys(app)
ip_resolver = IPResolver(app) storage = Storage(app, metric_queue, instance_keys)
storage = Storage(app, metric_queue, chunk_cleanup_queue, instance_keys, config_provider, ip_resolver)
userfiles = Userfiles(app, storage) userfiles = Userfiles(app, storage)
log_archive = LogArchive(app, storage) log_archive = LogArchive(app, storage)
analytics = Analytics(app) analytics = Analytics(app)
user_analytics = UserAnalytics(app)
billing = Billing(app) billing = Billing(app)
sentry = Sentry(app) sentry = Sentry(app)
build_logs = BuildLogs(app) build_logs = BuildLogs(app)
@ -242,48 +186,25 @@ superusers = SuperUserManager(app)
signer = Signer(app, config_provider) signer = Signer(app, config_provider)
instance_keys = InstanceKeys(app) instance_keys = InstanceKeys(app)
label_validator = LabelValidator(app) label_validator = LabelValidator(app)
build_canceller = BuildCanceller(app)
start_cloudwatch_sender(metric_queue, app) start_cloudwatch_sender(metric_queue, app)
github_trigger = GithubOAuthService(app.config, 'GITHUB_TRIGGER_CONFIG') tf = app.config['DB_TRANSACTION_FACTORY']
gitlab_trigger = GitLabOAuthService(app.config, 'GITLAB_TRIGGER_CONFIG')
oauth_login = OAuthLoginManager(app.config) github_login = GithubOAuthConfig(app.config, 'GITHUB_LOGIN_CONFIG')
oauth_apps = [github_trigger, gitlab_trigger] github_trigger = GithubOAuthConfig(app.config, 'GITHUB_TRIGGER_CONFIG')
gitlab_trigger = GitLabOAuthConfig(app.config, 'GITLAB_TRIGGER_CONFIG')
google_login = GoogleOAuthConfig(app.config, 'GOOGLE_LOGIN_CONFIG')
dex_login = DexOAuthConfig(app.config, 'DEX_LOGIN_CONFIG')
image_replication_queue = WorkQueue(app.config['REPLICATION_QUEUE_NAME'], tf, oauth_apps = [github_login, github_trigger, gitlab_trigger, google_login, dex_login]
has_namespace=False, metric_queue=metric_queue)
image_replication_queue = WorkQueue(app.config['REPLICATION_QUEUE_NAME'], tf)
dockerfile_build_queue = WorkQueue(app.config['DOCKERFILE_BUILD_QUEUE_NAME'], tf, dockerfile_build_queue = WorkQueue(app.config['DOCKERFILE_BUILD_QUEUE_NAME'], tf,
metric_queue=metric_queue, reporter=BuildMetricQueueReporter(metric_queue))
reporter=BuildMetricQueueReporter(metric_queue), notification_queue = WorkQueue(app.config['NOTIFICATION_QUEUE_NAME'], tf)
has_namespace=True) secscan_notification_queue = WorkQueue(app.config['SECSCAN_NOTIFICATION_QUEUE_NAME'], tf)
notification_queue = WorkQueue(app.config['NOTIFICATION_QUEUE_NAME'], tf, has_namespace=True, secscan_api = SecurityScannerAPI(app, app.config, storage)
metric_queue=metric_queue)
secscan_notification_queue = WorkQueue(app.config['SECSCAN_NOTIFICATION_QUEUE_NAME'], tf,
has_namespace=False,
metric_queue=metric_queue)
export_action_logs_queue = WorkQueue(app.config['EXPORT_ACTION_LOGS_QUEUE_NAME'], tf,
has_namespace=True,
metric_queue=metric_queue)
# Note: We set `has_namespace` to `False` here, as we explicitly want this queue to not be emptied
# when a namespace is marked for deletion.
namespace_gc_queue = WorkQueue(app.config['NAMESPACE_GC_QUEUE_NAME'], tf, has_namespace=False,
metric_queue=metric_queue)
all_queues = [image_replication_queue, dockerfile_build_queue, notification_queue,
secscan_notification_queue, chunk_cleanup_queue, namespace_gc_queue]
url_scheme_and_hostname = URLSchemeAndHostname(app.config['PREFERRED_URL_SCHEME'], app.config['SERVER_HOSTNAME'])
secscan_api = SecurityScannerAPI(app.config, storage, app.config['SERVER_HOSTNAME'], app.config['HTTPCLIENT'],
uri_creator=get_blob_download_uri_getter(app.test_request_context('/'), url_scheme_and_hostname),
instance_keys=instance_keys)
repo_mirror_api = RepoMirrorAPI(app.config, app.config['SERVER_HOSTNAME'], app.config['HTTPCLIENT'],
instance_keys=instance_keys)
tuf_metadata_api = TUFMetadataAPI(app, app.config)
# Check for a key in config. If none found, generate a new signing key for Docker V2 manifests. # Check for a key in config. If none found, generate a new signing key for Docker V2 manifests.
_v2_key_path = os.path.join(OVERRIDE_CONFIG_DIRECTORY, DOCKER_V2_SIGNINGKEY_FILENAME) _v2_key_path = os.path.join(OVERRIDE_CONFIG_DIRECTORY, DOCKER_V2_SIGNINGKEY_FILENAME)
@ -292,23 +213,35 @@ if os.path.exists(_v2_key_path):
else: else:
docker_v2_signing_key = RSAKey(key=RSA.generate(2048)) docker_v2_signing_key = RSAKey(key=RSA.generate(2048))
# Configure the database.
if app.config.get('DATABASE_SECRET_KEY') is None and app.config.get('SETUP_COMPLETE', False):
raise Exception('Missing DATABASE_SECRET_KEY in config; did you perhaps forget to add it?')
database.configure(app.config) database.configure(app.config)
model.config.app_config = app.config model.config.app_config = app.config
model.config.store = storage model.config.store = storage
model.config.register_image_cleanup_callback(secscan_api.cleanup_layers)
model.config.register_repo_cleanup_callback(tuf_metadata_api.delete_metadata)
@login_manager.user_loader @login_manager.user_loader
def load_user(user_uuid): def load_user(user_uuid):
logger.debug('User loader loading deferred user with uuid: %s', user_uuid) logger.debug('User loader loading deferred user with uuid: %s' % user_uuid)
return LoginWrappedDBUser(user_uuid) return LoginWrappedDBUser(user_uuid)
logs_model.configure(app.config) class LoginWrappedDBUser(UserMixin):
def __init__(self, user_uuid, db_user=None):
self._uuid = user_uuid
self._db_user = db_user
def db_user(self):
if not self._db_user:
self._db_user = model.user.get_user_by_uuid(self._uuid)
return self._db_user
@property
def is_authenticated(self):
return self.db_user() is not None
@property
def is_active(self):
return self.db_user().verified
def get_id(self):
return unicode(self._uuid)
get_app_url = partial(get_app_url, app.config) get_app_url = partial(get_app_url, app.config)

View file

@ -1,12 +1,6 @@
# NOTE: Must be before we import or call anything that may be synchronous.
from gevent import monkey
monkey.patch_all()
import os
import logging import logging
import logging.config import logging.config
from util.log import logfile_path
from app import app as application from app import app as application
@ -18,5 +12,5 @@ import secscan
if __name__ == '__main__': if __name__ == '__main__':
logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False) logging.config.fileConfig('conf/logging_debug.conf', disable_existing_loggers=False)
application.run(port=5000, debug=True, threaded=True, host='0.0.0.0') application.run(port=5000, debug=True, threaded=True, host='0.0.0.0')

View file

@ -1,21 +1,69 @@
from flask import _request_ctx_stack import logging
from flask import _request_ctx_stack
from data import model
logger = logging.getLogger(__name__)
def get_authenticated_context():
""" Returns the auth context for the current request context, if any. """
return getattr(_request_ctx_stack.top, 'authenticated_context', None)
def get_authenticated_user(): def get_authenticated_user():
""" Returns the authenticated user, if any, or None if none. """ user = getattr(_request_ctx_stack.top, 'authenticated_user', None)
context = get_authenticated_context() if not user:
return context.authed_user if context else None user_uuid = getattr(_request_ctx_stack.top, 'authenticated_user_uuid', None)
if not user_uuid:
logger.debug('No authenticated user or deferred user uuid.')
return None
logger.debug('Loading deferred authenticated user.')
loaded = model.user.get_user_by_uuid(user_uuid)
if not loaded.enabled:
return None
set_authenticated_user(loaded)
user = loaded
if user:
logger.debug('Returning authenticated user: %s', user.username)
return user
def set_authenticated_user(user_or_robot):
if not user_or_robot.enabled:
raise Exception('Attempt to authenticate a disabled user/robot: %s' % user_or_robot.username)
ctx = _request_ctx_stack.top
ctx.authenticated_user = user_or_robot
def get_grant_context():
return getattr(_request_ctx_stack.top, 'grant_context', None)
def set_grant_context(grant_context):
ctx = _request_ctx_stack.top
ctx.grant_context = grant_context
def set_authenticated_user_deferred(user_or_robot_db_uuid):
logger.debug('Deferring loading of authenticated user object with uuid: %s', user_or_robot_db_uuid)
ctx = _request_ctx_stack.top
ctx.authenticated_user_uuid = user_or_robot_db_uuid
def get_validated_oauth_token(): def get_validated_oauth_token():
""" Returns the authenticated and validated OAuth access token, if any, or None if none. """ return getattr(_request_ctx_stack.top, 'validated_oauth_token', None)
context = get_authenticated_context()
return context.authed_oauth_token if context else None
def set_authenticated_context(auth_context):
""" Sets the auth context for the current request context to that given. """ def set_validated_oauth_token(token):
ctx = _request_ctx_stack.top ctx = _request_ctx_stack.top
ctx.authenticated_context = auth_context ctx.validated_oauth_token = token
return auth_context
def get_validated_token():
return getattr(_request_ctx_stack.top, 'validated_token', None)
def set_validated_token(token):
ctx = _request_ctx_stack.top
ctx.validated_token = token

View file

@ -1,437 +0,0 @@
import logging
from abc import ABCMeta, abstractmethod
from cachetools.func import lru_cache
from six import add_metaclass
from app import app
from data import model
from flask_principal import Identity, identity_changed
from auth.auth_context import set_authenticated_context
from auth.context_entity import ContextEntityKind, CONTEXT_ENTITY_HANDLERS
from auth.permissions import QuayDeferredPermissionUser
from auth.scopes import scopes_from_scope_string
logger = logging.getLogger(__name__)
@add_metaclass(ABCMeta)
class AuthContext(object):
"""
Interface that represents the current context of authentication.
"""
@property
@abstractmethod
def entity_kind(self):
""" Returns the kind of the entity in this auth context. """
pass
@property
@abstractmethod
def is_anonymous(self):
""" Returns true if this is an anonymous context. """
pass
@property
@abstractmethod
def authed_oauth_token(self):
""" Returns the authenticated OAuth token, if any. """
pass
@property
@abstractmethod
def authed_user(self):
""" Returns the authenticated user, whether directly, or via an OAuth or access token. Note that
this property will also return robot accounts.
"""
pass
@property
@abstractmethod
def has_nonrobot_user(self):
""" Returns whether a user (not a robot) was authenticated successfully. """
pass
@property
@abstractmethod
def identity(self):
""" Returns the identity for the auth context. """
pass
@property
@abstractmethod
def description(self):
""" Returns a human-readable and *public* description of the current auth context. """
pass
@property
@abstractmethod
def credential_username(self):
""" Returns the username to create credentials for this context's entity, if any. """
pass
@abstractmethod
def analytics_id_and_public_metadata(self):
""" Returns the analytics ID and public log metadata for this auth context. """
pass
@abstractmethod
def apply_to_request_context(self):
""" Applies this auth result to the auth context and Flask-Principal. """
pass
@abstractmethod
def to_signed_dict(self):
""" Serializes the auth context into a dictionary suitable for inclusion in a JWT or other
form of signed serialization.
"""
pass
@property
@abstractmethod
def unique_key(self):
""" Returns a key that is unique to this auth context type and its data. For example, an
instance of the auth context type for the user might be a string of the form
`user-{user-uuid}`. Callers should treat this key as opaque and not rely on the contents
for anything besides uniqueness. This is typically used by callers when they'd like to
check cache but not hit the database to get a fully validated auth context.
"""
pass
class ValidatedAuthContext(AuthContext):
""" ValidatedAuthContext represents the loaded, authenticated and validated auth information
for the current request context.
"""
def __init__(self, user=None, token=None, oauthtoken=None, robot=None, appspecifictoken=None,
signed_data=None):
# Note: These field names *MUST* match the string values of the kinds defined in
# ContextEntityKind.
self.user = user
self.robot = robot
self.token = token
self.oauthtoken = oauthtoken
self.appspecifictoken = appspecifictoken
self.signed_data = signed_data
def tuple(self):
return vars(self).values()
def __eq__(self, other):
return self.tuple() == other.tuple()
@property
def entity_kind(self):
""" Returns the kind of the entity in this auth context. """
for kind in ContextEntityKind:
if hasattr(self, kind.value) and getattr(self, kind.value):
return kind
return ContextEntityKind.anonymous
@property
def authed_user(self):
""" Returns the authenticated user, whether directly, or via an OAuth token. Note that this
will also return robot accounts.
"""
authed_user = self._authed_user()
if authed_user is not None and not authed_user.enabled:
logger.warning('Attempt to reference a disabled user/robot: %s', authed_user.username)
return None
return authed_user
@property
def authed_oauth_token(self):
return self.oauthtoken
def _authed_user(self):
if self.oauthtoken:
return self.oauthtoken.authorized_user
if self.appspecifictoken:
return self.appspecifictoken.user
if self.signed_data:
return model.user.get_user(self.signed_data['user_context'])
return self.user if self.user else self.robot
@property
def is_anonymous(self):
""" Returns true if this is an anonymous context. """
return not self.authed_user and not self.token and not self.signed_data
@property
def has_nonrobot_user(self):
""" Returns whether a user (not a robot) was authenticated successfully. """
return bool(self.authed_user and not self.robot)
@property
def identity(self):
""" Returns the identity for the auth context. """
if self.oauthtoken:
scope_set = scopes_from_scope_string(self.oauthtoken.scope)
return QuayDeferredPermissionUser.for_user(self.oauthtoken.authorized_user, scope_set)
if self.authed_user:
return QuayDeferredPermissionUser.for_user(self.authed_user)
if self.token:
return Identity(self.token.get_code(), 'token')
if self.signed_data:
identity = Identity(None, 'signed_grant')
identity.provides.update(self.signed_data['grants'])
return identity
return None
@property
def entity_reference(self):
""" Returns the DB object reference for this context's entity. """
if self.entity_kind == ContextEntityKind.anonymous:
return None
return getattr(self, self.entity_kind.value)
@property
def description(self):
""" Returns a human-readable and *public* description of the current auth context. """
handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]()
return handler.description(self.entity_reference)
@property
def credential_username(self):
""" Returns the username to create credentials for this context's entity, if any. """
handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]()
return handler.credential_username(self.entity_reference)
def analytics_id_and_public_metadata(self):
""" Returns the analytics ID and public log metadata for this auth context. """
handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]()
return handler.analytics_id_and_public_metadata(self.entity_reference)
def apply_to_request_context(self):
""" Applies this auth result to the auth context and Flask-Principal. """
# Save to the request context.
set_authenticated_context(self)
# Set the identity for Flask-Principal.
if self.identity:
identity_changed.send(app, identity=self.identity)
@property
def unique_key(self):
signed_dict = self.to_signed_dict()
return '%s-%s' % (signed_dict['entity_kind'], signed_dict.get('entity_reference', '(anon)'))
def to_signed_dict(self):
""" Serializes the auth context into a dictionary suitable for inclusion in a JWT or other
form of signed serialization.
"""
dict_data = {
'version': 2,
'entity_kind': self.entity_kind.value,
}
if self.entity_kind != ContextEntityKind.anonymous:
handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]()
dict_data.update({
'entity_reference': handler.get_serialized_entity_reference(self.entity_reference),
})
# Add legacy information.
# TODO: Remove this all once the new code is fully deployed.
if self.token:
dict_data.update({
'kind': 'token',
'token': self.token.code,
})
if self.oauthtoken:
dict_data.update({
'kind': 'oauth',
'oauth': self.oauthtoken.uuid,
'user': self.authed_user.username,
})
if self.user or self.robot:
dict_data.update({
'kind': 'user',
'user': self.authed_user.username,
})
if self.appspecifictoken:
dict_data.update({
'kind': 'user',
'user': self.authed_user.username,
})
if self.is_anonymous:
dict_data.update({
'kind': 'anonymous',
})
# End of legacy information.
return dict_data
class SignedAuthContext(AuthContext):
""" SignedAuthContext represents an auth context loaded from a signed token of some kind,
such as a JWT. Unlike ValidatedAuthContext, SignedAuthContext operates lazily, only loading
the actual {user, robot, token, etc} when requested. This allows registry operations that
only need to check if *some* entity is present to do so, without hitting the database.
"""
def __init__(self, kind, signed_data, v1_dict_format):
self.kind = kind
self.signed_data = signed_data
self.v1_dict_format = v1_dict_format
@property
def unique_key(self):
if self.v1_dict_format:
# Since V1 data format is verbose, just use the validated version to get the key.
return self._get_validated().unique_key
signed_dict = self.signed_data
return '%s-%s' % (signed_dict['entity_kind'], signed_dict.get('entity_reference', '(anon)'))
@classmethod
def build_from_signed_dict(cls, dict_data, v1_dict_format=False):
if not v1_dict_format:
entity_kind = ContextEntityKind(dict_data.get('entity_kind', 'anonymous'))
return SignedAuthContext(entity_kind, dict_data, v1_dict_format)
# Legacy handling.
# TODO: Remove this all once the new code is fully deployed.
kind_string = dict_data.get('kind', 'anonymous')
if kind_string == 'oauth':
kind_string = 'oauthtoken'
kind = ContextEntityKind(kind_string)
return SignedAuthContext(kind, dict_data, v1_dict_format)
@lru_cache(maxsize=1)
def _get_validated(self):
""" Returns a ValidatedAuthContext for this signed context, resolving all the necessary
references.
"""
if not self.v1_dict_format:
if self.kind == ContextEntityKind.anonymous:
return ValidatedAuthContext()
serialized_entity_reference = self.signed_data['entity_reference']
handler = CONTEXT_ENTITY_HANDLERS[self.kind]()
entity_reference = handler.deserialize_entity_reference(serialized_entity_reference)
if entity_reference is None:
logger.debug('Could not deserialize entity reference `%s` under kind `%s`',
serialized_entity_reference, self.kind)
return ValidatedAuthContext()
return ValidatedAuthContext(**{self.kind.value: entity_reference})
# Legacy handling.
# TODO: Remove this all once the new code is fully deployed.
kind_string = self.signed_data.get('kind', 'anonymous')
if kind_string == 'oauth':
kind_string = 'oauthtoken'
kind = ContextEntityKind(kind_string)
if kind == ContextEntityKind.anonymous:
return ValidatedAuthContext()
if kind == ContextEntityKind.user or kind == ContextEntityKind.robot:
user = model.user.get_user(self.signed_data.get('user', ''))
if not user:
return None
return ValidatedAuthContext(robot=user) if user.robot else ValidatedAuthContext(user=user)
if kind == ContextEntityKind.token:
token = model.token.load_token_data(self.signed_data.get('token'))
if not token:
return None
return ValidatedAuthContext(token=token)
if kind == ContextEntityKind.oauthtoken:
user = model.user.get_user(self.signed_data.get('user', ''))
if not user:
return None
token_uuid = self.signed_data.get('oauth', '')
oauthtoken = model.oauth.lookup_access_token_for_user(user, token_uuid)
if not oauthtoken:
return None
return ValidatedAuthContext(oauthtoken=oauthtoken)
raise Exception('Unknown auth context kind `%s` when deserializing %s' % (kind,
self.signed_data))
# End of legacy handling.
@property
def entity_kind(self):
""" Returns the kind of the entity in this auth context. """
return self.kind
@property
def is_anonymous(self):
""" Returns true if this is an anonymous context. """
return self.kind == ContextEntityKind.anonymous
@property
def authed_user(self):
""" Returns the authenticated user, whether directly, or via an OAuth or access token. Note that
this property will also return robot accounts.
"""
if self.kind == ContextEntityKind.anonymous:
return None
return self._get_validated().authed_user
@property
def authed_oauth_token(self):
if self.kind == ContextEntityKind.anonymous:
return None
return self._get_validated().authed_oauth_token
@property
def has_nonrobot_user(self):
""" Returns whether a user (not a robot) was authenticated successfully. """
if self.kind == ContextEntityKind.anonymous:
return False
return self._get_validated().has_nonrobot_user
@property
def identity(self):
""" Returns the identity for the auth context. """
return self._get_validated().identity
@property
def description(self):
""" Returns a human-readable and *public* description of the current auth context. """
return self._get_validated().description
@property
def credential_username(self):
""" Returns the username to create credentials for this context's entity, if any. """
return self._get_validated().credential_username
def analytics_id_and_public_metadata(self):
""" Returns the analytics ID and public log metadata for this auth context. """
return self._get_validated().analytics_id_and_public_metadata()
def apply_to_request_context(self):
""" Applies this auth result to the auth context and Flask-Principal. """
return self._get_validated().apply_to_request_context()
def to_signed_dict(self):
""" Serializes the auth context into a dictionary suitable for inclusion in a JWT or other
form of signed serialization.
"""
return self.signed_data
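Example (illustrative sketch, assuming an initialized Quay app and the development database fixtures with the `devtable` user): the intended round trip is to validate once, serialize the compact dictionary into a signed payload, and later rebuild a lazy context that only touches the database when an authenticated property is actually read.
from auth.auth_context_type import ValidatedAuthContext, SignedAuthContext
from data import model

user = model.user.get_user('devtable')
validated = ValidatedAuthContext(user=user)
payload = validated.to_signed_dict()   # e.g. {'version': 2, 'entity_kind': 'user', 'entity_reference': <uuid>, ...}

# Later, possibly in another worker process, rebuild without a DB hit...
lazy = SignedAuthContext.build_from_signed_dict(payload)
assert lazy.unique_key == validated.unique_key   # computed from the dict alone

# ...until an authenticated property is read, which resolves the reference.
assert lazy.authed_user == user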

View file

@ -1,58 +0,0 @@
import logging
from base64 import b64decode
from flask import request
from auth.credentials import validate_credentials
from auth.validateresult import ValidateResult, AuthKind
logger = logging.getLogger(__name__)
def has_basic_auth(username):
""" Returns true if a basic auth header exists with a username and password pair that validates
against the internal authentication system. Returns True on full success and False on any
failure (missing header, invalid header, invalid credentials, etc).
"""
auth_header = request.headers.get('authorization', '')
result = validate_basic_auth(auth_header)
return result.has_nonrobot_user and result.context.user.username == username
def validate_basic_auth(auth_header):
""" Validates the specified basic auth header, returning whether its credentials point
to a valid user or token.
"""
if not auth_header:
return ValidateResult(AuthKind.basic, missing=True)
logger.debug('Attempt to process basic auth header')
# Parse the basic auth header.
assert isinstance(auth_header, basestring)
credentials, err = _parse_basic_auth_header(auth_header)
if err is not None:
logger.debug('Got invalid basic auth header: %s', auth_header)
return ValidateResult(AuthKind.basic, missing=True)
auth_username, auth_password_or_token = credentials
result, _ = validate_credentials(auth_username, auth_password_or_token)
return result.with_kind(AuthKind.basic)
def _parse_basic_auth_header(auth):
""" Parses the given basic auth header, returning the credentials found inside.
"""
normalized = [part.strip() for part in auth.split(' ') if part]
if normalized[0].lower() != 'basic' or len(normalized) != 2:
return None, 'Invalid basic auth header'
try:
credentials = [part.decode('utf-8') for part in b64decode(normalized[1]).split(':', 1)]
except (TypeError, UnicodeDecodeError, ValueError):
logger.exception('Exception when parsing basic auth header: %s', auth)
return None, 'Could not parse basic auth header'
if len(credentials) != 2:
return None, 'Unexpected number of credentials found in basic auth header'
return credentials, None
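Example (sketch only; validating real credentials requires an initialized Quay app and database): build the same `Authorization` header a Docker client would send and run it through the validator above.
from base64 import b64encode

from auth.basic import validate_basic_auth

header = 'Basic ' + b64encode('devtable:password')   # Python 2 str in, str out
result = validate_basic_auth(header)
if result.auth_valid:
  result.apply_to_context()   # populates the request auth context and Flask-Principal identity
else:
  print(result.error_message)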

View file

@ -1,203 +0,0 @@
from abc import ABCMeta, abstractmethod
from six import add_metaclass
from enum import Enum
from data import model
from auth.credential_consts import (ACCESS_TOKEN_USERNAME, OAUTH_TOKEN_USERNAME,
APP_SPECIFIC_TOKEN_USERNAME)
class ContextEntityKind(Enum):
""" Defines the various kinds of entities in an auth context. Note that the string values of
these fields *must* match the names of the fields in the ValidatedAuthContext class, as
we fill them in directly based on the string names here.
"""
anonymous = 'anonymous'
user = 'user'
robot = 'robot'
token = 'token'
oauthtoken = 'oauthtoken'
appspecifictoken = 'appspecifictoken'
signed_data = 'signed_data'
@add_metaclass(ABCMeta)
class ContextEntityHandler(object):
"""
Interface that represents handling specific kinds of entities under an auth context.
"""
@abstractmethod
def credential_username(self, entity_reference):
""" Returns the username to create credentials for this entity, if any. """
pass
@abstractmethod
def get_serialized_entity_reference(self, entity_reference):
""" Returns the entity reference for this kind of auth context, serialized into a form that can
be placed into a JSON object and put into a JWT. This is typically a DB UUID or another
unique identifier for the object in the DB.
"""
pass
@abstractmethod
def deserialize_entity_reference(self, serialized_entity_reference):
""" Returns the deserialized reference to the entity in the database, or None if none. """
pass
@abstractmethod
def description(self, entity_reference):
""" Returns a human-readable and *public* description of the current entity. """
pass
@abstractmethod
def analytics_id_and_public_metadata(self, entity_reference):
""" Returns the analyitics ID and a dict of public metadata for the current entity. """
pass
class AnonymousEntityHandler(ContextEntityHandler):
def credential_username(self, entity_reference):
return None
def get_serialized_entity_reference(self, entity_reference):
return None
def deserialize_entity_reference(self, serialized_entity_reference):
return None
def description(self, entity_reference):
return "anonymous"
def analytics_id_and_public_metadata(self, entity_reference):
return "anonymous", {}
class UserEntityHandler(ContextEntityHandler):
def credential_username(self, entity_reference):
return entity_reference.username
def get_serialized_entity_reference(self, entity_reference):
return entity_reference.uuid
def deserialize_entity_reference(self, serialized_entity_reference):
return model.user.get_user_by_uuid(serialized_entity_reference)
def description(self, entity_reference):
return "user %s" % entity_reference.username
def analytics_id_and_public_metadata(self, entity_reference):
return entity_reference.username, {
'username': entity_reference.username,
}
class RobotEntityHandler(ContextEntityHandler):
def credential_username(self, entity_reference):
return entity_reference.username
def get_serialized_entity_reference(self, entity_reference):
return entity_reference.username
def deserialize_entity_reference(self, serialized_entity_reference):
return model.user.lookup_robot(serialized_entity_reference)
def description(self, entity_reference):
return "robot %s" % entity_reference.username
def analytics_id_and_public_metadata(self, entity_reference):
return entity_reference.username, {
'username': entity_reference.username,
'is_robot': True,
}
class TokenEntityHandler(ContextEntityHandler):
def credential_username(self, entity_reference):
return ACCESS_TOKEN_USERNAME
def get_serialized_entity_reference(self, entity_reference):
return entity_reference.get_code()
def deserialize_entity_reference(self, serialized_entity_reference):
return model.token.load_token_data(serialized_entity_reference)
def description(self, entity_reference):
return "token %s" % entity_reference.friendly_name
def analytics_id_and_public_metadata(self, entity_reference):
return 'token:%s' % entity_reference.id, {
'token': entity_reference.friendly_name,
}
class OAuthTokenEntityHandler(ContextEntityHandler):
def credential_username(self, entity_reference):
return OAUTH_TOKEN_USERNAME
def get_serialized_entity_reference(self, entity_reference):
return entity_reference.uuid
def deserialize_entity_reference(self, serialized_entity_reference):
return model.oauth.lookup_access_token_by_uuid(serialized_entity_reference)
def description(self, entity_reference):
return "oauthtoken for user %s" % entity_reference.authorized_user.username
def analytics_id_and_public_metadata(self, entity_reference):
return 'oauthtoken:%s' % entity_reference.id, {
'oauth_token_id': entity_reference.id,
'oauth_token_application_id': entity_reference.application.client_id,
'oauth_token_application': entity_reference.application.name,
'username': entity_reference.authorized_user.username,
}
class AppSpecificTokenEntityHandler(ContextEntityHandler):
def credential_username(self, entity_reference):
return APP_SPECIFIC_TOKEN_USERNAME
def get_serialized_entity_reference(self, entity_reference):
return entity_reference.uuid
def deserialize_entity_reference(self, serialized_entity_reference):
return model.appspecifictoken.get_token_by_uuid(serialized_entity_reference)
def description(self, entity_reference):
tpl = (entity_reference.title, entity_reference.user.username)
return "app specific token %s for user %s" % tpl
def analytics_id_and_public_metadata(self, entity_reference):
return 'appspecifictoken:%s' % entity_reference.id, {
'app_specific_token': entity_reference.uuid,
'app_specific_token_title': entity_reference.title,
'username': entity_reference.user.username,
}
class SignedDataEntityHandler(ContextEntityHandler):
def credential_username(self, entity_reference):
return None
def get_serialized_entity_reference(self, entity_reference):
raise NotImplementedError
def deserialize_entity_reference(self, serialized_entity_reference):
raise NotImplementedError
def description(self, entity_reference):
return "signed"
def analytics_id_and_public_metadata(self, entity_reference):
return 'signed', {'signed': entity_reference}
CONTEXT_ENTITY_HANDLERS = {
ContextEntityKind.anonymous: AnonymousEntityHandler,
ContextEntityKind.user: UserEntityHandler,
ContextEntityKind.robot: RobotEntityHandler,
ContextEntityKind.token: TokenEntityHandler,
ContextEntityKind.oauthtoken: OAuthTokenEntityHandler,
ContextEntityKind.appspecifictoken: AppSpecificTokenEntityHandler,
ContextEntityKind.signed_data: SignedDataEntityHandler,
}
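Example (a small sketch assuming the `devtable` user from the development fixtures): callers look up the handler for an entity kind and ask it for display and serialization data.
from auth.context_entity import ContextEntityKind, CONTEXT_ENTITY_HANDLERS
from data import model

user = model.user.get_user('devtable')
handler = CONTEXT_ENTITY_HANDLERS[ContextEntityKind.user]()

print(handler.description(user))                      # "user devtable"
print(handler.get_serialized_entity_reference(user))  # the user's DB UUID
analytics_id, public_metadata = handler.analytics_id_and_public_metadata(user)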

View file

@ -1,37 +0,0 @@
import logging
from uuid import UUID
from flask_login import current_user
from auth.validateresult import AuthKind, ValidateResult
logger = logging.getLogger(__name__)
def validate_session_cookie(auth_header_unusued=None):
""" Attempts to load a user from a session cookie. """
if current_user.is_anonymous:
return ValidateResult(AuthKind.cookie, missing=True)
try:
# Attempt to parse the user uuid to make sure the cookie has the right value type
UUID(current_user.get_id())
except ValueError:
logger.debug('Got non-UUID for session cookie user: %s', current_user.get_id())
return ValidateResult(AuthKind.cookie, error_message='Invalid session cookie format')
logger.debug('Loading user from cookie: %s', current_user.get_id())
db_user = current_user.db_user()
if db_user is None:
return ValidateResult(AuthKind.cookie, error_message='Could not find matching user')
# Don't allow disabled users to login.
if not db_user.enabled:
logger.debug('User %s in session cookie is disabled', db_user.username)
return ValidateResult(AuthKind.cookie, error_message='User account is disabled')
# Don't allow organizations to "login".
if db_user.organization:
logger.debug('User %s in session cookie is in-fact organization', db_user.username)
return ValidateResult(AuthKind.cookie, error_message='Cannot login to organization')
return ValidateResult(AuthKind.cookie, user=db_user)
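Example (sketch; assumes a Flask request in which flask_login has already loaded a session user): the validator never yields robots or organizations, so callers can rely on `has_nonrobot_user`.
from auth.cookie import validate_session_cookie

result = validate_session_cookie()
if result.has_nonrobot_user:
  result.apply_to_context()   # sets the authenticated context and identity
else:
  print(result.error_message or 'no session cookie present')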

View file

@ -1,3 +0,0 @@
ACCESS_TOKEN_USERNAME = '$token'
OAUTH_TOKEN_USERNAME = '$oauthtoken'
APP_SPECIFIC_TOKEN_USERNAME = '$app'

View file

@ -1,85 +0,0 @@
import logging
from enum import Enum
import features
from app import authentication
from auth.oauth import validate_oauth_token
from auth.validateresult import ValidateResult, AuthKind
from auth.credential_consts import (ACCESS_TOKEN_USERNAME, OAUTH_TOKEN_USERNAME,
APP_SPECIFIC_TOKEN_USERNAME)
from data import model
from util.names import parse_robot_username
logger = logging.getLogger(__name__)
class CredentialKind(Enum):
user = 'user'
robot = 'robot'
token = ACCESS_TOKEN_USERNAME
oauth_token = OAUTH_TOKEN_USERNAME
app_specific_token = APP_SPECIFIC_TOKEN_USERNAME
def validate_credentials(auth_username, auth_password_or_token):
""" Validates a pair of auth username and password/token credentials. """
# Check for access tokens.
if auth_username == ACCESS_TOKEN_USERNAME:
logger.debug('Found credentials for access token')
try:
token = model.token.load_token_data(auth_password_or_token)
logger.debug('Successfully validated credentials for access token %s', token.id)
return ValidateResult(AuthKind.credentials, token=token), CredentialKind.token
except model.DataModelException:
logger.warning('Failed to validate credentials for access token %s', auth_password_or_token)
return (ValidateResult(AuthKind.credentials, error_message='Invalid access token'),
CredentialKind.token)
# Check for App Specific tokens.
if features.APP_SPECIFIC_TOKENS and auth_username == APP_SPECIFIC_TOKEN_USERNAME:
logger.debug('Found credentials for app specific auth token')
token = model.appspecifictoken.access_valid_token(auth_password_or_token)
if token is None:
logger.debug('Failed to validate credentials for app specific token: %s',
auth_password_or_token)
return (ValidateResult(AuthKind.credentials, error_message='Invalid token'),
CredentialKind.app_specific_token)
if not token.user.enabled:
logger.debug('Tried to use an app specific token for a disabled user: %s',
token.uuid)
return (ValidateResult(AuthKind.credentials,
error_message='This user has been disabled. Please contact your administrator.'),
CredentialKind.app_specific_token)
logger.debug('Successfully validated credentials for app specific token %s', token.id)
return (ValidateResult(AuthKind.credentials, appspecifictoken=token),
CredentialKind.app_specific_token)
# Check for OAuth tokens.
if auth_username == OAUTH_TOKEN_USERNAME:
return validate_oauth_token(auth_password_or_token), CredentialKind.oauth_token
# Check for robots and users.
is_robot = parse_robot_username(auth_username)
if is_robot:
logger.debug('Found credentials header for robot %s', auth_username)
try:
robot = model.user.verify_robot(auth_username, auth_password_or_token)
logger.debug('Successfully validated credentials for robot %s', auth_username)
return ValidateResult(AuthKind.credentials, robot=robot), CredentialKind.robot
except model.InvalidRobotException as ire:
logger.warning('Failed to validate credentials for robot %s: %s', auth_username, ire)
return ValidateResult(AuthKind.credentials, error_message=str(ire)), CredentialKind.robot
# Otherwise, treat as a standard user.
(authenticated, err) = authentication.verify_and_link_user(auth_username, auth_password_or_token,
basic_auth=True)
if authenticated:
logger.debug('Successfully validated credentials for user %s', authenticated.username)
return ValidateResult(AuthKind.credentials, user=authenticated), CredentialKind.user
else:
logger.warning('Failed to validate credentials for user %s: %s', auth_username, err)
return ValidateResult(AuthKind.credentials, error_message=err), CredentialKind.user
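Example (illustrative; requires an initialized Quay app and database): the same entry point handles ordinary users, robots and the sentinel token usernames from credential_consts.
from auth.credential_consts import ACCESS_TOKEN_USERNAME
from auth.credentials import validate_credentials, CredentialKind

result, kind = validate_credentials('devtable', 'password')
assert kind == CredentialKind.user and result.auth_valid

# A client logging in with the '$token' username lands on the access-token branch.
result, kind = validate_credentials(ACCESS_TOKEN_USERNAME, 'not-a-real-token')
assert kind == CredentialKind.token and not result.auth_valid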

View file

@ -1,96 +0,0 @@
import logging
from functools import wraps
from flask import request, session
from app import metric_queue
from auth.basic import validate_basic_auth
from auth.oauth import validate_bearer_auth
from auth.cookie import validate_session_cookie
from auth.signedgrant import validate_signed_grant
from util.http import abort
logger = logging.getLogger(__name__)
def _auth_decorator(pass_result=False, handlers=None):
""" Builds an auth decorator that runs the given handlers and, if any return successfully,
sets up the auth context. The wrapped function will be invoked *regardless of success or
failure of the auth handler(s)*
"""
def processor(func):
@wraps(func)
def wrapper(*args, **kwargs):
auth_header = request.headers.get('authorization', '')
result = None
for handler in handlers:
result = handler(auth_header)
# If the handler was missing the necessary information, skip it and try the next one.
if result.missing:
continue
# Check for a valid result.
if result.auth_valid:
logger.debug('Found valid auth result: %s', result.tuple())
# Set the various pieces of the auth context.
result.apply_to_context()
# Log the metric.
metric_queue.authentication_count.Inc(labelvalues=[result.kind, True])
break
# Otherwise, report the error.
if result.error_message is not None:
# Log the failure.
metric_queue.authentication_count.Inc(labelvalues=[result.kind, False])
break
if pass_result:
kwargs['auth_result'] = result
return func(*args, **kwargs)
return wrapper
return processor
process_oauth = _auth_decorator(handlers=[validate_bearer_auth, validate_session_cookie])
process_auth = _auth_decorator(handlers=[validate_signed_grant, validate_basic_auth])
process_auth_or_cookie = _auth_decorator(handlers=[validate_basic_auth, validate_session_cookie])
process_basic_auth = _auth_decorator(handlers=[validate_basic_auth], pass_result=True)
process_basic_auth_no_pass = _auth_decorator(handlers=[validate_basic_auth])
def require_session_login(func):
""" Decorates a function and ensures that a valid session cookie exists or a 401 is raised. If
a valid session cookie does exist, the authenticated user and identity are also set.
"""
@wraps(func)
def wrapper(*args, **kwargs):
result = validate_session_cookie()
if result.has_nonrobot_user:
result.apply_to_context()
metric_queue.authentication_count.Inc(labelvalues=[result.kind, True])
return func(*args, **kwargs)
elif not result.missing:
metric_queue.authentication_count.Inc(labelvalues=[result.kind, False])
abort(401, message='Method requires login and no valid login could be loaded.')
return wrapper
def extract_namespace_repo_from_session(func):
""" Extracts the namespace and repository name from the current session (which must exist)
and passes them into the decorated function as the first and second arguments. If the
session doesn't exist or does not contain these arguments, a 400 error is raised.
"""
@wraps(func)
def wrapper(*args, **kwargs):
if 'namespace' not in session or 'repository' not in session:
logger.error('Unable to load namespace or repository from session: %s', session)
abort(400, message='Missing namespace in request')
return func(session['namespace'], session['repository'], *args, **kwargs)
return wrapper
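Example (hedged sketch of wiring these decorators onto Flask views; the routes are made up for illustration and `app` is assumed to be the Quay Flask application):
from flask import jsonify

from app import app
from auth.auth_context import get_authenticated_user
from auth.decorators import process_basic_auth, require_session_login


@app.route('/example/whoami')
@process_basic_auth
def whoami(auth_result):
  # process_basic_auth was built with pass_result=True, so the ValidateResult
  # is handed to the view whether or not authentication succeeded.
  return jsonify({'valid': auth_result.auth_valid})


@app.route('/example/settings')
@require_session_login
def settings():
  # Only reached when a valid, non-robot session cookie was presented.
  return jsonify({'username': get_authenticated_user().username})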

View file

@ -1,48 +0,0 @@
import logging
from datetime import datetime
from auth.scopes import scopes_from_scope_string
from auth.validateresult import AuthKind, ValidateResult
from data import model
logger = logging.getLogger(__name__)
def validate_bearer_auth(auth_header):
""" Validates an OAuth token found inside a basic auth `Bearer` token, returning whether it
points to a valid OAuth token.
"""
if not auth_header:
return ValidateResult(AuthKind.oauth, missing=True)
normalized = [part.strip() for part in auth_header.split(' ') if part]
if normalized[0].lower() != 'bearer' or len(normalized) != 2:
logger.debug('Got invalid bearer token format: %s', auth_header)
return ValidateResult(AuthKind.oauth, missing=True)
(_, oauth_token) = normalized
return validate_oauth_token(oauth_token)
def validate_oauth_token(token):
""" Validates the specified OAuth token, returning whether it points to a valid OAuth token.
"""
validated = model.oauth.validate_access_token(token)
if not validated:
logger.warning('OAuth access token could not be validated: %s', token)
return ValidateResult(AuthKind.oauth,
error_message='OAuth access token could not be validated')
if validated.expires_at <= datetime.utcnow():
logger.warning('OAuth access with an expired token: %s', token)
return ValidateResult(AuthKind.oauth, error_message='OAuth access token has expired')
# Don't allow disabled users to login.
if not validated.authorized_user.enabled:
return ValidateResult(AuthKind.oauth,
error_message='Granter of the oauth access token is disabled')
# We have a valid token
scope_set = scopes_from_scope_string(validated.scope)
logger.debug('Successfully validated oauth access token with scope: %s', scope_set)
return ValidateResult(AuthKind.oauth, oauthtoken=validated)
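Example (sketch; token validation needs an initialized Quay database, and the token value below is a placeholder): a bearer `Authorization` header is validated and, on success, the OAuth token is available on the result's context.
from auth.oauth import validate_bearer_auth

result = validate_bearer_auth('Bearer ' + 'x' * 40)   # placeholder token value
if result.auth_valid:
  print(result.context.authed_oauth_token.scope)
else:
  print(result.error_message)   # e.g. 'OAuth access token could not be validated'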

View file

@ -66,10 +66,6 @@ def repository_write_grant(namespace, repository):
return _RepositoryNeed(namespace, repository, 'write')
- def repository_admin_grant(namespace, repository):
- return _RepositoryNeed(namespace, repository, 'admin')
class QuayDeferredPermissionUser(Identity):
def __init__(self, uuid, auth_type, auth_scopes, user=None):
super(QuayDeferredPermissionUser, self).__init__(uuid, auth_type)

auth/process.py Normal file (276 lines)
View file

@ -0,0 +1,276 @@
import logging
from functools import wraps
from uuid import UUID
from datetime import datetime
from base64 import b64decode
from flask import request, session
from flask.sessions import SecureCookieSessionInterface, BadSignature
from flask_login import current_user
from flask_principal import identity_changed, Identity
import scopes
from app import app, authentication
from auth_context import (set_authenticated_user, set_validated_token, set_grant_context,
set_validated_oauth_token)
from data import model
from endpoints.exception import InvalidToken, ExpiredToken
from permissions import QuayDeferredPermissionUser
from util.http import abort
logger = logging.getLogger(__name__)
SIGNATURE_PREFIX = 'sigv2='
def _load_user_from_cookie():
if not current_user.is_anonymous:
try:
# Attempt to parse the user uuid to make sure the cookie has the right value type
UUID(current_user.get_id())
except ValueError:
return None
logger.debug('Loading user from cookie: %s', current_user.get_id())
db_user = current_user.db_user()
if db_user is not None:
# Don't allow disabled users to login.
if not db_user.enabled:
return None
set_authenticated_user(db_user)
loaded = QuayDeferredPermissionUser.for_user(db_user)
identity_changed.send(app, identity=loaded)
return db_user
return None
def _validate_and_apply_oauth_token(token):
validated = model.oauth.validate_access_token(token)
if not validated:
logger.warning('OAuth access token could not be validated: %s', token)
raise InvalidToken('OAuth access token could not be validated: {token}'.format(token=token))
elif validated.expires_at <= datetime.utcnow():
logger.info('OAuth access with an expired token: %s', token)
raise ExpiredToken('OAuth access token has expired: {token}'.format(token=token))
# Don't allow disabled users to login.
if not validated.authorized_user.enabled:
return None
# We have a valid token
scope_set = scopes.scopes_from_scope_string(validated.scope)
logger.debug('Successfully validated oauth access token: %s with scope: %s', token,
scope_set)
set_authenticated_user(validated.authorized_user)
set_validated_oauth_token(validated)
new_identity = QuayDeferredPermissionUser.for_user(validated.authorized_user, scope_set)
identity_changed.send(app, identity=new_identity)
def _parse_basic_auth_header(auth):
normalized = [part.strip() for part in auth.split(' ') if part]
if normalized[0].lower() != 'basic' or len(normalized) != 2:
logger.debug('Invalid basic auth format.')
return None
logger.debug('Found basic auth header: %s', auth)
try:
credentials = [part.decode('utf-8') for part in b64decode(normalized[1]).split(':', 1)]
except TypeError:
logger.exception('Exception when parsing basic auth header')
return None
if len(credentials) != 2:
logger.debug('Invalid basic auth credential format.')
return None
return credentials
def _process_basic_auth(auth):
credentials = _parse_basic_auth_header(auth)
if credentials is None:
return
if credentials[0] == '$token':
# Use as token auth
try:
token = model.token.load_token_data(credentials[1])
logger.debug('Successfully validated token: %s', credentials[1])
set_validated_token(token)
identity_changed.send(app, identity=Identity(token.code, 'token'))
return
except model.DataModelException:
logger.debug('Invalid token: %s', credentials[1])
elif credentials[0] == '$oauthtoken':
oauth_token = credentials[1]
_validate_and_apply_oauth_token(oauth_token)
elif '+' in credentials[0]:
logger.debug('Trying robot auth with credentials %s', str(credentials))
# Use as robot auth
try:
robot = model.user.verify_robot(credentials[0], credentials[1])
logger.debug('Successfully validated robot: %s', credentials[0])
set_authenticated_user(robot)
deferred_robot = QuayDeferredPermissionUser.for_user(robot)
identity_changed.send(app, identity=deferred_robot)
return
except model.InvalidRobotException:
logger.debug('Invalid robot or password for robot: %s', credentials[0])
else:
(authenticated, _) = authentication.verify_and_link_user(credentials[0], credentials[1],
basic_auth=True)
if authenticated:
logger.debug('Successfully validated user: %s', authenticated.username)
set_authenticated_user(authenticated)
new_identity = QuayDeferredPermissionUser.for_user(authenticated)
identity_changed.send(app, identity=new_identity)
return
# We weren't able to authenticate via basic auth.
logger.debug('Basic auth present but could not be validated.')
def has_basic_auth(username):
auth = request.headers.get('authorization', '')
if not auth:
return False
credentials = _parse_basic_auth_header(auth)
if not credentials:
return False
(authenticated, _) = authentication.verify_and_link_user(credentials[0], credentials[1],
basic_auth=True)
if not authenticated:
return False
return authenticated.username == username
def generate_signed_token(grants, user_context):
ser = SecureCookieSessionInterface().get_signing_serializer(app)
data_to_sign = {
'grants': grants,
'user_context': user_context,
}
encrypted = ser.dumps(data_to_sign)
return '{0}{1}'.format(SIGNATURE_PREFIX, encrypted)
def _process_signed_grant(auth):
normalized = [part.strip() for part in auth.split(' ') if part]
if normalized[0].lower() != 'token' or len(normalized) != 2:
logger.debug('Not a token: %s', auth)
return
if not normalized[1].startswith(SIGNATURE_PREFIX):
logger.debug('Not a signed grant token: %s', auth)
return
encrypted = normalized[1][len(SIGNATURE_PREFIX):]
ser = SecureCookieSessionInterface().get_signing_serializer(app)
try:
token_data = ser.loads(encrypted, max_age=app.config['SIGNED_GRANT_EXPIRATION_SEC'])
except BadSignature:
logger.warning('Signed grant could not be validated: %s', encrypted)
abort(401, message='Signed grant could not be validated: %(auth)s', issue='invalid-auth-token',
auth=auth)
logger.debug('Successfully validated signed grant with data: %s', token_data)
loaded_identity = Identity(None, 'signed_grant')
if token_data['user_context']:
set_grant_context({
'user': token_data['user_context'],
'kind': 'user',
})
loaded_identity.provides.update(token_data['grants'])
identity_changed.send(app, identity=loaded_identity)
def process_oauth(func):
@wraps(func)
def wrapper(*args, **kwargs):
auth = request.headers.get('authorization', '')
if auth:
normalized = [part.strip() for part in auth.split(' ') if part]
if normalized[0].lower() != 'bearer' or len(normalized) != 2:
logger.debug('Invalid oauth bearer token format.')
return func(*args, **kwargs)
token = normalized[1]
_validate_and_apply_oauth_token(token)
elif _load_user_from_cookie() is None:
logger.debug('No auth header or login cookie.')
return func(*args, **kwargs)
return wrapper
def process_auth(func):
@wraps(func)
def wrapper(*args, **kwargs):
auth = request.headers.get('authorization', '')
if auth:
logger.debug('Validating auth header: %s', auth)
_process_signed_grant(auth)
_process_basic_auth(auth)
else:
logger.debug('No auth header.')
return func(*args, **kwargs)
return wrapper
def process_auth_or_cookie(func):
@wraps(func)
def wrapper(*args, **kwargs):
auth = request.headers.get('authorization', '')
if auth:
logger.debug('Validating auth header: %s', auth)
_process_basic_auth(auth)
else:
logger.debug('No auth header.')
_load_user_from_cookie()
return func(*args, **kwargs)
return wrapper
def require_session_login(func):
@wraps(func)
def wrapper(*args, **kwargs):
loaded = _load_user_from_cookie()
if loaded is None or loaded.organization:
abort(401, message='Method requires login and no valid login could be loaded.')
return func(*args, **kwargs)
return wrapper
def extract_namespace_repo_from_session(func):
@wraps(func)
def wrapper(*args, **kwargs):
if 'namespace' not in session or 'repository' not in session:
logger.error('Unable to load namespace or repository from session: %s', session)
abort(400, message='Missing namespace in request')
return func(session['namespace'], session['repository'], *args, **kwargs)
return wrapper

View file

@ -6,18 +6,19 @@ from jsonschema import validate, ValidationError
from flask import request, url_for
from flask_principal import identity_changed, Identity
- from app import app, get_app_url, instance_keys, metric_queue
+ from app import app, get_app_url, instance_keys
- from auth.auth_context import set_authenticated_context
+ from .auth_context import set_grant_context, get_grant_context
- from auth.auth_context_type import SignedAuthContext
+ from .permissions import repository_read_grant, repository_write_grant
- from auth.permissions import repository_read_grant, repository_write_grant, repository_admin_grant
- from util.http import abort
from util.names import parse_namespace_repository
+ from util.http import abort
from util.security.registry_jwt import (ANONYMOUS_SUB, decode_bearer_header,
InvalidBearerTokenException)
+ from data import model
logger = logging.getLogger(__name__)
+ CONTEXT_KINDS = ['user', 'token', 'oauth']
ACCESS_SCHEMA = {
'type': 'array',
@ -49,7 +50,6 @@ ACCESS_SCHEMA = {
'enum': [
'push',
'pull',
- '*',
],
},
},
@ -62,6 +62,63 @@ class InvalidJWTException(Exception):
pass
class GrantedEntity(object):
def __init__(self, user=None, token=None, oauth=None):
self.user = user
self.token = token
self.oauth = oauth
def get_granted_entity():
""" Returns the entity granted in the current context, if any. Returns the GrantedEntity or None
if none.
"""
context = get_grant_context()
if not context:
return None
kind = context.get('kind', 'anonymous')
if not kind in CONTEXT_KINDS:
return None
if kind == 'user':
user = model.user.get_user(context.get('user', ''))
if not user:
return None
return GrantedEntity(user=user)
if kind == 'token':
token = model.token.load_token_data(context.get('token'))
if not token:
return None
return GrantedEntity(token=token)
if kind == 'oauth':
user = model.user.get_user(context.get('user', ''))
if not user:
return None
oauthtoken = model.oauth.lookup_access_token_for_user(user, context.get('oauth', ''))
if not oauthtoken:
return None
return GrantedEntity(oauth=oauthtoken, user=user)
return None
def get_granted_username():
""" Returns the username inside the grant, if any. """
granted = get_granted_entity()
if not granted or not granted.user:
return None
return granted.user.username
def get_auth_headers(repository=None, scopes=None):
""" Returns a dictionary of headers for auth responses. """
headers = {}
@ -70,11 +127,9 @@ def get_auth_headers(repository=None, scopes=None):
realm_auth_path,
app.config['SERVER_HOSTNAME'])
if repository:
- scopes_string = "repository:{0}".format(repository)
+ authenticate += ',scope=repository:{0}'.format(repository)
if scopes:
- scopes_string += ':' + ','.join(scopes)
+ authenticate += ':' + ','.join(scopes)
- authenticate += ',scope="{0}"'.format(scopes_string)
headers['WWW-Authenticate'] = authenticate
headers['Docker-Distribution-API-Version'] = 'registry/2.0'
@ -89,8 +144,7 @@ def identity_from_bearer_token(bearer_header):
logger.debug('Validating auth header: %s', bearer_header)
try:
- payload = decode_bearer_header(bearer_header, instance_keys, app.config,
- metric_queue=metric_queue)
+ payload = decode_bearer_header(bearer_header, instance_keys, app.config)
except InvalidBearerTokenException as bte:
logger.exception('Invalid bearer token: %s', bte)
raise InvalidJWTException(bte)
@ -109,9 +163,7 @@ def identity_from_bearer_token(bearer_header):
for grant in payload['access']:
namespace, repo_name = parse_namespace_repository(grant['name'], lib_namespace)
- if '*' in grant['actions']:
- loaded_identity.provides.add(repository_admin_grant(namespace, repo_name))
- elif 'push' in grant['actions']:
+ if 'push' in grant['actions']:
loaded_identity.provides.add(repository_write_grant(namespace, repo_name))
elif 'pull' in grant['actions']:
loaded_identity.provides.add(repository_read_grant(namespace, repo_name))
@ -130,9 +182,6 @@ def identity_from_bearer_token(bearer_header):
def process_registry_jwt_auth(scopes=None):
- """ Processes the registry JWT auth token found in the authorization header. If none found,
- no error is returned. If an invalid token is found, raises a 401.
- """
def inner(func):
@wraps(func)
def wrapper(*args, **kwargs):
@ -140,15 +189,10 @@ def process_registry_jwt_auth(scopes=None):
auth = request.headers.get('authorization', '').strip()
if auth:
try:
- extracted_identity, context_dict = identity_from_bearer_token(auth)
+ extracted_identity, context = identity_from_bearer_token(auth)
identity_changed.send(app, identity=extracted_identity)
+ set_grant_context(context)
logger.debug('Identity changed to %s', extracted_identity.id)
- auth_context = SignedAuthContext.build_from_signed_dict(context_dict)
- if auth_context is not None:
- logger.debug('Auth context set to %s', auth_context.signed_data)
- set_authenticated_context(auth_context)
except InvalidJWTException as ije:
repository = None
if 'namespace_name' in kwargs and 'repo_name' in kwargs:
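Example (hedged sketch of how a registry endpoint typically uses this decorator; the blueprint, route and `ReadRepositoryPermission` import are assumptions for illustration, not taken from the diff):
from flask import Blueprint, jsonify

from auth.permissions import ReadRepositoryPermission  # assumed to exist in auth/permissions.py
from auth.registry_jwt_auth import process_registry_jwt_auth

v2_example = Blueprint('v2_example', __name__)


@v2_example.route('/v2/<namespace_name>/<repo_name>/tags/list')
@process_registry_jwt_auth(scopes=['pull'])
def list_tags(namespace_name, repo_name):
  # The decorator only loads the identity and grant context; views still check permissions.
  if not ReadRepositoryPermission(namespace_name, repo_name).can():
    return jsonify({'errors': [{'code': 'UNAUTHORIZED'}]}), 401
  return jsonify({'name': '%s/%s' % (namespace_name, repo_name), 'tags': []})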

View file

@ -96,10 +96,11 @@ IMPLIED_SCOPES = {
def app_scopes(app_config):
- scopes_from_config = dict(ALL_SCOPES)
if not app_config.get('FEATURE_SUPER_USERS', False):
+ scopes_from_config = dict(ALL_SCOPES)
del scopes_from_config[SUPERUSER.scope]
return scopes_from_config
+ return ALL_SCOPES
def scopes_from_scope_string(scopes):
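Example (both variants above behave the same way for this check; `SUPERUSER` is the scope object referenced in the function body):
from auth.scopes import app_scopes, SUPERUSER

with_superusers = app_scopes({'FEATURE_SUPER_USERS': True})
without_superusers = app_scopes({'FEATURE_SUPER_USERS': False})

assert SUPERUSER.scope in with_superusers
assert SUPERUSER.scope not in without_superusers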

View file

@ -1,55 +0,0 @@
import logging
from flask.sessions import SecureCookieSessionInterface, BadSignature
from app import app
from auth.validateresult import AuthKind, ValidateResult
logger = logging.getLogger(__name__)
# The prefix for all signatures of signed granted.
SIGNATURE_PREFIX = 'sigv2='
def generate_signed_token(grants, user_context):
""" Generates a signed session token with the given grants and user context. """
ser = SecureCookieSessionInterface().get_signing_serializer(app)
data_to_sign = {
'grants': grants,
'user_context': user_context,
}
encrypted = ser.dumps(data_to_sign)
return '{0}{1}'.format(SIGNATURE_PREFIX, encrypted)
def validate_signed_grant(auth_header):
""" Validates a signed grant as found inside an auth header and returns whether it points to
a valid grant.
"""
if not auth_header:
return ValidateResult(AuthKind.signed_grant, missing=True)
# Try to parse the token from the header.
normalized = [part.strip() for part in auth_header.split(' ') if part]
if normalized[0].lower() != 'token' or len(normalized) != 2:
logger.debug('Not a token: %s', auth_header)
return ValidateResult(AuthKind.signed_grant, missing=True)
# Check that it starts with the expected prefix.
if not normalized[1].startswith(SIGNATURE_PREFIX):
logger.debug('Not a signed grant token: %s', auth_header)
return ValidateResult(AuthKind.signed_grant, missing=True)
# Decrypt the grant.
encrypted = normalized[1][len(SIGNATURE_PREFIX):]
ser = SecureCookieSessionInterface().get_signing_serializer(app)
try:
token_data = ser.loads(encrypted, max_age=app.config['SIGNED_GRANT_EXPIRATION_SEC'])
except BadSignature:
logger.warning('Signed grant could not be validated: %s', encrypted)
return ValidateResult(AuthKind.signed_grant,
error_message='Signed grant could not be validated')
logger.debug('Successfully validated signed grant with data: %s', token_data)
return ValidateResult(AuthKind.signed_grant, signed_data=token_data)
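Example (round-trip sketch; requires the Quay app config for the signing secret and SIGNED_GRANT_EXPIRATION_SEC): grants created by the web tier travel to workers as `token sigv2=...` headers and are re-validated here.
from auth.signedgrant import generate_signed_token, validate_signed_grant

token = generate_signed_token({'somegrant': True}, 'devtable')
result = validate_signed_grant('token ' + token)

assert result.auth_valid
assert result.context.signed_data['grants'] == {'somegrant': True}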

View file

@ -1,51 +0,0 @@
import pytest
from auth.auth_context_type import SignedAuthContext, ValidatedAuthContext, ContextEntityKind
from data import model, database
from test.fixtures import *
def get_oauth_token(_):
return database.OAuthAccessToken.get()
@pytest.mark.parametrize('kind, entity_reference, loader', [
(ContextEntityKind.anonymous, None, None),
(ContextEntityKind.appspecifictoken, '%s%s' % ('a' * 60, 'b' * 60),
model.appspecifictoken.access_valid_token),
(ContextEntityKind.oauthtoken, None, get_oauth_token),
(ContextEntityKind.robot, 'devtable+dtrobot', model.user.lookup_robot),
(ContextEntityKind.user, 'devtable', model.user.get_user),
])
@pytest.mark.parametrize('v1_dict_format', [
(True),
(False),
])
def test_signed_auth_context(kind, entity_reference, loader, v1_dict_format, initialized_db):
if kind == ContextEntityKind.anonymous:
validated = ValidatedAuthContext()
assert validated.is_anonymous
else:
ref = loader(entity_reference)
validated = ValidatedAuthContext(**{kind.value: ref})
assert not validated.is_anonymous
assert validated.entity_kind == kind
assert validated.unique_key
signed = SignedAuthContext.build_from_signed_dict(validated.to_signed_dict(),
v1_dict_format=v1_dict_format)
if not v1_dict_format:
# Under legacy V1 format, we don't track the app specific token, merely its associated user.
assert signed.entity_kind == kind
assert signed.description == validated.description
assert signed.credential_username == validated.credential_username
assert signed.analytics_id_and_public_metadata() == validated.analytics_id_and_public_metadata()
assert signed.unique_key == validated.unique_key
assert signed.is_anonymous == validated.is_anonymous
assert signed.authed_user == validated.authed_user
assert signed.has_nonrobot_user == validated.has_nonrobot_user
assert signed.to_signed_dict() == validated.to_signed_dict()

View file

@ -1,98 +0,0 @@
# -*- coding: utf-8 -*-
import pytest
from base64 import b64encode
from auth.basic import validate_basic_auth
from auth.credentials import (ACCESS_TOKEN_USERNAME, OAUTH_TOKEN_USERNAME,
APP_SPECIFIC_TOKEN_USERNAME)
from auth.validateresult import AuthKind, ValidateResult
from data import model
from test.fixtures import *
def _token(username, password):
assert isinstance(username, basestring)
assert isinstance(password, basestring)
return 'basic ' + b64encode('%s:%s' % (username, password))
@pytest.mark.parametrize('token, expected_result', [
('', ValidateResult(AuthKind.basic, missing=True)),
('someinvalidtoken', ValidateResult(AuthKind.basic, missing=True)),
('somefoobartoken', ValidateResult(AuthKind.basic, missing=True)),
('basic ', ValidateResult(AuthKind.basic, missing=True)),
('basic some token', ValidateResult(AuthKind.basic, missing=True)),
('basic sometoken', ValidateResult(AuthKind.basic, missing=True)),
(_token(APP_SPECIFIC_TOKEN_USERNAME, 'invalid'), ValidateResult(AuthKind.basic,
error_message='Invalid token')),
(_token(ACCESS_TOKEN_USERNAME, 'invalid'), ValidateResult(AuthKind.basic,
error_message='Invalid access token')),
(_token(OAUTH_TOKEN_USERNAME, 'invalid'),
ValidateResult(AuthKind.basic, error_message='OAuth access token could not be validated')),
(_token('devtable', 'invalid'), ValidateResult(AuthKind.basic,
error_message='Invalid Username or Password')),
(_token('devtable+somebot', 'invalid'), ValidateResult(
AuthKind.basic, error_message='Could not find robot with username: devtable+somebot')),
(_token('disabled', 'password'), ValidateResult(
AuthKind.basic,
error_message='This user has been disabled. Please contact your administrator.')),])
def test_validate_basic_auth_token(token, expected_result, app):
result = validate_basic_auth(token)
assert result == expected_result
def test_valid_user(app):
token = _token('devtable', 'password')
result = validate_basic_auth(token)
assert result == ValidateResult(AuthKind.basic, user=model.user.get_user('devtable'))
def test_valid_robot(app):
robot, password = model.user.create_robot('somerobot', model.user.get_user('devtable'))
token = _token(robot.username, password)
result = validate_basic_auth(token)
assert result == ValidateResult(AuthKind.basic, robot=robot)
def test_valid_token(app):
access_token = model.token.create_delegate_token('devtable', 'simple', 'sometoken')
token = _token(ACCESS_TOKEN_USERNAME, access_token.get_code())
result = validate_basic_auth(token)
assert result == ValidateResult(AuthKind.basic, token=access_token)
def test_valid_oauth(app):
user = model.user.get_user('devtable')
app = model.oauth.list_applications_for_org(model.user.get_user_or_org('buynlarge'))[0]
oauth_token, code = model.oauth.create_access_token_for_testing(user, app.client_id, 'repo:read')
token = _token(OAUTH_TOKEN_USERNAME, code)
result = validate_basic_auth(token)
assert result == ValidateResult(AuthKind.basic, oauthtoken=oauth_token)
def test_valid_app_specific_token(app):
user = model.user.get_user('devtable')
app_specific_token = model.appspecifictoken.create_token(user, 'some token')
full_token = model.appspecifictoken.get_full_token_string(app_specific_token)
token = _token(APP_SPECIFIC_TOKEN_USERNAME, full_token)
result = validate_basic_auth(token)
assert result == ValidateResult(AuthKind.basic, appspecifictoken=app_specific_token)
def test_invalid_unicode(app):
token = '\xebOH'
header = 'basic ' + b64encode(token)
result = validate_basic_auth(header)
assert result == ValidateResult(AuthKind.basic, missing=True)
def test_invalid_unicode_2(app):
token = '“4JPCOLIVMAY32Q3XGVPHC4CBF8SKII5FWNYMASOFDIVSXTC5I5NBU”'
header = 'basic ' + b64encode('devtable+somerobot:%s' % token)
result = validate_basic_auth(header)
assert result == ValidateResult(
AuthKind.basic,
error_message='Could not find robot with username: devtable+somerobot and supplied password.')

View file

@ -1,66 +0,0 @@
import uuid
from flask_login import login_user
from app import LoginWrappedDBUser
from data import model
from auth.cookie import validate_session_cookie
from test.fixtures import *
def test_anonymous_cookie(app):
assert validate_session_cookie().missing
def test_invalidformatted_cookie(app):
# "Login" with a non-UUID reference.
someuser = model.user.get_user('devtable')
login_user(LoginWrappedDBUser('somenonuuid', someuser))
# Ensure we get an invalid session cookie format error.
result = validate_session_cookie()
assert result.authed_user is None
assert result.context.identity is None
assert not result.has_nonrobot_user
assert result.error_message == 'Invalid session cookie format'
def test_disabled_user(app):
# "Login" with a disabled user.
someuser = model.user.get_user('disabled')
login_user(LoginWrappedDBUser(someuser.uuid, someuser))
# Ensure we get an invalid session cookie format error.
result = validate_session_cookie()
assert result.authed_user is None
assert result.context.identity is None
assert not result.has_nonrobot_user
assert result.error_message == 'User account is disabled'
def test_valid_user(app):
# Login with a valid user.
someuser = model.user.get_user('devtable')
login_user(LoginWrappedDBUser(someuser.uuid, someuser))
result = validate_session_cookie()
assert result.authed_user == someuser
assert result.context.identity is not None
assert result.has_nonrobot_user
assert result.error_message is None
def test_valid_organization(app):
# "Login" with a valid organization.
someorg = model.user.get_namespace_user('buynlarge')
someorg.uuid = str(uuid.uuid4())
someorg.verified = True
someorg.save()
login_user(LoginWrappedDBUser(someorg.uuid, someorg))
result = validate_session_cookie()
assert result.authed_user is None
assert result.context.identity is None
assert not result.has_nonrobot_user
assert result.error_message == 'Cannot login to organization'

View file

@ -1,147 +0,0 @@
# -*- coding: utf-8 -*-
from auth.credentials import validate_credentials, CredentialKind
from auth.credential_consts import (ACCESS_TOKEN_USERNAME, OAUTH_TOKEN_USERNAME,
APP_SPECIFIC_TOKEN_USERNAME)
from auth.validateresult import AuthKind, ValidateResult
from data import model
from test.fixtures import *
def test_valid_user(app):
result, kind = validate_credentials('devtable', 'password')
assert kind == CredentialKind.user
assert result == ValidateResult(AuthKind.credentials, user=model.user.get_user('devtable'))
def test_valid_robot(app):
robot, password = model.user.create_robot('somerobot', model.user.get_user('devtable'))
result, kind = validate_credentials(robot.username, password)
assert kind == CredentialKind.robot
assert result == ValidateResult(AuthKind.credentials, robot=robot)
def test_valid_robot_for_disabled_user(app):
user = model.user.get_user('devtable')
user.enabled = False
user.save()
robot, password = model.user.create_robot('somerobot', user)
result, kind = validate_credentials(robot.username, password)
assert kind == CredentialKind.robot
err = 'This user has been disabled. Please contact your administrator.'
assert result == ValidateResult(AuthKind.credentials, error_message=err)
def test_valid_token(app):
access_token = model.token.create_delegate_token('devtable', 'simple', 'sometoken')
result, kind = validate_credentials(ACCESS_TOKEN_USERNAME, access_token.get_code())
assert kind == CredentialKind.token
assert result == ValidateResult(AuthKind.credentials, token=access_token)
def test_valid_oauth(app):
user = model.user.get_user('devtable')
app = model.oauth.list_applications_for_org(model.user.get_user_or_org('buynlarge'))[0]
oauth_token, code = model.oauth.create_access_token_for_testing(user, app.client_id, 'repo:read')
result, kind = validate_credentials(OAUTH_TOKEN_USERNAME, code)
assert kind == CredentialKind.oauth_token
assert result == ValidateResult(AuthKind.oauth, oauthtoken=oauth_token)
def test_invalid_user(app):
result, kind = validate_credentials('devtable', 'somepassword')
assert kind == CredentialKind.user
assert result == ValidateResult(AuthKind.credentials,
error_message='Invalid Username or Password')
def test_valid_app_specific_token(app):
user = model.user.get_user('devtable')
app_specific_token = model.appspecifictoken.create_token(user, 'some token')
full_token = model.appspecifictoken.get_full_token_string(app_specific_token)
result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, full_token)
assert kind == CredentialKind.app_specific_token
assert result == ValidateResult(AuthKind.credentials, appspecifictoken=app_specific_token)
def test_valid_app_specific_token_for_disabled_user(app):
user = model.user.get_user('devtable')
user.enabled = False
user.save()
app_specific_token = model.appspecifictoken.create_token(user, 'some token')
full_token = model.appspecifictoken.get_full_token_string(app_specific_token)
result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, full_token)
assert kind == CredentialKind.app_specific_token
err = 'This user has been disabled. Please contact your administrator.'
assert result == ValidateResult(AuthKind.credentials, error_message=err)
def test_invalid_app_specific_token(app):
result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, 'somecode')
assert kind == CredentialKind.app_specific_token
assert result == ValidateResult(AuthKind.credentials, error_message='Invalid token')
def test_invalid_app_specific_token_code(app):
user = model.user.get_user('devtable')
app_specific_token = model.appspecifictoken.create_token(user, 'some token')
full_token = app_specific_token.token_name + 'something'
result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, full_token)
assert kind == CredentialKind.app_specific_token
assert result == ValidateResult(AuthKind.credentials, error_message='Invalid token')
def test_unicode(app):
result, kind = validate_credentials('someusername', 'some₪code')
assert kind == CredentialKind.user
assert not result.auth_valid
assert result == ValidateResult(AuthKind.credentials,
error_message='Invalid Username or Password')
def test_unicode_robot(app):
robot, _ = model.user.create_robot('somerobot', model.user.get_user('devtable'))
result, kind = validate_credentials(robot.username, 'some₪code')
assert kind == CredentialKind.robot
assert not result.auth_valid
msg = 'Could not find robot with username: devtable+somerobot and supplied password.'
assert result == ValidateResult(AuthKind.credentials, error_message=msg)
def test_invalid_user(app):
result, kind = validate_credentials('someinvaliduser', 'password')
assert kind == CredentialKind.user
assert not result.authed_user
assert not result.auth_valid
def test_invalid_user_password(app):
result, kind = validate_credentials('devtable', 'somepassword')
assert kind == CredentialKind.user
assert not result.authed_user
assert not result.auth_valid
def test_invalid_robot(app):
result, kind = validate_credentials('devtable+doesnotexist', 'password')
assert kind == CredentialKind.robot
assert not result.authed_user
assert not result.auth_valid
def test_invalid_robot_token(app):
robot, _ = model.user.create_robot('somerobot', model.user.get_user('devtable'))
result, kind = validate_credentials(robot.username, 'invalidpassword')
assert kind == CredentialKind.robot
assert not result.authed_user
assert not result.auth_valid
def test_invalid_unicode_robot(app):
token = '“4JPCOLIVMAY32Q3XGVPHC4CBF8SKII5FWNYMASOFDIVSXTC5I5NBU”'
result, kind = validate_credentials('devtable+somerobot', token)
assert kind == CredentialKind.robot
assert not result.auth_valid
msg = 'Could not find robot with username: devtable+somerobot'
assert result == ValidateResult(AuthKind.credentials, error_message=msg)
def test_invalid_unicode_robot_2(app):
user = model.user.get_user('devtable')
robot, password = model.user.create_robot('somerobot', user)
token = '“4JPCOLIVMAY32Q3XGVPHC4CBF8SKII5FWNYMASOFDIVSXTC5I5NBU”'
result, kind = validate_credentials('devtable+somerobot', token)
assert kind == CredentialKind.robot
assert not result.auth_valid
msg = 'Could not find robot with username: devtable+somerobot and supplied password.'
assert result == ValidateResult(AuthKind.credentials, error_message=msg)
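
Read together, these cases pin down the calling convention of validate_credentials: it always returns a (ValidateResult, CredentialKind) pair and reports failures through result.auth_valid and result.error_message rather than by raising. The sketch below is a minimal caller built only on that contract; the helper name check_login is illustrative and the call assumes the same application context the tests set up.

from auth.credentials import validate_credentials

def check_login(username, password):
  # Returns (authenticated entity, None) on success or (None, error message) on failure.
  result, kind = validate_credentials(username, password)
  if not result.auth_valid:
    return None, result.error_message or 'Invalid Username or Password'
  # kind distinguishes users, robots, app-specific tokens, OAuth tokens and access tokens.
  return result.authed_user, None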


@ -1,105 +0,0 @@
import pytest
from flask import session
from flask_login import login_user
from werkzeug.exceptions import HTTPException
from app import LoginWrappedDBUser
from auth.auth_context import get_authenticated_user
from auth.decorators import (
extract_namespace_repo_from_session, require_session_login, process_auth_or_cookie)
from data import model
from test.fixtures import *
def test_extract_namespace_repo_from_session_missing(app):
def emptyfunc():
pass
session.clear()
with pytest.raises(HTTPException):
extract_namespace_repo_from_session(emptyfunc)()
def test_extract_namespace_repo_from_session_present(app):
encountered = []
def somefunc(namespace, repository):
encountered.append(namespace)
encountered.append(repository)
# Add the namespace and repository to the session.
session.clear()
session['namespace'] = 'foo'
session['repository'] = 'bar'
# Call the decorated method.
extract_namespace_repo_from_session(somefunc)()
assert encountered[0] == 'foo'
assert encountered[1] == 'bar'
def test_require_session_login_missing(app):
def emptyfunc():
pass
with pytest.raises(HTTPException):
require_session_login(emptyfunc)()
def test_require_session_login_valid_user(app):
def emptyfunc():
pass
# Login as a valid user.
someuser = model.user.get_user('devtable')
login_user(LoginWrappedDBUser(someuser.uuid, someuser))
# Call the function.
require_session_login(emptyfunc)()
# Ensure the authenticated user was updated.
assert get_authenticated_user() == someuser
def test_require_session_login_invalid_user(app):
def emptyfunc():
pass
# "Login" as a disabled user.
someuser = model.user.get_user('disabled')
login_user(LoginWrappedDBUser(someuser.uuid, someuser))
# Call the function.
with pytest.raises(HTTPException):
require_session_login(emptyfunc)()
# Ensure the authenticated user was not updated.
assert get_authenticated_user() is None
def test_process_auth_or_cookie_invalid_user(app):
def emptyfunc():
pass
# Call the function.
process_auth_or_cookie(emptyfunc)()
# Ensure the authenticated user was not updated.
assert get_authenticated_user() is None
def test_process_auth_or_cookie_valid_user(app):
def emptyfunc():
pass
# Login as a valid user.
someuser = model.user.get_user('devtable')
login_user(LoginWrappedDBUser(someuser.uuid, someuser))
# Call the function.
process_auth_or_cookie(emptyfunc)()
# Ensure the authenticated user was updated.
assert get_authenticated_user() == someuser
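
The decorators exercised above are meant to wrap view functions: require_session_login aborts the request unless a valid, enabled user is attached to the session cookie, while process_auth_or_cookie only populates the auth context and never aborts. A hedged sketch of both on a Flask blueprint; the blueprint and route names are illustrative and not part of the diff.

from flask import Blueprint, jsonify

from auth.auth_context import get_authenticated_user
from auth.decorators import require_session_login, process_auth_or_cookie

example = Blueprint('example_views', __name__)  # illustrative blueprint

@example.route('/whoami')
@require_session_login
def whoami():
  # Aborts with an HTTP error unless a valid, enabled user is logged in.
  return jsonify(username=get_authenticated_user().username)

@example.route('/maybe-user')
@process_auth_or_cookie
def maybe_user():
  # Never aborts; the auth context is simply populated when credentials are present.
  user = get_authenticated_user()
  return jsonify(username=user.username if user else None)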


@ -1,55 +0,0 @@
import pytest
from auth.oauth import validate_bearer_auth, validate_oauth_token
from auth.validateresult import AuthKind, ValidateResult
from data import model
from test.fixtures import *
@pytest.mark.parametrize('header, expected_result', [
('', ValidateResult(AuthKind.oauth, missing=True)),
('somerandomtoken', ValidateResult(AuthKind.oauth, missing=True)),
('bearer some random token', ValidateResult(AuthKind.oauth, missing=True)),
('bearer invalidtoken',
ValidateResult(AuthKind.oauth, error_message='OAuth access token could not be validated')),])
def test_bearer(header, expected_result, app):
assert validate_bearer_auth(header) == expected_result
def test_valid_oauth(app):
user = model.user.get_user('devtable')
app = model.oauth.list_applications_for_org(model.user.get_user_or_org('buynlarge'))[0]
token_string = '%s%s' % ('a' * 20, 'b' * 20)
oauth_token, _ = model.oauth.create_access_token_for_testing(user, app.client_id, 'repo:read',
access_token=token_string)
result = validate_bearer_auth('bearer ' + token_string)
assert result.context.oauthtoken == oauth_token
assert result.authed_user == user
assert result.auth_valid
def test_disabled_user_oauth(app):
user = model.user.get_user('disabled')
token_string = '%s%s' % ('a' * 20, 'b' * 20)
oauth_token, _ = model.oauth.create_access_token_for_testing(user, 'deadbeef', 'repo:admin',
access_token=token_string)
result = validate_bearer_auth('bearer ' + token_string)
assert result.context.oauthtoken is None
assert result.authed_user is None
assert not result.auth_valid
assert result.error_message == 'Granter of the oauth access token is disabled'
def test_expired_token(app):
user = model.user.get_user('devtable')
token_string = '%s%s' % ('a' * 20, 'b' * 20)
oauth_token, _ = model.oauth.create_access_token_for_testing(user, 'deadbeef', 'repo:admin',
access_token=token_string,
expires_in=-1000)
result = validate_bearer_auth('bearer ' + token_string)
assert result.context.oauthtoken is None
assert result.authed_user is None
assert not result.auth_valid
assert result.error_message == 'OAuth access token has expired'
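
These cases cover the three outcomes of validate_bearer_auth: a missing or malformed header, a token that fails validation (expired, unknown, or granted by a disabled user), and a valid token. A minimal sketch of how a request handler might consume it, assuming a Flask request context; the helper name is illustrative.

from flask import request

from auth.oauth import validate_bearer_auth

def auth_from_request():
  # Missing/invalid headers come back as a ValidateResult with auth_valid False.
  result = validate_bearer_auth(request.headers.get('Authorization', ''))
  if result.auth_valid:
    # Publishes the OAuth identity to the request's auth context.
    result.apply_to_context()
  return result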


@ -1,37 +0,0 @@
import pytest
from auth import scopes
from auth.permissions import SuperUserPermission, QuayDeferredPermissionUser
from data import model
from test.fixtures import *
SUPER_USERNAME = 'devtable'
UNSUPER_USERNAME = 'freshuser'
@pytest.fixture()
def superuser(initialized_db):
return model.user.get_user(SUPER_USERNAME)
@pytest.fixture()
def normie(initialized_db):
return model.user.get_user(UNSUPER_USERNAME)
def test_superuser_matrix(superuser, normie):
test_cases = [
(superuser, {scopes.SUPERUSER}, True),
(superuser, {scopes.DIRECT_LOGIN}, True),
(superuser, {scopes.READ_USER, scopes.SUPERUSER}, True),
(superuser, {scopes.READ_USER}, False),
(normie, {scopes.SUPERUSER}, False),
(normie, {scopes.DIRECT_LOGIN}, False),
(normie, {scopes.READ_USER, scopes.SUPERUSER}, False),
(normie, {scopes.READ_USER}, False),
]
for user_obj, scope_set, expected in test_cases:
perm_user = QuayDeferredPermissionUser.for_user(user_obj, scope_set)
has_su = perm_user.can(SuperUserPermission())
assert has_su == expected
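
The matrix shows that superuser status alone is not enough: the presented scope set must also include scopes.SUPERUSER (or DIRECT_LOGIN) for the permission to be granted. A small helper capturing that check, using only the calls exercised above; the function name is illustrative.

from auth import scopes
from auth.permissions import QuayDeferredPermissionUser, SuperUserPermission

def can_act_as_superuser(user_obj):
  # Passes only when user_obj is configured as a superuser AND the scope set allows it.
  deferred = QuayDeferredPermissionUser.for_user(user_obj, {scopes.SUPERUSER})
  return deferred.can(SuperUserPermission())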


@ -1,203 +0,0 @@
# -*- coding: utf-8 -*-
import time
import jwt
import pytest
from app import app, instance_keys
from auth.auth_context_type import ValidatedAuthContext
from auth.registry_jwt_auth import identity_from_bearer_token, InvalidJWTException
from data import model # TODO: remove this after service keys are decoupled
from data.database import ServiceKeyApprovalType
from initdb import setup_database_for_testing, finished_database_for_testing
from util.morecollections import AttrDict
from util.security.registry_jwt import ANONYMOUS_SUB, build_context_and_subject
TEST_AUDIENCE = app.config['SERVER_HOSTNAME']
TEST_USER = AttrDict({'username': 'joeuser', 'uuid': 'foobar', 'enabled': True})
MAX_SIGNED_S = 3660
TOKEN_VALIDITY_LIFETIME_S = 60 * 60 # 1 hour
ANONYMOUS_SUB = '(anonymous)'
SERVICE_NAME = 'quay'
# This import has to come below any references to "app".
from test.fixtures import *
def _access(typ='repository', name='somens/somerepo', actions=None):
actions = [] if actions is None else actions
return [{
'type': typ,
'name': name,
'actions': actions,
}]
def _delete_field(token_data, field_name):
token_data.pop(field_name)
return token_data
def _token_data(access=[], context=None, audience=TEST_AUDIENCE, user=TEST_USER, iat=None,
exp=None, nbf=None, iss=None, subject=None):
if subject is None:
_, subject = build_context_and_subject(ValidatedAuthContext(user=user))
return {
'iss': iss or instance_keys.service_name,
'aud': audience,
'nbf': nbf if nbf is not None else int(time.time()),
'iat': iat if iat is not None else int(time.time()),
'exp': exp if exp is not None else int(time.time() + TOKEN_VALIDITY_LIFETIME_S),
'sub': subject,
'access': access,
'context': context,
}
def _token(token_data, key_id=None, private_key=None, skip_header=False, alg=None):
key_id = key_id or instance_keys.local_key_id
private_key = private_key or instance_keys.local_private_key
if alg == "none":
private_key = None
token_headers = {'kid': key_id}
if skip_header:
token_headers = {}
token_data = jwt.encode(token_data, private_key, alg or 'RS256', headers=token_headers)
return 'Bearer {0}'.format(token_data)
def _parse_token(token):
return identity_from_bearer_token(token)[0]
def test_accepted_token(initialized_db):
token = _token(_token_data())
identity = _parse_token(token)
assert identity.id == TEST_USER.username, 'should be %s, but was %s' % (TEST_USER.username,
identity.id)
assert len(identity.provides) == 0
anon_token = _token(_token_data(user=None))
anon_identity = _parse_token(anon_token)
assert anon_identity.id == ANONYMOUS_SUB, 'should be %s, but was %s' % (ANONYMOUS_SUB,
anon_identity.id)
assert len(identity.provides) == 0
@pytest.mark.parametrize('access', [
(_access(actions=['pull', 'push'])),
(_access(actions=['pull', '*'])),
(_access(actions=['*', 'push'])),
(_access(actions=['*'])),
(_access(actions=['pull', '*', 'push'])),])
def test_token_with_access(access, initialized_db):
token = _token(_token_data(access=access))
identity = _parse_token(token)
assert identity.id == TEST_USER.username, 'should be %s, but was %s' % (TEST_USER.username,
identity.id)
assert len(identity.provides) == 1
role = list(identity.provides)[0][3]
if "*" in access[0]['actions']:
assert role == 'admin'
elif "push" in access[0]['actions']:
assert role == 'write'
elif "pull" in access[0]['actions']:
assert role == 'read'
@pytest.mark.parametrize('token', [
pytest.param(_token(
_token_data(access=[{
'toipe': 'repository',
'namesies': 'somens/somerepo',
'akshuns': ['pull', 'push', '*']}])), id='bad access'),
pytest.param(_token(_token_data(audience='someotherapp')), id='bad aud'),
pytest.param(_token(_delete_field(_token_data(), 'aud')), id='no aud'),
pytest.param(_token(_token_data(nbf=int(time.time()) + 600)), id='future nbf'),
pytest.param(_token(_delete_field(_token_data(), 'nbf')), id='no nbf'),
pytest.param(_token(_token_data(iat=int(time.time()) + 600)), id='future iat'),
pytest.param(_token(_delete_field(_token_data(), 'iat')), id='no iat'),
pytest.param(_token(_token_data(exp=int(time.time()) + MAX_SIGNED_S * 2)), id='exp too long'),
pytest.param(_token(_token_data(exp=int(time.time()) - 60)), id='expired'),
pytest.param(_token(_delete_field(_token_data(), 'exp')), id='no exp'),
pytest.param(_token(_delete_field(_token_data(), 'sub')), id='no sub'),
pytest.param(_token(_token_data(iss='badissuer')), id='bad iss'),
pytest.param(_token(_delete_field(_token_data(), 'iss')), id='no iss'),
pytest.param(_token(_token_data(), skip_header=True), id='no header'),
pytest.param(_token(_token_data(), key_id='someunknownkey'), id='bad key'),
pytest.param(_token(_token_data(), key_id='kid7'), id='bad key :: kid7'),
pytest.param(_token(_token_data(), alg='none', private_key=None), id='none alg'),
pytest.param('some random token', id='random token'),
pytest.param('Bearer: sometokenhere', id='extra bearer'),
pytest.param('\nBearer: dGVzdA', id='leading newline'),
])
def test_invalid_jwt(token, initialized_db):
with pytest.raises(InvalidJWTException):
_parse_token(token)
def test_mixing_keys_e2e(initialized_db):
token_data = _token_data()
# Create a new key for testing.
p, key = model.service_keys.generate_service_key(instance_keys.service_name, None, kid='newkey',
name='newkey', metadata={})
private_key = p.exportKey('PEM')
# Test first with the new valid, but unapproved key.
unapproved_key_token = _token(token_data, key_id='newkey', private_key=private_key)
with pytest.raises(InvalidJWTException):
_parse_token(unapproved_key_token)
# Approve the key and try again.
admin_user = model.user.get_user('devtable')
model.service_keys.approve_service_key(key.kid, ServiceKeyApprovalType.SUPERUSER, approver=admin_user)
valid_token = _token(token_data, key_id='newkey', private_key=private_key)
identity = _parse_token(valid_token)
assert identity.id == TEST_USER.username
assert len(identity.provides) == 0
# Try using a different private key with the existing key ID.
bad_private_token = _token(token_data, key_id='newkey',
private_key=instance_keys.local_private_key)
with pytest.raises(InvalidJWTException):
_parse_token(bad_private_token)
# Try using a different key ID with the existing private key.
kid_mismatch_token = _token(token_data, key_id=instance_keys.local_key_id,
private_key=private_key)
with pytest.raises(InvalidJWTException):
_parse_token(kid_mismatch_token)
# Delete the new key.
key.delete_instance(recursive=True)
# Ensure it still works (via the cache.)
deleted_key_token = _token(token_data, key_id='newkey', private_key=private_key)
identity = _parse_token(deleted_key_token)
assert identity.id == TEST_USER.username
assert len(identity.provides) == 0
# Break the cache.
instance_keys.clear_cache()
# Ensure the key no longer works.
with pytest.raises(InvalidJWTException):
_parse_token(deleted_key_token)
@pytest.mark.parametrize('token', [
u'someunicodetoken✡',
u'\xc9\xad\xbd',
])
def test_unicode_token(token):
with pytest.raises(InvalidJWTException):
_parse_token(token)
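
The _token_data()/_token() helpers above define the claim set a registry JWT must carry: iss, aud, sub, nbf, iat, exp, access and context, signed with RS256 and a kid header. Below is a standalone sketch of minting such a bearer token with PyJWT; the function name and parameter list are illustrative, and the private key, key id, issuer and audience are assumed to be supplied by the caller.

import time

import jwt  # PyJWT, already used by the tests above

def make_registry_bearer_token(private_key, key_id, issuer, audience, subject,
                               access=None, lifetime_s=3600):
  # Mirrors the claims exercised by _token_data().
  now = int(time.time())
  claims = {
    'iss': issuer,
    'aud': audience,
    'sub': subject,
    'nbf': now,
    'iat': now,
    'exp': now + lifetime_s,
    'access': access or [],
    'context': None,
  }
  encoded = jwt.encode(claims, private_key, algorithm='RS256', headers={'kid': key_id})
  return 'Bearer {0}'.format(encoded)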


@ -1,50 +0,0 @@
import pytest
from auth.scopes import (
scopes_from_scope_string, validate_scope_string, ALL_SCOPES, is_subset_string)
@pytest.mark.parametrize(
'scopes_string, expected',
[
# Valid single scopes.
('repo:read', ['repo:read']),
('repo:admin', ['repo:admin']),
# Invalid scopes.
('not:valid', []),
('repo:admins', []),
# Valid scope strings.
('repo:read repo:admin', ['repo:read', 'repo:admin']),
('repo:read,repo:admin', ['repo:read', 'repo:admin']),
('repo:read,repo:admin repo:write', ['repo:read', 'repo:admin', 'repo:write']),
# Partially invalid scopes.
('repo:read,not:valid', []),
('repo:read repo:admins', []),
# Invalid scope strings.
('repo:read|repo:admin', []),
# Mixture of delimiters.
('repo:read, repo:admin', []),])
def test_parsing(scopes_string, expected):
expected_scope_set = {ALL_SCOPES[scope_name] for scope_name in expected}
parsed_scope_set = scopes_from_scope_string(scopes_string)
assert parsed_scope_set == expected_scope_set
assert validate_scope_string(scopes_string) == bool(expected)
@pytest.mark.parametrize('superset, subset, result', [
('repo:read', 'repo:read', True),
('repo:read repo:admin', 'repo:read', True),
('repo:read,repo:admin', 'repo:read', True),
('repo:read,repo:admin', 'repo:admin', True),
('repo:read,repo:admin', 'repo:admin repo:read', True),
('', 'repo:read', False),
('unknown:tag', 'repo:read', False),
('repo:read unknown:tag', 'repo:read', False),
('repo:read,unknown:tag', 'repo:read', False),])
def test_subset_string(superset, subset, result):
assert is_subset_string(superset, subset) == result
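
In short: scope strings may be space- or comma-delimited (but not a mixture), any unknown scope collapses the parse to the empty set, and is_subset_string answers whether one string's scopes cover another's. A tiny usage sketch of that behaviour, with illustrative values:

from auth.scopes import scopes_from_scope_string, is_subset_string

granted = 'repo:read,repo:admin'
requested = 'repo:read'

assert scopes_from_scope_string(requested)   # parses to a non-empty scope set
assert is_subset_string(granted, requested)  # the requested scopes are covered by the grant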


@ -1,32 +0,0 @@
import pytest
from auth.signedgrant import validate_signed_grant, generate_signed_token, SIGNATURE_PREFIX
from auth.validateresult import AuthKind, ValidateResult
@pytest.mark.parametrize('header, expected_result', [
pytest.param('', ValidateResult(AuthKind.signed_grant, missing=True), id='Missing'),
pytest.param('somerandomtoken', ValidateResult(AuthKind.signed_grant, missing=True),
id='Invalid header'),
pytest.param('token somerandomtoken', ValidateResult(AuthKind.signed_grant, missing=True),
id='Random Token'),
pytest.param('token ' + SIGNATURE_PREFIX + 'foo',
ValidateResult(AuthKind.signed_grant,
error_message='Signed grant could not be validated'),
id='Invalid token'),
])
def test_token(header, expected_result):
assert validate_signed_grant(header) == expected_result
def test_valid_grant():
header = 'token ' + generate_signed_token({'a': 'b'}, {'c': 'd'})
expected = ValidateResult(AuthKind.signed_grant, signed_data={
'grants': {
'a': 'b',
},
'user_context': {
'c': 'd'
},
})
assert validate_signed_grant(header) == expected
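
A signed grant is only honoured when it carries the 'token ' prefix and the SIGNATURE_PREFIX produced by generate_signed_token. The round trip below mirrors test_valid_grant with illustrative grant data:

from auth.signedgrant import generate_signed_token, validate_signed_grant
from auth.validateresult import AuthKind, ValidateResult

grants = {'repo': 'somens/somerepo'}
user_context = {'kind': 'user'}

header = 'token ' + generate_signed_token(grants, user_context)
expected = ValidateResult(AuthKind.signed_grant,
                          signed_data={'grants': grants, 'user_context': user_context})
assert validate_signed_grant(header) == expected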


@ -1,63 +0,0 @@
import pytest
from auth.auth_context import get_authenticated_context
from auth.validateresult import AuthKind, ValidateResult
from data import model
from data.database import AppSpecificAuthToken
from test.fixtures import *
def get_user():
return model.user.get_user('devtable')
def get_app_specific_token():
return AppSpecificAuthToken.get()
def get_robot():
robot, _ = model.user.create_robot('somebot', get_user())
return robot
def get_token():
return model.token.create_delegate_token('devtable', 'simple', 'sometoken')
def get_oauthtoken():
user = model.user.get_user('devtable')
return list(model.oauth.list_access_tokens_for_user(user))[0]
def get_signeddata():
return {'grants': {'a': 'b'}, 'user_context': {'c': 'd'}}
@pytest.mark.parametrize('get_entity,entity_kind', [
(get_user, 'user'),
(get_robot, 'robot'),
(get_token, 'token'),
(get_oauthtoken, 'oauthtoken'),
(get_signeddata, 'signed_data'),
(get_app_specific_token, 'appspecifictoken'),
])
def test_apply_context(get_entity, entity_kind, app):
assert get_authenticated_context() is None
entity = get_entity()
args = {}
args[entity_kind] = entity
result = ValidateResult(AuthKind.basic, **args)
result.apply_to_context()
expected_user = entity if entity_kind == 'user' or entity_kind == 'robot' else None
if entity_kind == 'oauthtoken':
expected_user = entity.authorized_user
if entity_kind == 'appspecifictoken':
expected_user = entity.user
expected_token = entity if entity_kind == 'token' else None
expected_oauth = entity if entity_kind == 'oauthtoken' else None
expected_appspecifictoken = entity if entity_kind == 'appspecifictoken' else None
expected_grant = entity if entity_kind == 'signed_data' else None
assert get_authenticated_context().authed_user == expected_user
assert get_authenticated_context().token == expected_token
assert get_authenticated_context().oauthtoken == expected_oauth
assert get_authenticated_context().appspecifictoken == expected_appspecifictoken
assert get_authenticated_context().signed_data == expected_grant


@ -1,56 +0,0 @@
from enum import Enum
from auth.auth_context_type import ValidatedAuthContext, ContextEntityKind
class AuthKind(Enum):
cookie = 'cookie'
basic = 'basic'
oauth = 'oauth'
signed_grant = 'signed_grant'
credentials = 'credentials'
class ValidateResult(object):
""" A result of validating auth in one form or another. """
def __init__(self, kind, missing=False, user=None, token=None, oauthtoken=None,
robot=None, appspecifictoken=None, signed_data=None, error_message=None):
self.kind = kind
self.missing = missing
self.error_message = error_message
self.context = ValidatedAuthContext(user=user, token=token, oauthtoken=oauthtoken, robot=robot,
appspecifictoken=appspecifictoken, signed_data=signed_data)
def tuple(self):
return (self.kind, self.missing, self.error_message, self.context.tuple())
def __eq__(self, other):
return self.tuple() == other.tuple()
def apply_to_context(self):
""" Applies this auth result to the auth context and Flask-Principal. """
self.context.apply_to_request_context()
def with_kind(self, kind):
""" Returns a copy of this result, but with the kind replaced. """
result = ValidateResult(kind, missing=self.missing, error_message=self.error_message)
result.context = self.context
return result
def __repr__(self):
return 'ValidateResult: %s (missing: %s, error: %s)' % (self.kind, self.missing,
self.error_message)
@property
def authed_user(self):
""" Returns the authenticated user, whether directly, or via an OAuth token. """
return self.context.authed_user
@property
def has_nonrobot_user(self):
""" Returns whether a user (not a robot) was authenticated successfully. """
return self.context.has_nonrobot_user
@property
def auth_valid(self):
""" Returns whether authentication successfully occurred. """
return self.context.entity_kind != ContextEntityKind.anonymous
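
Two idioms fall out of this class: a validator builds a ValidateResult for whatever entity it recognised and calls apply_to_context() to publish it, and with_kind() lets one auth path reuse another's outcome under its own AuthKind. A brief sketch, with illustrative function names:

from auth.validateresult import AuthKind, ValidateResult

def record_basic_auth(user_obj):
  # A successful basic-auth check produces a ValidateResult carrying the user;
  # applying it publishes the identity to the request context and Flask-Principal.
  result = ValidateResult(AuthKind.basic, user=user_obj)
  result.apply_to_context()
  return result

def as_cookie_result(result):
  # Re-labels an existing result, e.g. when cookie auth delegates to another check.
  return result.with_kind(AuthKind.cookie)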


@ -65,9 +65,6 @@ class BaseAvatar(object):
   def get_data_for_org(self, org):
     return self.get_data(org.username, org.email, 'org')
 
-  def get_data_for_external_user(self, external_user):
-    return self.get_data(external_user.username, external_user.email, 'user')
-
   def get_data(self, name, email_or_id, kind='user'):
     """ Computes and returns the full data block for the avatar:
       {
@ -77,11 +74,7 @@
       }
     """
     colors = self.colors
-
-    # Note: email_or_id may be None if gotten from external auth when email is disabled,
-    # so use the username in that case.
-    username_email_or_id = email_or_id or name
-    hash_value = hashlib.md5(username_email_or_id.strip().lower()).hexdigest()
+    hash_value = hashlib.md5(email_or_id.strip().lower()).hexdigest()
 
     byte_count = int(math.ceil(math.log(len(colors), 16)))
     byte_data = hash_value[0:byte_count]
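
For readability, here is a standalone restatement of the colour-selection logic touched by this hunk: hash the e-mail (master also falls back to the username when the e-mail is absent) and take just enough leading hex digits to index the palette. The final mapping from the hex prefix to a palette entry is not shown in the hunk, so the modulo line is an assumption.

import hashlib
import math

def pick_avatar_color(name, email_or_id, colors):
  # Fall back to the username when the e-mail is missing (master behaviour).
  hash_value = hashlib.md5((email_or_id or name).strip().lower().encode('utf-8')).hexdigest()
  byte_count = int(math.ceil(math.log(len(colors), 16)))
  byte_data = hash_value[0:byte_count]
  # Assumed mapping onto the palette; the original continuation is not in the hunk.
  return colors[int(byte_data, 16) % len(colors)]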

File diff suppressed because it is too large

boot.py Executable file → Normal file

@ -4,22 +4,15 @@ from datetime import datetime, timedelta
from urlparse import urlunparse from urlparse import urlunparse
from jinja2 import Template from jinja2 import Template
from cachetools.func import lru_cache from cachetools import lru_cache
import logging
import release import release
import os.path import os.path
from app import app from app import app
from data.model import ServiceKeyDoesNotExist
from data.model.release import set_region_release from data.model.release import set_region_release
from data.model.service_keys import get_service_key
from util.config.database import sync_database_with_config from util.config.database import sync_database_with_config
from util.generatepresharedkey import generate_key from util.generatepresharedkey import generate_key
from _init import CONF_DIR
logger = logging.getLogger(__name__)
@lru_cache(maxsize=1) @lru_cache(maxsize=1)
@ -45,82 +38,50 @@ def get_audience():
return urlunparse((scheme, hostname + ':' + port, '', '', '', '')) return urlunparse((scheme, hostname + ':' + port, '', '', '', ''))
def _verify_service_key():
try:
with open(app.config['INSTANCE_SERVICE_KEY_KID_LOCATION']) as f:
quay_key_id = f.read()
try:
get_service_key(quay_key_id, approved_only=False)
assert os.path.exists(app.config['INSTANCE_SERVICE_KEY_LOCATION'])
return quay_key_id
except ServiceKeyDoesNotExist:
logger.exception('Could not find non-expired existing service key %s; creating a new one',
quay_key_id)
return None
# Found a valid service key, so exiting.
except IOError:
logger.exception('Could not load existing service key; creating a new one')
return None
def setup_jwt_proxy(): def setup_jwt_proxy():
""" """
Creates a service key for quay to use in the jwtproxy and generates the JWT proxy configuration. Creates a service key for quay to use in the jwtproxy and generates the JWT proxy configuration.
""" """
if os.path.exists(os.path.join(CONF_DIR, 'jwtproxy_conf.yaml')): if os.path.exists('conf/jwtproxy_conf.yaml'):
# Proxy is already setup. Make sure the service key is still valid. # Proxy is already setup.
quay_key_id = _verify_service_key() return
if quay_key_id is not None:
return
# Ensure we have an existing key if in read-only mode. # Generate the key for this Quay instance to use.
if app.config.get('REGISTRY_STATE', 'normal') == 'readonly': minutes_until_expiration = app.config.get('INSTANCE_SERVICE_KEY_EXPIRATION', 120)
quay_key_id = _verify_service_key() expiration = datetime.now() + timedelta(minutes=minutes_until_expiration)
if quay_key_id is None: quay_key, quay_key_id = generate_key(app.config['INSTANCE_SERVICE_KEY_SERVICE'],
raise Exception('No valid service key found for read-only registry.') get_audience(), expiration_date=expiration)
else:
# Generate the key for this Quay instance to use.
minutes_until_expiration = app.config.get('INSTANCE_SERVICE_KEY_EXPIRATION', 120)
expiration = datetime.now() + timedelta(minutes=minutes_until_expiration)
quay_key, quay_key_id = generate_key(app.config['INSTANCE_SERVICE_KEY_SERVICE'],
get_audience(), expiration_date=expiration)
with open(app.config['INSTANCE_SERVICE_KEY_KID_LOCATION'], mode='w') as f: with open(app.config['INSTANCE_SERVICE_KEY_KID_LOCATION'], mode='w') as f:
f.truncate(0) f.truncate(0)
f.write(quay_key_id) f.write(quay_key_id)
with open(app.config['INSTANCE_SERVICE_KEY_LOCATION'], mode='w') as f: with open(app.config['INSTANCE_SERVICE_KEY_LOCATION'], mode='w') as f:
f.truncate(0) f.truncate(0)
f.write(quay_key.exportKey()) f.write(quay_key.exportKey())
# Generate the JWT proxy configuration. # Generate the JWT proxy configuration.
audience = get_audience() audience = get_audience()
registry = audience + '/keys' registry = audience + '/keys'
security_issuer = app.config.get('SECURITY_SCANNER_ISSUER_NAME', 'security_scanner') security_issuer = app.config.get('SECURITY_SCANNER_ISSUER_NAME', 'security_scanner')
with open(os.path.join(CONF_DIR, 'jwtproxy_conf.yaml.jnj')) as f: with open("conf/jwtproxy_conf.yaml.jnj") as f:
template = Template(f.read()) template = Template(f.read())
rendered = template.render( rendered = template.render(
conf_dir=CONF_DIR,
audience=audience, audience=audience,
registry=registry, registry=registry,
key_id=quay_key_id, key_id=quay_key_id,
security_issuer=security_issuer, security_issuer=security_issuer,
service_key_location=app.config['INSTANCE_SERVICE_KEY_LOCATION'],
) )
with open(os.path.join(CONF_DIR, 'jwtproxy_conf.yaml'), 'w') as f: with open('conf/jwtproxy_conf.yaml', 'w') as f:
f.write(rendered) f.write(rendered)
def main(): def main():
if not app.config.get('SETUP_COMPLETE', False): if app.config.get('SETUP_COMPLETE', False):
raise Exception('Your configuration bundle is either not mounted or setup has not been completed') sync_database_with_config(app.config)
setup_jwt_proxy()
sync_database_with_config(app.config)
setup_jwt_proxy()
# Record deploy # Record deploy
if release.REGION and release.GIT_HEAD: if release.REGION and release.GIT_HEAD:
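
Because the two-column diff above is hard to read, here is a condensed, assumption-laden restatement of the happy path of setup_jwt_proxy() on master: mint a short-lived instance service key, persist the key id and PEM, then render the jwtproxy template. The function name and argument list are illustrative; the config keys and template variables are taken from the hunk itself, and error handling plus the read-only / existing-key branches are omitted.

from datetime import datetime, timedelta

from jinja2 import Template

from util.generatepresharedkey import generate_key

def create_instance_key_and_proxy_conf(config, conf_dir, audience, security_issuer):
  # Mint a short-lived service key for this instance.
  minutes = config.get('INSTANCE_SERVICE_KEY_EXPIRATION', 120)
  expiration = datetime.now() + timedelta(minutes=minutes)
  quay_key, quay_key_id = generate_key(config['INSTANCE_SERVICE_KEY_SERVICE'],
                                       audience, expiration_date=expiration)

  # Persist the key id and the private key PEM where the hunk above writes them.
  with open(config['INSTANCE_SERVICE_KEY_KID_LOCATION'], mode='w') as f:
    f.write(quay_key_id)
  with open(config['INSTANCE_SERVICE_KEY_LOCATION'], mode='w') as f:
    f.write(quay_key.exportKey())

  # Render the jwtproxy configuration from its Jinja template.
  with open('%s/jwtproxy_conf.yaml.jnj' % conf_dir) as f:
    rendered = Template(f.read()).render(
      conf_dir=conf_dir,
      audience=audience,
      registry=audience + '/keys',
      key_id=quay_key_id,
      security_issuer=security_issuer,
      service_key_location=config['INSTANCE_SERVICE_KEY_LOCATION'],
    )
  with open('%s/jwtproxy_conf.yaml' % conf_dir, 'w') as f:
    f.write(rendered)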


@ -1,2 +0,0 @@
Charlton Austin <charlton.austin@coreos.com> (@charltonaustin)
Joseph Schorr <joseph.schorr@coreos.com> (@josephschorr)


@ -1,15 +1,5 @@
-from concurrent.futures import ThreadPoolExecutor
 from functools import partial
 
-from trollius import get_event_loop, coroutine
+from trollius import get_event_loop
 
 
-def wrap_with_threadpool(obj, worker_threads=1):
-  """
-  Wraps a class in an async executor so that it can be safely used in an event loop like trollius.
-  """
-  async_executor = ThreadPoolExecutor(worker_threads)
-  return AsyncWrapper(obj, executor=async_executor), async_executor
-
-
 class AsyncWrapper(object):
@ -35,8 +25,3 @@ class AsyncWrapper(object):
       return self._loop.run_in_executor(self._executor, callable_delegate_attr)
 
     return wrapper
-
-  @coroutine
-  def __call__(self, *args, **kwargs):
-    callable_delegate_attr = partial(self._delegate, *args, **kwargs)
-    return self._loop.run_in_executor(self._executor, callable_delegate_attr)
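
The removed wrap_with_threadpool() helper packaged a common trollius pattern: push a blocking call onto a ThreadPoolExecutor and yield on the resulting future from a coroutine. A minimal standalone sketch of that pattern; the names are illustrative.

from concurrent.futures import ThreadPoolExecutor
from functools import partial

from trollius import get_event_loop, coroutine, From, Return

executor = ThreadPoolExecutor(2)
loop = get_event_loop()

def blocking_lookup(key):
  # Stand-in for a blocking client call.
  return key.upper()

@coroutine
def lookup(key):
  # The blocking work runs on the worker thread; the coroutine only waits on the future.
  result = yield From(loop.run_in_executor(executor, partial(blocking_lookup, key)))
  raise Return(result)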


@ -1,12 +1,10 @@
 import logging
 import os
+import features
 import time
 import socket
 
-import features
-
 from app import app, userfiles as user_files, build_logs, dockerfile_build_queue
-from util.log import logfile_path
 
 from buildman.manager.enterprise import EnterpriseManager
 from buildman.manager.ephemeral import EphemeralBuilderManager
@ -37,12 +35,6 @@ def run_build_manager():
       time.sleep(1000)
     return
 
-  if app.config.get('REGISTRY_STATE', 'normal') == 'readonly':
-    logger.debug('Building is disabled while in read-only mode.')
-    while True:
-      time.sleep(1000)
-    return
-
   build_manager_config = app.config.get('BUILD_MANAGER')
   if build_manager_config is None:
     return
@ -85,13 +77,13 @@ def run_build_manager():
   server.run('0.0.0.0', websocket_port, controller_port, ssl=ssl_context)
 
 if __name__ == '__main__':
-  logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False)
+  logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
   logging.getLogger('peewee').setLevel(logging.WARN)
   logging.getLogger('boto').setLevel(logging.WARN)
 
   if app.config.get('EXCEPTION_LOG_TYPE', 'FakeSentry') == 'Sentry':
     buildman_name = '%s:buildman' % socket.gethostname()
     setup_logging(SentryHandler(app.config.get('SENTRY_DSN', ''), name=buildman_name,
                                 level=logging.ERROR))
 
   run_build_manager()


@ -1,37 +1,29 @@
import datetime import datetime
import os
import time import time
import logging import logging
import json import json
import trollius import trollius
import re
from autobahn.wamp.exception import ApplicationError from autobahn.wamp.exception import ApplicationError
from trollius import From, Return from trollius import From, Return
from active_migration import ActiveDataMigration, ERTMigrationFlags
from buildman.server import BuildJobResult from buildman.server import BuildJobResult
from buildman.component.basecomponent import BaseComponent from buildman.component.basecomponent import BaseComponent
from buildman.component.buildparse import extract_current_step
from buildman.jobutil.buildjob import BuildJobLoadException from buildman.jobutil.buildjob import BuildJobLoadException
from buildman.jobutil.buildstatus import StatusHandler from buildman.jobutil.buildstatus import StatusHandler
from buildman.jobutil.workererror import WorkerError from buildman.jobutil.workererror import WorkerError
from app import app from data import model
from data.database import BUILD_PHASE, UseThenDisconnect from data.database import BUILD_PHASE
from data.model import InvalidRepositoryBuildException
from data.registry_model import registry_model
from util import slash_join
HEARTBEAT_DELTA = datetime.timedelta(seconds=60) HEARTBEAT_DELTA = datetime.timedelta(seconds=30)
BUILD_HEARTBEAT_DELAY = datetime.timedelta(seconds=30) BUILD_HEARTBEAT_DELAY = datetime.timedelta(seconds=30)
HEARTBEAT_TIMEOUT = 10 HEARTBEAT_TIMEOUT = 10
INITIAL_TIMEOUT = 25 INITIAL_TIMEOUT = 25
SUPPORTED_WORKER_VERSIONS = ['0.3'] SUPPORTED_WORKER_VERSIONS = ['0.3']
# Label which marks a manifest with its source build ID.
INTERNAL_LABEL_BUILD_UUID = 'quay.build.uuid'
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
class ComponentStatus(object): class ComponentStatus(object):
@ -70,11 +62,12 @@ class BuildComponent(BaseComponent):
def onJoin(self, details): def onJoin(self, details):
logger.debug('Registering methods and listeners for component %s', self.builder_realm) logger.debug('Registering methods and listeners for component %s', self.builder_realm)
yield From(self.register(self._on_ready, u'io.quay.buildworker.ready')) yield From(self.register(self._on_ready, u'io.quay.buildworker.ready'))
yield From(self.register(self._determine_cache_tag, u'io.quay.buildworker.determinecachetag')) yield From(self.register(self._determine_cache_tag,
u'io.quay.buildworker.determinecachetag'))
yield From(self.register(self._ping, u'io.quay.buildworker.ping')) yield From(self.register(self._ping, u'io.quay.buildworker.ping'))
yield From(self.register(self._on_log_message, u'io.quay.builder.logmessagesynchronously'))
yield From(self.subscribe(self._on_heartbeat, u'io.quay.builder.heartbeat')) yield From(self.subscribe(self._on_heartbeat, 'io.quay.builder.heartbeat'))
yield From(self.subscribe(self._on_log_message, 'io.quay.builder.logmessage'))
yield From(self._set_status(ComponentStatus.WAITING)) yield From(self._set_status(ComponentStatus.WAITING))
@ -103,7 +96,7 @@ class BuildComponent(BaseComponent):
try: try:
build_config = build_job.build_config build_config = build_job.build_config
except BuildJobLoadException as irbe: except BuildJobLoadException as irbe:
yield From(self._build_failure('Could not load build job information', irbe)) self._build_failure('Could not load build job information', irbe)
raise Return() raise Return()
base_image_information = {} base_image_information = {}
@ -122,22 +115,20 @@ class BuildComponent(BaseComponent):
# defaults to empty string to avoid requiring a pointer on the builder. # defaults to empty string to avoid requiring a pointer on the builder.
# sub_directory: The location within the build package of the Dockerfile and the build context. # sub_directory: The location within the build package of the Dockerfile and the build context.
# repository: The repository for which this build is occurring. # repository: The repository for which this build is occurring.
# registry: The registry for which this build is occuring (e.g. 'quay.io'). # registry: The registry for which this build is occuring (e.g. 'quay.io', 'staging.quay.io').
# pull_token: The token to use when pulling the cache for building. # pull_token: The token to use when pulling the cache for building.
# push_token: The token to use to push the built image. # push_token: The token to use to push the built image.
# tag_names: The name(s) of the tag(s) for the newly built image. # tag_names: The name(s) of the tag(s) for the newly built image.
# base_image: The image name and credentials to use to conduct the base image pull. # base_image: The image name and credentials to use to conduct the base image pull.
# username: The username for pulling the base image (if any). # username: The username for pulling the base image (if any).
# password: The password for pulling the base image (if any). # password: The password for pulling the base image (if any).
context, dockerfile_path = self.extract_dockerfile_args(build_config)
build_arguments = { build_arguments = {
'build_package': build_job.get_build_package_url(self.user_files), 'build_package': build_job.get_build_package_url(self.user_files),
'context': context, 'sub_directory': build_config.get('build_subdir', ''),
'dockerfile_path': dockerfile_path,
'repository': repository_name, 'repository': repository_name,
'registry': self.registry_hostname, 'registry': self.registry_hostname,
'pull_token': build_job.repo_build.access_token.get_code(), 'pull_token': build_job.repo_build.access_token.code,
'push_token': build_job.repo_build.access_token.get_code(), 'push_token': build_job.repo_build.access_token.code,
'tag_names': build_config.get('docker_tags', ['latest']), 'tag_names': build_config.get('docker_tags', ['latest']),
'base_image': base_image_information, 'base_image': base_image_information,
} }
@ -147,23 +138,11 @@ class BuildComponent(BaseComponent):
# url: url used to clone the git repository # url: url used to clone the git repository
# sha: the sha1 identifier of the commit to check out # sha: the sha1 identifier of the commit to check out
# private_key: the key used to get read access to the git repository # private_key: the key used to get read access to the git repository
if build_job.repo_build.trigger.private_key is not None:
# TODO(remove-unenc): Remove legacy field.
private_key = None
if build_job.repo_build.trigger is not None and \
build_job.repo_build.trigger.secure_private_key is not None:
private_key = build_job.repo_build.trigger.secure_private_key.decrypt()
if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS) and \
private_key is None and \
build_job.repo_build.trigger is not None:
private_key = build_job.repo_build.trigger.private_key
if private_key is not None:
build_arguments['git'] = { build_arguments['git'] = {
'url': build_config['trigger_metadata'].get('git_url', ''), 'url': build_config['trigger_metadata'].get('git_url', ''),
'sha': BuildComponent._commit_sha(build_config), 'sha': BuildComponent._commit_sha(build_config),
'private_key': private_key or '', 'private_key': build_job.repo_build.trigger.private_key,
} }
# If the build args have no buildpack, mark it as a failure before sending # If the build args have no buildpack, mark it as a failure before sending
@ -171,37 +150,20 @@ class BuildComponent(BaseComponent):
if not build_arguments['build_package'] and not build_arguments['git']: if not build_arguments['build_package'] and not build_arguments['git']:
logger.error('%s: insufficient build args: %s', logger.error('%s: insufficient build args: %s',
self._current_job.repo_build.uuid, build_arguments) self._current_job.repo_build.uuid, build_arguments)
yield From(self._build_failure('Insufficient build arguments. No buildpack available.')) self._build_failure('Insufficient build arguments. No buildpack available.')
raise Return() raise Return()
# Invoke the build. # Invoke the build.
logger.debug('Invoking build: %s', self.builder_realm) logger.debug('Invoking build: %s', self.builder_realm)
logger.debug('With Arguments: %s', build_arguments) logger.debug('With Arguments: %s', build_arguments)
def build_complete_callback(result): self.call("io.quay.builder.build", **build_arguments).add_done_callback(self._build_complete)
""" This function is used to execute a coroutine as the callback. """
trollius.ensure_future(self._build_complete(result))
self.call("io.quay.builder.build", **build_arguments).add_done_callback(build_complete_callback)
# Set the heartbeat for the future. If the builder never receives the build call, # Set the heartbeat for the future. If the builder never receives the build call,
# then this will cause a timeout after 30 seconds. We know the builder has registered # then this will cause a timeout after 30 seconds. We know the builder has registered
# by this point, so it makes sense to have a timeout. # by this point, so it makes sense to have a timeout.
self._last_heartbeat = datetime.datetime.utcnow() + BUILD_HEARTBEAT_DELAY self._last_heartbeat = datetime.datetime.utcnow() + BUILD_HEARTBEAT_DELAY
@staticmethod
def extract_dockerfile_args(build_config):
dockerfile_path = build_config.get('build_subdir', '')
context = build_config.get('context', '')
if not (dockerfile_path == '' or context == ''):
# This should not happen and can be removed when we centralize validating build_config
dockerfile_abspath = slash_join('', dockerfile_path)
if ".." in os.path.relpath(dockerfile_abspath, context):
return os.path.split(dockerfile_path)
dockerfile_path = os.path.relpath(dockerfile_abspath, context)
return context, dockerfile_path
@staticmethod @staticmethod
def _commit_sha(build_config): def _commit_sha(build_config):
""" Determines whether the metadata is using an old schema or not and returns the commit. """ """ Determines whether the metadata is using an old schema or not and returns the commit. """
@ -209,14 +171,6 @@ class BuildComponent(BaseComponent):
old_commit_sha = build_config['trigger_metadata'].get('commit_sha', '') old_commit_sha = build_config['trigger_metadata'].get('commit_sha', '')
return commit_sha or old_commit_sha return commit_sha or old_commit_sha
@staticmethod
def name_and_path(subdir):
""" Returns the dockerfile path and name """
if subdir.endswith("/"):
subdir += "Dockerfile"
elif not subdir.endswith("Dockerfile"):
subdir += "/Dockerfile"
return os.path.split(subdir)
@staticmethod @staticmethod
def _total_completion(statuses, total_images): def _total_completion(statuses, total_images):
@ -254,8 +208,6 @@ class BuildComponent(BaseComponent):
status_dict[status_completion_key] = \ status_dict[status_completion_key] = \
BuildComponent._total_completion(images, max(len(images), num_images)) BuildComponent._total_completion(images, max(len(images), num_images))
@trollius.coroutine
def _on_log_message(self, phase, json_data): def _on_log_message(self, phase, json_data):
""" Tails log messages and updates the build status. """ """ Tails log messages and updates the build status. """
# Update the heartbeat. # Update the heartbeat.
@ -282,23 +234,15 @@ class BuildComponent(BaseComponent):
current_status_string = str(fully_unwrapped.encode('utf-8')) current_status_string = str(fully_unwrapped.encode('utf-8'))
if current_status_string and phase == BUILD_PHASE.BUILDING: if current_status_string and phase == BUILD_PHASE.BUILDING:
current_step = extract_current_step(current_status_string) step_increment = re.search(r'Step ([0-9]+) :', current_status_string)
if step_increment:
current_step = int(step_increment.group(1))
# Parse and update the phase and the status_dict. The status dictionary contains # Parse and update the phase and the status_dict. The status dictionary contains
# the pull/push progress, as well as the current step index. # the pull/push progress, as well as the current step index.
with self._build_status as status_dict: with self._build_status as status_dict:
try: if self._build_status.set_phase(phase, log_data.get('status_data')):
changed_phase = yield From(self._build_status.set_phase(phase, log_data.get('status_data'))) logger.debug('Build %s has entered a new phase: %s', self.builder_realm, phase)
if changed_phase:
logger.debug('Build %s has entered a new phase: %s', self.builder_realm, phase)
elif self._current_job.repo_build.phase == BUILD_PHASE.CANCELLED:
build_id = self._current_job.repo_build.uuid
logger.debug('Trying to move cancelled build into phase: %s with id: %s', phase, build_id)
raise Return(False)
except InvalidRepositoryBuildException:
build_id = self._current_job.repo_build.uuid
logger.warning('Build %s was not found; repo was probably deleted', build_id)
raise Return(False)
BuildComponent._process_pushpull_status(status_dict, phase, log_data, self._image_info) BuildComponent._process_pushpull_status(status_dict, phase, log_data, self._image_info)
@ -309,13 +253,12 @@ class BuildComponent(BaseComponent):
# If the json data contains an error, then something went wrong with a push or pull. # If the json data contains an error, then something went wrong with a push or pull.
if 'error' in log_data: if 'error' in log_data:
yield From(self._build_status.set_error(log_data['error'])) self._build_status.set_error(log_data['error'])
if current_step is not None: if current_step is not None:
yield From(self._build_status.set_command(current_status_string)) self._build_status.set_command(current_status_string)
elif phase == BUILD_PHASE.BUILDING: elif phase == BUILD_PHASE.BUILDING:
yield From(self._build_status.append_log(current_status_string)) self._build_status.append_log(current_status_string)
raise Return(True)
@trollius.coroutine @trollius.coroutine
def _determine_cache_tag(self, command_comments, base_image_name, base_image_tag, base_image_id): def _determine_cache_tag(self, command_comments, base_image_name, base_image_tag, base_image_id):
@ -328,20 +271,18 @@ class BuildComponent(BaseComponent):
tag_found = self._current_job.determine_cached_tag(base_image_id, command_comments) tag_found = self._current_job.determine_cached_tag(base_image_id, command_comments)
raise Return(tag_found or '') raise Return(tag_found or '')
@trollius.coroutine
def _build_failure(self, error_message, exception=None): def _build_failure(self, error_message, exception=None):
""" Handles and logs a failed build. """ """ Handles and logs a failed build. """
yield From(self._build_status.set_error(error_message, { self._build_status.set_error(error_message, {
'internal_error': str(exception) if exception else None 'internal_error': str(exception) if exception else None
})) })
build_id = self._current_job.repo_build.uuid build_id = self._current_job.repo_build.uuid
logger.warning('Build %s failed with message: %s', build_id, error_message) logger.warning('Build %s failed with message: %s', build_id, error_message)
# Mark that the build has finished (in an error state) # Mark that the build has finished (in an error state)
yield From(self._build_finished(BuildJobResult.ERROR)) trollius.async(self._build_finished(BuildJobResult.ERROR))
@trollius.coroutine
def _build_complete(self, result): def _build_complete(self, result):
""" Wraps up a completed build. Handles any errors and calls self._build_finished. """ """ Wraps up a completed build. Handles any errors and calls self._build_finished. """
build_id = self._current_job.repo_build.uuid build_id = self._current_job.repo_build.uuid
@ -352,35 +293,29 @@ class BuildComponent(BaseComponent):
kwargs = {} kwargs = {}
# Note: If we are hitting an older builder that didn't return ANY map data, then the result # Note: If we are hitting an older builder that didn't return ANY map data, then the result
# value will be a bool instead of a proper CallResult object. # value will be a bool instead of a proper CallResult object (because autobahn sucks).
# Therefore: we have a try-except guard here to ensure we don't hit this pitfall. # Therefore: we have a try-except guard here to ensure we don't hit this pitfall.
try: try:
kwargs = result_value.kwresults kwargs = result_value.kwresults
except: except:
pass pass
try: self._build_status.set_phase(BUILD_PHASE.COMPLETE)
yield From(self._build_status.set_phase(BUILD_PHASE.COMPLETE)) trollius.async(self._build_finished(BuildJobResult.COMPLETE))
except InvalidRepositoryBuildException:
logger.warning('Build %s was not found; repo was probably deleted', build_id)
raise Return()
yield From(self._build_finished(BuildJobResult.COMPLETE))
# Label the pushed manifests with the build metadata. # Label the pushed manifests with the build metadata.
manifest_digests = kwargs.get('digests') or [] manifest_digests = kwargs.get('digests') or []
repository = registry_model.lookup_repository(self._current_job.namespace, for digest in manifest_digests:
self._current_job.repo_name) try:
if repository is not None: manifest = model.tag.load_manifest_by_digest(self._current_job.namespace,
for digest in manifest_digests: self._current_job.repo_name, digest)
with UseThenDisconnect(app.config): model.label.create_manifest_label(manifest, model.label.INTERNAL_LABEL_BUILD_UUID,
manifest = registry_model.lookup_manifest_by_digest(repository, digest, build_id, 'internal', 'text/plain')
require_available=True) except model.InvalidManifestException:
if manifest is None: logger.debug('Could not find built manifest with digest %s under repo %s/%s for build %s',
continue digest, self._current_job.namespace, self._current_job.repo_name,
build_id)
registry_model.create_manifest_label(manifest, INTERNAL_LABEL_BUILD_UUID, continue
build_id, 'internal', 'text/plain')
# Send the notification that the build has completed successfully. # Send the notification that the build has completed successfully.
self._current_job.send_notification('build_success', self._current_job.send_notification('build_success',
@ -390,10 +325,9 @@ class BuildComponent(BaseComponent):
worker_error = WorkerError(aex.error, aex.kwargs.get('base_error')) worker_error = WorkerError(aex.error, aex.kwargs.get('base_error'))
# Write the error to the log. # Write the error to the log.
yield From(self._build_status.set_error(worker_error.public_message(), self._build_status.set_error(worker_error.public_message(), worker_error.extra_data(),
worker_error.extra_data(), internal_error=worker_error.is_internal_error(),
internal_error=worker_error.is_internal_error(), requeued=self._current_job.has_retries_remaining())
requeued=self._current_job.has_retries_remaining()))
# Send the notification that the build has failed. # Send the notification that the build has failed.
self._current_job.send_notification('build_failure', self._current_job.send_notification('build_failure',
@ -401,21 +335,17 @@ class BuildComponent(BaseComponent):
# Mark the build as completed. # Mark the build as completed.
if worker_error.is_internal_error(): if worker_error.is_internal_error():
logger.exception('[BUILD INTERNAL ERROR: Remote] Build ID: %s: %s', build_id, logger.exception('Got remote internal exception for build: %s', build_id)
worker_error.public_message()) trollius.async(self._build_finished(BuildJobResult.INCOMPLETE))
yield From(self._build_finished(BuildJobResult.INCOMPLETE))
else: else:
logger.debug('Got remote failure exception for build %s: %s', build_id, aex) logger.debug('Got remote failure exception for build %s: %s', build_id, aex)
yield From(self._build_finished(BuildJobResult.ERROR)) trollius.async(self._build_finished(BuildJobResult.ERROR))
# Remove the current job.
self._current_job = None
@trollius.coroutine @trollius.coroutine
def _build_finished(self, job_status): def _build_finished(self, job_status):
""" Alerts the parent that a build has completed and sets the status back to running. """ """ Alerts the parent that a build has completed and sets the status back to running. """
yield From(self.parent_manager.job_completed(self._current_job, job_status, self)) yield From(self.parent_manager.job_completed(self._current_job, job_status, self))
self._current_job = None
# Set the component back to a running state. # Set the component back to a running state.
yield From(self._set_status(ComponentStatus.RUNNING)) yield From(self._set_status(ComponentStatus.RUNNING))
@ -482,8 +412,9 @@ class BuildComponent(BaseComponent):
raise Return() raise Return()
# If there is an active build, write the heartbeat to its status. # If there is an active build, write the heartbeat to its status.
if self._build_status is not None: build_status = self._build_status
with self._build_status as status_dict: if build_status is not None:
with build_status as status_dict:
status_dict['heartbeat'] = int(time.time()) status_dict['heartbeat'] = int(time.time())
# Mark the build item. # Mark the build item.
@ -517,23 +448,13 @@ class BuildComponent(BaseComponent):
# If we still have a running job, then it has not completed and we need to tell the parent # If we still have a running job, then it has not completed and we need to tell the parent
# manager. # manager.
if self._current_job is not None: if self._current_job is not None:
yield From(self._build_status.set_error('Build worker timed out', internal_error=True, self._build_status.set_error('Build worker timed out', internal_error=True,
requeued=self._current_job.has_retries_remaining())) requeued=self._current_job.has_retries_remaining())
build_id = self._current_job.build_uuid
logger.error('[BUILD INTERNAL ERROR: Timeout] Build ID: %s', build_id)
yield From(self.parent_manager.job_completed(self._current_job, yield From(self.parent_manager.job_completed(self._current_job,
BuildJobResult.INCOMPLETE, BuildJobResult.INCOMPLETE,
self)) self))
self._current_job = None
# Unregister the current component so that it cannot be invoked again. # Unregister the current component so that it cannot be invoked again.
self.parent_manager.build_component_disposed(self, True) self.parent_manager.build_component_disposed(self, True)
# Remove the job reference.
self._current_job = None
@trollius.coroutine
def cancel_build(self):
self.parent_manager.build_component_disposed(self, True)
self._current_job = None
yield From(self._set_status(ComponentStatus.RUNNING))
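
One detail worth calling out from this hunk: on master, _build_complete became a coroutine, so it can no longer be handed to add_done_callback directly; the plain build_complete_callback wrapper schedules it with trollius.ensure_future instead. A standalone sketch of that wrapper pattern; the names are illustrative.

import trollius
from trollius import From, coroutine

@coroutine
def build_complete(result):
  # Stand-in for a coroutine completion handler; anything yielded here runs
  # inside the event loop rather than inside the WAMP done-callback.
  yield From(trollius.sleep(0))
  print('build finished with %r' % (result,))

def build_complete_callback(future):
  # Done-callbacks must be plain functions, so the coroutine is scheduled explicitly.
  trollius.ensure_future(build_complete(future.result()))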


@ -1,15 +0,0 @@
import re
def extract_current_step(current_status_string):
""" Attempts to extract the current step numeric identifier from the given status string. Returns the step
number or None if none.
"""
# Older format: `Step 12 :`
# Newer format: `Step 4/13 :`
step_increment = re.search(r'Step ([0-9]+)/([0-9]+) :', current_status_string)
if step_increment:
return int(step_increment.group(1))
step_increment = re.search(r'Step ([0-9]+) :', current_status_string)
if step_increment:
return int(step_increment.group(1))


@ -1,36 +0,0 @@
import pytest
from buildman.component.buildcomponent import BuildComponent
@pytest.mark.parametrize('input,expected_path,expected_file', [
("", "/", "Dockerfile"),
("/", "/", "Dockerfile"),
("/Dockerfile", "/", "Dockerfile"),
("/server.Dockerfile", "/", "server.Dockerfile"),
("/somepath", "/somepath", "Dockerfile"),
("/somepath/", "/somepath", "Dockerfile"),
("/somepath/Dockerfile", "/somepath", "Dockerfile"),
("/somepath/server.Dockerfile", "/somepath", "server.Dockerfile"),
("/somepath/some_other_path", "/somepath/some_other_path", "Dockerfile"),
("/somepath/some_other_path/", "/somepath/some_other_path", "Dockerfile"),
("/somepath/some_other_path/Dockerfile", "/somepath/some_other_path", "Dockerfile"),
("/somepath/some_other_path/server.Dockerfile", "/somepath/some_other_path", "server.Dockerfile"),
])
def test_path_is_dockerfile(input, expected_path, expected_file):
actual_path, actual_file = BuildComponent.name_and_path(input)
assert actual_path == expected_path
assert actual_file == expected_file
@pytest.mark.parametrize('build_config,context,dockerfile_path', [
({}, '', ''),
({'build_subdir': '/builddir/Dockerfile'}, '', '/builddir/Dockerfile'),
({'context': '/builddir'}, '/builddir', ''),
({'context': '/builddir', 'build_subdir': '/builddir/Dockerfile'}, '/builddir', 'Dockerfile'),
({'context': '/some_other_dir/Dockerfile', 'build_subdir': '/builddir/Dockerfile'}, '/builddir', 'Dockerfile'),
({'context': '/', 'build_subdir':'Dockerfile'}, '/', 'Dockerfile')
])
def test_extract_dockerfile_args(build_config, context, dockerfile_path):
actual_context, actual_dockerfile_path = BuildComponent.extract_dockerfile_args(build_config)
assert context == actual_context
assert dockerfile_path == actual_dockerfile_path


@ -1,16 +0,0 @@
import pytest
from buildman.component.buildparse import extract_current_step
@pytest.mark.parametrize('input,expected_step', [
("", None),
("Step a :", None),
("Step 1 :", 1),
("Step 1 : ", 1),
("Step 1/2 : ", 1),
("Step 2/17 : ", 2),
("Step 4/13 : ARG somearg=foo", 4),
])
def test_extract_current_step(input, expected_step):
assert extract_current_step(input) == expected_step


@ -1,13 +1,10 @@
 import json
 import logging

-from app import app
-from cachetools.func import lru_cache
-from notifications import spawn_notification
+from cachetools import lru_cache
+from endpoints.notificationhelper import spawn_notification
 from data import model
-from data.registry_model import registry_model
-from data.registry_model.datatypes import RepositoryReference
-from data.database import UseThenDisconnect
+from util.imagetree import ImageTree
 from util.morecollections import AttrDict

 logger = logging.getLogger(__name__)
@ -17,7 +14,6 @@ class BuildJobLoadException(Exception):
""" Exception raised if a build job could not be instantiated for some reason. """ """ Exception raised if a build job could not be instantiated for some reason. """
pass pass
class BuildJob(object): class BuildJob(object):
""" Represents a single in-progress build job. """ """ Represents a single in-progress build job. """
def __init__(self, job_item): def __init__(self, job_item):
@ -25,10 +21,9 @@ class BuildJob(object):
     try:
       self.job_details = json.loads(job_item.body)
-      self.build_notifier = BuildJobNotifier(self.build_uuid)
     except ValueError:
       raise BuildJobLoadException(
         'Could not parse build queue item config with ID %s' % self.job_details['build_uuid']
       )
   @property
@ -39,16 +34,43 @@ class BuildJob(object):
return self.job_item.retries_remaining > 0 return self.job_item.retries_remaining > 0
def send_notification(self, kind, error_message=None, image_id=None, manifest_digests=None): def send_notification(self, kind, error_message=None, image_id=None, manifest_digests=None):
self.build_notifier.send_notification(kind, error_message, image_id, manifest_digests) tags = self.build_config.get('docker_tags', ['latest'])
event_data = {
'build_id': self.repo_build.uuid,
'build_name': self.repo_build.display_name,
'docker_tags': tags,
'trigger_id': self.repo_build.trigger.uuid,
'trigger_kind': self.repo_build.trigger.service.name,
'trigger_metadata': self.build_config.get('trigger_metadata', {})
}
if image_id is not None:
event_data['image_id'] = image_id
if manifest_digests:
event_data['manifest_digests'] = manifest_digests
if error_message is not None:
event_data['error_message'] = error_message
# TODO(jzelinskie): remove when more endpoints have been converted to using
# interfaces
repo = AttrDict({
'namespace_name': self.repo_build.repository.namespace_user.username,
'name': self.repo_build.repository.name,
})
spawn_notification(repo, kind, event_data,
subpage='build/%s' % self.repo_build.uuid,
pathargs=['build', self.repo_build.uuid])
   @lru_cache(maxsize=1)
   def _load_repo_build(self):
-    with UseThenDisconnect(app.config):
-      try:
-        return model.build.get_repository_build(self.build_uuid)
-      except model.InvalidRepositoryBuildException:
-        raise BuildJobLoadException(
-          'Could not load repository build with ID %s' % self.build_uuid)
+    try:
+      return model.build.get_repository_build(self.build_uuid)
+    except model.InvalidRepositoryBuildException:
+      raise BuildJobLoadException(
+        'Could not load repository build with ID %s' % self.build_uuid)
   @property
   def build_uuid(self):
@ -78,7 +100,7 @@ class BuildJob(object):
     if not self.repo_build.resource_key:
       return ''

-    return user_files.get_file_url(self.repo_build.resource_key, '127.0.0.1', requires_cors=False)
+    return user_files.get_file_url(self.repo_build.resource_key, requires_cors=False)
@property @property
def pull_credentials(self): def pull_credentials(self):
@ -96,88 +118,67 @@ class BuildJob(object):
   def determine_cached_tag(self, base_image_id=None, cache_comments=None):
     """ Returns the tag to pull to prime the cache or None if none. """
-    cached_tag = self._determine_cached_tag_by_tag()
+    cached_tag = None
+    if base_image_id and cache_comments:
+      cached_tag = self._determine_cached_tag_by_comments(base_image_id, cache_comments)
+
+    if not cached_tag:
+      cached_tag = self._determine_cached_tag_by_tag()
+
     logger.debug('Determined cached tag %s for %s: %s', cached_tag, base_image_id, cache_comments)
     return cached_tag
def _determine_cached_tag_by_comments(self, base_image_id, cache_commands):
""" Determines the tag to use for priming the cache for this build job, by matching commands
starting at the given base_image_id. This mimics the Docker cache checking, so it should,
in theory, provide "perfect" caching.
"""
# Lookup the base image in the repository. If it doesn't exist, nothing more to do.
repo_build = self.repo_build
repo_namespace = repo_build.repository.namespace_user.username
repo_name = repo_build.repository.name
base_image = model.image.get_image(repo_build.repository, base_image_id)
if base_image is None:
return None
# Build an in-memory tree of the full hierarchy of images in the repository.
all_images = model.image.get_repository_images_without_placements(repo_build.repository,
with_ancestor=base_image)
all_tags = model.tag.list_repository_tags(repo_namespace, repo_name)
tree = ImageTree(all_images, all_tags, base_filter=base_image.id)
# Find a path in the tree, starting at the base image, that matches the cache comments
# or some subset thereof.
def checker(step, image):
if step >= len(cache_commands):
return False
full_command = '["/bin/sh", "-c", "%s"]' % cache_commands[step]
logger.debug('Checking step #%s: %s, %s == %s', step, image.id, image.command, full_command)
return image.command == full_command
path = tree.find_longest_path(base_image.id, checker)
if not path:
return None
# Find any tag associated with the last image in the path.
return tree.tag_containing_image(path[-1])
   def _determine_cached_tag_by_tag(self):
     """ Determines the cached tag by looking for one of the tags being built, and seeing if it
         exists in the repository. This is a fallback for when no comment information is available.
     """
-    with UseThenDisconnect(app.config):
-      tags = self.build_config.get('docker_tags', ['latest'])
-      repository = RepositoryReference.for_repo_obj(self.repo_build.repository)
-      matching_tag = registry_model.find_matching_tag(repository, tags)
-      if matching_tag is not None:
-        return matching_tag.name
-
-      most_recent_tag = registry_model.get_most_recent_tag(repository)
-      if most_recent_tag is not None:
-        return most_recent_tag.name
-
-      return None
+    tags = self.build_config.get('docker_tags', ['latest'])
+    repository = self.repo_build.repository
+    existing_tags = model.tag.list_repository_tags(repository.namespace_user.username,
+                                                   repository.name)
+    cached_tags = set(tags) & set([tag.name for tag in existing_tags])
+    if cached_tags:
+      return list(cached_tags)[0]
+
+    return None
class BuildJobNotifier(object):
""" A class for sending notifications to a job that only relies on the build_uuid """
def __init__(self, build_uuid):
self.build_uuid = build_uuid
@property
def repo_build(self):
return self._load_repo_build()
@lru_cache(maxsize=1)
def _load_repo_build(self):
try:
return model.build.get_repository_build(self.build_uuid)
except model.InvalidRepositoryBuildException:
raise BuildJobLoadException(
'Could not load repository build with ID %s' % self.build_uuid)
@property
def build_config(self):
try:
return json.loads(self.repo_build.job_config)
except ValueError:
raise BuildJobLoadException(
'Could not parse repository build job config with ID %s' % self.repo_build.uuid
)
def send_notification(self, kind, error_message=None, image_id=None, manifest_digests=None):
with UseThenDisconnect(app.config):
tags = self.build_config.get('docker_tags', ['latest'])
trigger = self.repo_build.trigger
if trigger is not None and trigger.id is not None:
trigger_kind = trigger.service.name
else:
trigger_kind = None
event_data = {
'build_id': self.repo_build.uuid,
'build_name': self.repo_build.display_name,
'docker_tags': tags,
'trigger_id': trigger.uuid if trigger is not None else None,
'trigger_kind': trigger_kind,
'trigger_metadata': self.build_config.get('trigger_metadata', {})
}
if image_id is not None:
event_data['image_id'] = image_id
if manifest_digests:
event_data['manifest_digests'] = manifest_digests
if error_message is not None:
event_data['error_message'] = error_message
# TODO: remove when more endpoints have been converted to using
# interfaces
repo = AttrDict({
'namespace_name': self.repo_build.repository.namespace_user.username,
'name': self.repo_build.repository.name,
})
spawn_notification(repo, kind, event_data,
subpage='build/%s' % self.repo_build.uuid,
pathargs=['build', self.repo_build.uuid])
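Side note on the v1.18.1 fallback shown above: _determine_cached_tag_by_tag reduces to a set intersection between the tags being built and the tags already present in the repository. A minimal, self-contained sketch of that idea (the function and variable names here are illustrative, not the real model API):

  def pick_cached_tag(tags_being_built, existing_tag_names):
    # Any tag being built that already exists in the repository can be pulled to prime the cache.
    candidates = set(tags_being_built) & set(existing_tag_names)
    return sorted(candidates)[0] if candidates else None

  print(pick_cached_tag(['latest', 'v2'], ['latest', 'v1']))  # -> 'latest'

The real code returns an arbitrary member of the intersection; sorting here only makes the sketch deterministic.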


@ -1,17 +1,12 @@
+from data.database import BUILD_PHASE
+from data import model
+from redis import RedisError
+
 import datetime
 import logging

-from redis import RedisError
-from trollius import From, Return, coroutine
-
-from data.database import BUILD_PHASE
-from data import model
-from buildman.asyncutil import AsyncWrapper

 logger = logging.getLogger(__name__)
 class StatusHandler(object):
   """ Context wrapper for writing status to build logs. """
@ -19,70 +14,65 @@ class StatusHandler(object):
     self._current_phase = None
     self._current_command = None
     self._uuid = repository_build_uuid
-    self._build_logs = AsyncWrapper(build_logs)
-    self._sync_build_logs = build_logs
-    self._build_model = AsyncWrapper(model.build)
+    self._build_logs = build_logs

     self._status = {
       'total_commands': 0,
       'current_command': None,
       'push_completion': 0.0,
       'pull_completion': 0.0,
     }

     # Write the initial status.
     self.__exit__(None, None, None)

-  @coroutine
   def _append_log_message(self, log_message, log_type=None, log_data=None):
     log_data = log_data or {}
     log_data['datetime'] = str(datetime.datetime.now())

     try:
-      yield From(self._build_logs.append_log_message(self._uuid, log_message, log_type, log_data))
+      self._build_logs.append_log_message(self._uuid, log_message, log_type, log_data)
     except RedisError:
       logger.exception('Could not save build log for build %s: %s', self._uuid, log_message)

-  @coroutine
   def append_log(self, log_message, extra_data=None):
     if log_message is None:
       return
-    yield From(self._append_log_message(log_message, log_data=extra_data))
+    self._append_log_message(log_message, log_data=extra_data)

-  @coroutine
   def set_command(self, command, extra_data=None):
     if self._current_command == command:
-      raise Return()
+      return

     self._current_command = command
-    yield From(self._append_log_message(command, self._build_logs.COMMAND, extra_data))
+    self._append_log_message(command, self._build_logs.COMMAND, extra_data)

-  @coroutine
   def set_error(self, error_message, extra_data=None, internal_error=False, requeued=False):
-    error_phase = BUILD_PHASE.INTERNAL_ERROR if internal_error and requeued else BUILD_PHASE.ERROR
-    yield From(self.set_phase(error_phase))
+    self.set_phase(BUILD_PHASE.INTERNAL_ERROR if internal_error and requeued else BUILD_PHASE.ERROR)

     extra_data = extra_data or {}
     extra_data['internal_error'] = internal_error
-    yield From(self._append_log_message(error_message, self._build_logs.ERROR, extra_data))
+    self._append_log_message(error_message, self._build_logs.ERROR, extra_data)

-  @coroutine
   def set_phase(self, phase, extra_data=None):
     if phase == self._current_phase:
-      raise Return(False)
+      return False

     self._current_phase = phase
-    yield From(self._append_log_message(phase, self._build_logs.PHASE, extra_data))
+    self._append_log_message(phase, self._build_logs.PHASE, extra_data)

     # Update the repository build with the new phase
-    raise Return(self._build_model.update_phase_then_close(self._uuid, phase))
+    repo_build = model.build.get_repository_build(self._uuid)
+    repo_build.phase = phase
+    repo_build.save()
+    return True

   def __enter__(self):
     return self._status

   def __exit__(self, exc_type, value, traceback):
     try:
-      self._sync_build_logs.set_status(self._uuid, self._status)
+      self._build_logs.set_status(self._uuid, self._status)
     except RedisError:
       logger.exception('Could not set status of build %s to %s', self._uuid, self._status)
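A rough usage sketch of the context-wrapper behaviour above, following the synchronous v1.18.1 side (the module path, constructor argument order, and the build_logs handle are assumptions, not taken from the diff):

  from buildman.jobutil.buildstatus import StatusHandler   # module path assumed
  from data.database import BUILD_PHASE
  from app import build_logs                               # Redis-backed log store, assumed

  status_handler = StatusHandler(build_logs, 'some-build-uuid')   # argument order assumed

  status_handler.set_phase(BUILD_PHASE.BUILDING)
  status_handler.set_command('Step 4/13 : RUN make')

  # __enter__ hands back the mutable status dict; __exit__ persists it via set_status().
  with status_handler as status:
    status['total_commands'] = 13
    status['current_command'] = 4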


@ -5,91 +5,79 @@ class WorkerError(object):
     self._base_message = base_message

     self._error_handlers = {
       'io.quay.builder.buildpackissue': {
         'message': 'Could not load build package',
         'is_internal': True,
       },

       'io.quay.builder.gitfailure': {
         'message': 'Could not clone git repository',
         'show_base_error': True,
       },

       'io.quay.builder.gitcheckout': {
-        'message': 'Could not checkout git ref. If you force pushed recently, ' +
-                   'the commit may be missing.',
-        'show_base_error': True,
-      },
+        'message': 'Could not checkout git ref. Have you force pushed recently?',
+      },

       'io.quay.builder.cannotextractbuildpack': {
         'message': 'Could not extract the contents of the build package'
       },

       'io.quay.builder.cannotpullforcache': {
         'message': 'Could not pull cached image',
         'is_internal': True
       },

       'io.quay.builder.dockerfileissue': {
         'message': 'Could not find or parse Dockerfile',
         'show_base_error': True
       },

       'io.quay.builder.cannotpullbaseimage': {
         'message': 'Could not pull base image',
         'show_base_error': True
       },

       'io.quay.builder.internalerror': {
         'message': 'An internal error occurred while building. Please submit a ticket.',
         'is_internal': True
       },

       'io.quay.builder.buildrunerror': {
         'message': 'Could not start the build process',
         'is_internal': True
       },

       'io.quay.builder.builderror': {
         'message': 'A build step failed',
         'show_base_error': True
       },

       'io.quay.builder.tagissue': {
         'message': 'Could not tag built image',
         'is_internal': True
       },

       'io.quay.builder.pushissue': {
         'message': 'Could not push built image',
         'show_base_error': True,
         'is_internal': True
       },

       'io.quay.builder.dockerconnecterror': {
         'message': 'Could not connect to Docker daemon',
         'is_internal': True
       },

       'io.quay.builder.missingorinvalidargument': {
         'message': 'Missing required arguments for builder',
         'is_internal': True
       },

       'io.quay.builder.cachelookupissue': {
         'message': 'Error checking for a cached tag',
         'is_internal': True
-      },
-
-      'io.quay.builder.errorduringphasetransition': {
-        'message': 'Error during phase transition. If this problem persists ' +
-                   'please contact customer support.',
-        'is_internal': True
-      },
-
-      'io.quay.builder.clientrejectedtransition': {
-        'message': 'Build can not be finished due to user cancellation.',
-      }
+      }
     }
def is_internal_error(self): def is_internal_error(self):
@ -110,10 +98,10 @@ class WorkerError(object):
   def extra_data(self):
     if self._base_message:
       return {
         'base_error': self._base_message,
         'error_code': self._error_code
       }

     return {
       'error_code': self._error_code
     }
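To make the mapping above concrete, here is a hedged sketch of how one entry is consumed; only is_internal_error() and extra_data() appear in the diff, and the module path and constructor signature are assumptions:

  from buildman.jobutil.workererror import WorkerError   # module path assumed

  error = WorkerError('io.quay.builder.gitfailure', 'fatal: repository not found')

  error.is_internal_error()   # presumably False: the entry sets show_base_error, not is_internal
  error.extra_data()          # {'base_error': 'fatal: repository not found',
                              #  'error_code': 'io.quay.builder.gitfailure'}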


@ -17,7 +17,7 @@ class BaseManager(object):
         every few minutes. """
     self.job_heartbeat_callback(build_job)

-  def overall_setup_time(self):
+  def setup_time(self):
     """ Returns the number of seconds that the build system should wait before allowing the job
         to be picked up again after calling 'schedule'.
     """
@ -58,7 +58,7 @@ class BaseManager(object):
   @coroutine
   def job_completed(self, build_job, job_status, build_component):
     """ Method invoked once a job_item has completed, in some manner. The job_status will be
-        one of: incomplete, error, complete. Implementations of this method should call coroutine
+        one of: incomplete, error, complete. Implementations of this method should call
         self.job_complete_callback with a status of Incomplete if they wish for the job to be
         automatically requeued.
     """


@ -1,27 +0,0 @@
import logging
from buildman.manager.orchestrator_canceller import OrchestratorCanceller
from buildman.manager.noop_canceller import NoopCanceller
logger = logging.getLogger(__name__)
CANCELLERS = {'ephemeral': OrchestratorCanceller}
class BuildCanceller(object):
""" A class to manage cancelling a build """
def __init__(self, app=None):
self.build_manager_config = app.config.get('BUILD_MANAGER')
if app is None or self.build_manager_config is None:
self.handler = NoopCanceller()
else:
self.handler = None
def try_cancel_build(self, uuid):
""" A method to kill a running build """
if self.handler is None:
canceller = CANCELLERS.get(self.build_manager_config[0], NoopCanceller)
self.handler = canceller(self.build_manager_config[1])
return self.handler.try_cancel_build(uuid)
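A short usage sketch of the lazy canceller selection above (FakeApp and the module path are assumptions for illustration; only the config mapping is consulted):

  from buildman.manager.buildcanceller import BuildCanceller   # module path assumed

  class FakeApp(object):
    config = {}   # no BUILD_MANAGER entry

  # With no BUILD_MANAGER configured, the canceller falls back to NoopCanceller immediately.
  canceller = BuildCanceller(FakeApp())
  canceller.try_cancel_build('some-build-uuid')

  # With config = {'BUILD_MANAGER': ('ephemeral', {...})}, the first try_cancel_build call would
  # instead look up 'ephemeral' in CANCELLERS and instantiate OrchestratorCanceller with the dict.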

View file

@ -45,7 +45,7 @@ class EnterpriseManager(BaseManager):
     # production, build workers in enterprise are long-lived and register dynamically.
     self.register_component(REGISTRATION_REALM, DynamicRegistrationComponent)

-  def overall_setup_time(self):
+  def setup_time(self):
     # Builders are already registered, so the setup time should be essentially instant. We therefore
     # only return a minute here.
     return 60
@ -79,7 +79,7 @@ class EnterpriseManager(BaseManager):
   @coroutine
   def job_completed(self, build_job, job_status, build_component):
-    yield From(self.job_complete_callback(build_job, job_status))
+    self.job_complete_callback(build_job, job_status)

   def build_component_disposed(self, build_component, timed_out):
     self.all_components.remove(build_component)


@ -1,49 +1,56 @@
 import logging
+import etcd
 import uuid
 import calendar
+import os.path
 import json
 import time

 from collections import namedtuple
 from datetime import datetime, timedelta
-from six import iteritems
-from trollius import From, coroutine, Return, async, sleep
+from trollius import From, coroutine, Return, async
+from concurrent.futures import ThreadPoolExecutor
+from urllib3.exceptions import ReadTimeoutError, ProtocolError

 from app import metric_queue
-from buildman.orchestrator import (orchestrator_from_config, KeyEvent,
-                                   OrchestratorError, OrchestratorConnectionError,
-                                   ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
 from buildman.manager.basemanager import BaseManager
 from buildman.manager.executor import PopenExecutor, EC2Executor, KubernetesExecutor
 from buildman.component.buildcomponent import BuildComponent
 from buildman.jobutil.buildjob import BuildJob
+from buildman.asyncutil import AsyncWrapper
 from buildman.server import BuildJobResult
-from util import slash_join
 from util.morecollections import AttrDict

 logger = logging.getLogger(__name__)

-JOB_PREFIX = 'building/'
-LOCK_PREFIX = 'lock/'
-REALM_PREFIX = 'realm/'
-CANCEL_PREFIX = 'cancel/'
-METRIC_PREFIX = 'metric/'
-
-CANCELED_LOCK_PREFIX = slash_join(LOCK_PREFIX, 'job-cancelled')
-EXPIRED_LOCK_PREFIX = slash_join(LOCK_PREFIX, 'job-expired')
-
-EPHEMERAL_API_TIMEOUT = 20
-EPHEMERAL_SETUP_TIMEOUT = 500
-
-RETRY_IMMEDIATELY_SLEEP_DURATION = 0
-TOO_MANY_WORKERS_SLEEP_DURATION = 10
+ETCD_MAX_WATCH_TIMEOUT = 30
+ETCD_ATOMIC_OP_TIMEOUT = 10000
+RETRY_IMMEDIATELY_TIMEOUT = 0
+NO_WORKER_AVAILABLE_TIMEOUT = 10
+DEFAULT_EPHEMERAL_API_TIMEOUT = 20
+DEFAULT_EPHEMERAL_SETUP_TIMEOUT = 500
+
+class EtcdAction(object):
+  """ Enumeration of the various kinds of etcd actions we can observe via a watch. """
+  GET = 'get'
+  SET = 'set'
+  EXPIRE = 'expire'
+  UPDATE = 'update'
+  DELETE = 'delete'
+  CREATE = 'create'
+  COMPARE_AND_SWAP = 'compareAndSwap'
+  COMPARE_AND_DELETE = 'compareAndDelete'

 BuildInfo = namedtuple('BuildInfo', ['component', 'build_job', 'execution_id', 'executor_name'])

+def _create_async_etcd_client(worker_threads=1, **kwargs):
+  client = etcd.Client(**kwargs)
+  async_executor = ThreadPoolExecutor(worker_threads)
+  return AsyncWrapper(client, executor=async_executor), async_executor

 class EphemeralBuilderManager(BaseManager):
   """ Build manager implementation for the Enterprise Registry. """
@ -55,12 +62,23 @@ class EphemeralBuilderManager(BaseManager):
} }
   def __init__(self, *args, **kwargs):
+    self._etcd_client_creator = kwargs.pop('etcd_creator', _create_async_etcd_client)
+
     super(EphemeralBuilderManager, self).__init__(*args, **kwargs)

     self._shutting_down = False
     self._manager_config = None
-    self._orchestrator = None
+    self._async_thread_executor = None
+    self._etcd_client = None
+
+    self._etcd_realm_prefix = None
+    self._etcd_job_prefix = None
+    self._etcd_lock_prefix = None
+    self._etcd_metric_prefix = None
+
+    self._ephemeral_api_timeout = DEFAULT_EPHEMERAL_API_TIMEOUT
+    self._ephemeral_setup_timeout = DEFAULT_EPHEMERAL_SETUP_TIMEOUT

     # The registered executors available for running jobs, in order.
     self._ordered_executors = []
@ -68,113 +86,156 @@ class EphemeralBuilderManager(BaseManager):
     # The registered executors, mapped by their unique name.
     self._executor_name_to_executor = {}

+    # Map of etcd keys being watched to the tasks watching them
+    self._watch_tasks = {}
+
     # Map from builder component to its associated job.
     self._component_to_job = {}

     # Map from build UUID to a BuildInfo tuple with information about the build.
     self._build_uuid_to_info = {}
def overall_setup_time(self): def _watch_etcd(self, etcd_key, change_coroutine_callback, start_index=None, recursive=True,
return EPHEMERAL_SETUP_TIMEOUT restarter=None):
watch_task_key = (etcd_key, recursive)
def callback_wrapper(changed_key_future):
new_index = start_index
etcd_result = None
if not changed_key_future.cancelled():
try:
etcd_result = changed_key_future.result()
existing_index = getattr(etcd_result, 'etcd_index', None)
new_index = etcd_result.modifiedIndex + 1
logger.debug('Got watch of key: %s%s at #%s with result: %s', etcd_key,
'*' if recursive else '', existing_index, etcd_result)
except ReadTimeoutError:
logger.debug('Read-timeout on etcd watch %s, rescheduling', etcd_key)
except etcd.EtcdEventIndexCleared:
# This happens if etcd2 has moved forward too fast for us to start watching
# at the index we retrieved. We therefore start a new watch at HEAD and
# (if specified) call the restarter method which should conduct a read and
# reset the state of the manager.
logger.exception('Etcd moved forward too quickly. Restarting watch cycle.')
new_index = None
if restarter is not None:
async(restarter())
except (KeyError, etcd.EtcdKeyError):
logger.debug('Etcd key already cleared: %s', etcd_key)
return
except etcd.EtcdException as eex:
# TODO(jschorr): This is a quick and dirty hack and should be replaced
# with a proper exception check.
if str(eex.message).find('Read timed out') >= 0:
logger.debug('Read-timeout on etcd watch %s, rescheduling', etcd_key)
else:
logger.exception('Exception on etcd watch: %s', etcd_key)
except ProtocolError:
logger.exception('Exception on etcd watch: %s', etcd_key)
if watch_task_key not in self._watch_tasks or self._watch_tasks[watch_task_key].done():
self._watch_etcd(etcd_key, change_coroutine_callback, start_index=new_index,
restarter=restarter)
if etcd_result:
async(change_coroutine_callback(etcd_result))
if not self._shutting_down:
logger.debug('Scheduling watch of key: %s%s at start index %s', etcd_key,
'*' if recursive else '', start_index)
watch_future = self._etcd_client.watch(etcd_key, recursive=recursive, index=start_index,
timeout=ETCD_MAX_WATCH_TIMEOUT)
watch_future.add_done_callback(callback_wrapper)
self._watch_tasks[watch_task_key] = async(watch_future)
@coroutine @coroutine
def _mark_job_incomplete(self, build_job, build_info): def _handle_job_change(self, etcd_result):
""" Marks a job as incomplete, in response to a failure to start or a timeout. """ """ Handler invoked whenever a job expires or is deleted in etcd. """
executor_name = build_info.executor_name if etcd_result is None:
execution_id = build_info.execution_id
logger.warning('Build executor failed to successfully boot with execution id %s',
execution_id)
# Take a lock to ensure that only one manager reports the build as incomplete for this
# execution.
lock_key = slash_join(self._expired_lock_prefix, build_job.build_uuid, execution_id)
acquired_lock = yield From(self._orchestrator.lock(lock_key))
if acquired_lock:
try:
# Clean up the bookkeeping for the job.
yield From(self._orchestrator.delete_key(self._job_key(build_job)))
except KeyError:
logger.debug('Could not delete job key %s; might have been removed already',
build_job.build_uuid)
logger.error('[BUILD INTERNAL ERROR] Build ID: %s. Exec name: %s. Exec ID: %s',
build_job.build_uuid, executor_name, execution_id)
yield From(self.job_complete_callback(build_job, BuildJobResult.INCOMPLETE, executor_name,
update_phase=True))
else:
logger.debug('Did not get lock for job-expiration for job %s', build_job.build_uuid)
@coroutine
def _job_callback(self, key_change):
"""
This is the callback invoked when keys related to jobs are changed.
It ignores all events related to the creation of new jobs.
Deletes or expirations cause checks to ensure they've been properly marked as completed.
:param key_change: the event and value produced by a key changing in the orchestrator
:type key_change: :class:`KeyChange`
"""
if key_change.event in (KeyEvent.CREATE, KeyEvent.SET):
raise Return() raise Return()
elif key_change.event in (KeyEvent.DELETE, KeyEvent.EXPIRE): if etcd_result.action in (EtcdAction.CREATE, EtcdAction.SET):
raise Return()
elif etcd_result.action in (EtcdAction.DELETE, EtcdAction.EXPIRE):
# Handle the expiration/deletion. # Handle the expiration/deletion.
job_metadata = json.loads(key_change.value) job_metadata = json.loads(etcd_result._prev_node.value)
build_job = BuildJob(AttrDict(job_metadata['job_queue_item'])) build_job = BuildJob(AttrDict(job_metadata['job_queue_item']))
logger.debug('Got "%s" of job %s', key_change.event, build_job.build_uuid) logger.debug('Got "%s" of job %s', etcd_result.action, build_job.build_uuid)
# Get the build info. # Get the build info.
build_info = self._build_uuid_to_info.get(build_job.build_uuid, None) build_info = self._build_uuid_to_info.get(build_job.build_uuid, None)
if build_info is None: if build_info is None:
logger.debug('No build info for "%s" job %s (%s); probably already deleted by this manager', logger.debug('No build info for "%s" job %s (%s); probably already deleted by this manager',
key_change.event, build_job.build_uuid, job_metadata) etcd_result.action, build_job.build_uuid, job_metadata)
raise Return() raise Return()
if key_change.event != KeyEvent.EXPIRE: # If the etcd action was not an expiration, then it was already deleted by some manager and
# If the etcd action was not an expiration, then it was already deleted by some manager and # the execution was therefore already shutdown.
# the execution was therefore already shutdown. All that's left is to remove the build info. if etcd_result.action != EtcdAction.EXPIRE:
# Build information will no longer be needed; pop it off.
self._build_uuid_to_info.pop(build_job.build_uuid, None) self._build_uuid_to_info.pop(build_job.build_uuid, None)
raise Return() raise Return()
logger.debug('got expiration for job %s with metadata: %s', build_job.build_uuid, executor_name = build_info.executor_name
job_metadata) execution_id = build_info.execution_id
# If we have not yet received a heartbeat, then the node failed to boot in some way. We mark
# the job as incomplete here.
if not job_metadata.get('had_heartbeat', False): if not job_metadata.get('had_heartbeat', False):
# If we have not yet received a heartbeat, then the node failed to boot in some way. logger.warning('Build executor failed to successfully boot with execution id %s',
# We mark the job as incomplete here. execution_id)
yield From(self._mark_job_incomplete(build_job, build_info))
# Take a lock to ensure that only one manager reports the build as incomplete for this
# execution.
got_lock = yield From(self._take_etcd_atomic_lock('job-expired', build_job.build_uuid,
execution_id))
if got_lock:
logger.warning('Marking job %s as incomplete', build_job.build_uuid)
self.job_complete_callback(build_job, BuildJobResult.INCOMPLETE, executor_name,
update_phase=True)
# Finally, we terminate the build execution for the job. We don't do this under a lock as # Finally, we terminate the build execution for the job. We don't do this under a lock as
# terminating a node is an atomic operation; better to make sure it is terminated than not. # terminating a node is an atomic operation; better to make sure it is terminated than not.
logger.info('Terminating expired build executor for job %s with execution id %s', logger.info('Terminating expired build executor for job %s with execution id %s',
build_job.build_uuid, build_info.execution_id) build_job.build_uuid, execution_id)
yield From(self.kill_builder_executor(build_job.build_uuid)) yield From(self.kill_builder_executor(build_job.build_uuid))
else: else:
logger.warning('Unexpected KeyEvent (%s) on job key: %s', key_change.event, key_change.key) logger.warning('Unexpected action (%s) on job key: %s', etcd_result.action, etcd_result.key)
@coroutine @coroutine
def _realm_callback(self, key_change): def _handle_realm_change(self, etcd_result):
logger.debug('realm callback for key: %s', key_change.key) if etcd_result is None:
if key_change.event == KeyEvent.CREATE: raise Return()
# Listen on the realm created by ourselves or another worker.
realm_spec = json.loads(key_change.value) if etcd_result.action == EtcdAction.CREATE:
# We must listen on the realm created by ourselves or another worker
realm_spec = json.loads(etcd_result.value)
self._register_realm(realm_spec) self._register_realm(realm_spec)
elif key_change.event in (KeyEvent.DELETE, KeyEvent.EXPIRE): elif etcd_result.action in (EtcdAction.DELETE, EtcdAction.EXPIRE):
# Stop listening for new connections on the realm, if we did not get the connection. # We must stop listening for new connections on the specified realm, if we did not get the
realm_spec = json.loads(key_change.value) # connection
realm_spec = json.loads(etcd_result._prev_node.value)
realm_id = realm_spec['realm'] realm_id = realm_spec['realm']
build_job = BuildJob(AttrDict(realm_spec['job_queue_item'])) build_job = BuildJob(AttrDict(realm_spec['job_queue_item']))
build_uuid = build_job.build_uuid build_uuid = build_job.build_uuid
logger.debug('Realm key %s for build %s was %s', realm_id, build_uuid, key_change.event) logger.debug('Realm key %s for build %s was %s', realm_id, build_uuid, etcd_result.action)
build_info = self._build_uuid_to_info.get(build_uuid, None) build_info = self._build_uuid_to_info.get(build_uuid, None)
if build_info is not None: if build_info is not None:
# Pop off the component and if we find one, then the build has not connected to this # Pop the component off. If we find one, then the build has not connected to this manager,
# manager, so we can safely unregister its component. # so we can safely unregister its component.
component = self._component_to_job.pop(build_info.component, None) component = self._component_to_job.pop(build_info.component, None)
if component is not None: if component is not None:
# We were not the manager which the worker connected to, remove the bookkeeping for it # We were not the manager which the worker connected to, remove the bookkeeping for it
@ -182,23 +243,16 @@ class EphemeralBuilderManager(BaseManager):
self.unregister_component(build_info.component) self.unregister_component(build_info.component)
# If the realm has expired, then perform cleanup of the executor. # If the realm has expired, then perform cleanup of the executor.
if key_change.event == KeyEvent.EXPIRE: if etcd_result.action == EtcdAction.EXPIRE:
execution_id = realm_spec.get('execution_id', None) execution_id = realm_spec.get('execution_id', None)
executor_name = realm_spec.get('executor_name', 'EC2Executor') executor_name = realm_spec.get('executor_name', 'EC2Executor')
# Cleanup the job, since it never started.
logger.debug('Job %s for incomplete marking: %s', build_uuid, build_info)
if build_info is not None:
yield From(self._mark_job_incomplete(build_job, build_info))
# Cleanup the executor.
logger.info('Realm %s expired for job %s, terminating executor %s with execution id %s', logger.info('Realm %s expired for job %s, terminating executor %s with execution id %s',
realm_id, build_uuid, executor_name, execution_id) realm_id, build_uuid, executor_name, execution_id)
yield From(self.terminate_executor(executor_name, execution_id)) yield From(self.terminate_executor(executor_name, execution_id))
else: else:
logger.warning('Unexpected action (%s) on realm key: %s', key_change.event, key_change.key) logger.warning('Unexpected action (%s) on realm key: %s', etcd_result.action, etcd_result.key)
def _register_realm(self, realm_spec): def _register_realm(self, realm_spec):
logger.debug('Got call to register realm %s with manager', realm_spec['realm']) logger.debug('Got call to register realm %s with manager', realm_spec['realm'])
@ -228,19 +282,23 @@ class EphemeralBuilderManager(BaseManager):
@coroutine @coroutine
def _register_existing_realms(self): def _register_existing_realms(self):
try: try:
all_realms = yield From(self._orchestrator.get_prefixed_keys(self._realm_prefix)) all_realms = yield From(self._etcd_client.read(self._etcd_realm_prefix, recursive=True))
# Register all existing realms found. # Register all existing realms found.
encountered = {self._register_realm(json.loads(realm_data)) encountered = set()
for _realm, realm_data in all_realms} for realm in all_realms.children:
if not realm.dir:
component = self._register_realm(json.loads(realm.value))
encountered.add(component)
# Remove any components not encountered so we can clean up. # Remove any components not encountered so we can clean up.
for component, job in iteritems(self._component_to_job): for component, job in list(self._component_to_job.items()):
if not component in encountered: if not component in encountered:
self._component_to_job.pop(component, None) self._component_to_job.pop(component, None)
self._build_uuid_to_info.pop(job.build_uuid, None) self._build_uuid_to_info.pop(job.build_uuid, None)
except KeyError: except (KeyError, etcd.EtcdKeyError):
# no realms have been registered yet
pass pass
def _load_executor(self, executor_kind_name, executor_config): def _load_executor(self, executor_kind_name, executor_config):
@ -256,71 +314,6 @@ class EphemeralBuilderManager(BaseManager):
self._ordered_executors.append(executor) self._ordered_executors.append(executor)
self._executor_name_to_executor[executor.name] = executor self._executor_name_to_executor[executor.name] = executor
def _config_prefix(self, key):
if self._manager_config.get('ORCHESTRATOR') is None:
return key
prefix = self._manager_config.get('ORCHESTRATOR_PREFIX', '')
return slash_join(prefix, key).lstrip('/') + '/'
@property
def _job_prefix(self):
return self._config_prefix(JOB_PREFIX)
@property
def _realm_prefix(self):
return self._config_prefix(REALM_PREFIX)
@property
def _cancel_prefix(self):
return self._config_prefix(CANCEL_PREFIX)
@property
def _metric_prefix(self):
return self._config_prefix(METRIC_PREFIX)
@property
def _expired_lock_prefix(self):
return self._config_prefix(EXPIRED_LOCK_PREFIX)
@property
def _canceled_lock_prefix(self):
return self._config_prefix(CANCELED_LOCK_PREFIX)
def _metric_key(self, realm):
"""
Create a key which is used to track a job in the Orchestrator.
:param realm: realm for the build
:type realm: str
:returns: key used to track jobs
:rtype: str
"""
return slash_join(self._metric_prefix, realm)
def _job_key(self, build_job):
"""
Creates a key which is used to track a job in the Orchestrator.
:param build_job: unique job identifier for a build
:type build_job: str
:returns: key used to track the job
:rtype: str
"""
return slash_join(self._job_prefix, build_job.job_details['build_uuid'])
def _realm_key(self, realm):
"""
Create a key which is used to track an incoming connection on a realm.
:param realm: realm for the build
:type realm: str
:returns: key used to track the connection to the realm
:rtype: str
"""
return slash_join(self._realm_prefix, realm)
def initialize(self, manager_config): def initialize(self, manager_config):
logger.debug('Calling initialize') logger.debug('Calling initialize')
self._manager_config = manager_config self._manager_config = manager_config
@ -334,55 +327,93 @@ class EphemeralBuilderManager(BaseManager):
else: else:
self._load_executor(manager_config.get('EXECUTOR'), manager_config.get('EXECUTOR_CONFIG')) self._load_executor(manager_config.get('EXECUTOR'), manager_config.get('EXECUTOR_CONFIG'))
logger.debug('calling orchestrator_from_config') etcd_host = self._manager_config.get('ETCD_HOST', '127.0.0.1')
self._orchestrator = orchestrator_from_config(manager_config) etcd_port = self._manager_config.get('ETCD_PORT', 2379)
etcd_ca_cert = self._manager_config.get('ETCD_CA_CERT', None)
logger.debug('setting on_key_change callbacks for job, cancel, realm') etcd_auth = self._manager_config.get('ETCD_CERT_AND_KEY', None)
self._orchestrator.on_key_change(self._job_prefix, self._job_callback) if etcd_auth is not None:
self._orchestrator.on_key_change(self._cancel_prefix, self._cancel_callback) etcd_auth = tuple(etcd_auth) # Convert YAML list to a tuple
self._orchestrator.on_key_change(self._realm_prefix, self._realm_callback,
restarter=self._register_existing_realms) etcd_protocol = 'http' if etcd_auth is None else 'https'
logger.debug('Connecting to etcd on %s:%s', etcd_host, etcd_port)
worker_threads = self._manager_config.get('ETCD_WORKER_THREADS', 5)
(self._etcd_client, self._async_thread_executor) = self._etcd_client_creator(
worker_threads,
host=etcd_host,
port=etcd_port,
cert=etcd_auth,
ca_cert=etcd_ca_cert,
protocol=etcd_protocol,
read_timeout=5,
)
self._etcd_job_prefix = self._manager_config.get('ETCD_BUILDER_PREFIX', 'building/')
self._watch_etcd(self._etcd_job_prefix, self._handle_job_change)
self._etcd_realm_prefix = self._manager_config.get('ETCD_REALM_PREFIX', 'realm/')
self._watch_etcd(self._etcd_realm_prefix, self._handle_realm_change,
restarter=self._register_existing_realms)
self._etcd_lock_prefix = self._manager_config.get('ETCD_LOCK_PREFIX', 'lock/')
self._etcd_metric_prefix = self._manager_config.get('ETCD_METRIC_PREFIX', 'metric/')
self._ephemeral_api_timeout = self._manager_config.get('API_TIMEOUT',
DEFAULT_EPHEMERAL_API_TIMEOUT)
self._ephemeral_setup_timeout = self._manager_config.get('SETUP_TIMEOUT',
DEFAULT_EPHEMERAL_SETUP_TIMEOUT)
# Load components for all realms currently known to the cluster # Load components for all realms currently known to the cluster
async(self._register_existing_realms()) async(self._register_existing_realms())
def setup_time(self):
return self._ephemeral_setup_timeout
def shutdown(self): def shutdown(self):
logger.debug('Shutting down worker.') logger.debug('Shutting down worker.')
if self._orchestrator is not None: self._shutting_down = True
self._orchestrator.shutdown()
for (etcd_key, _), task in self._watch_tasks.items():
if not task.done():
logger.debug('Canceling watch task for %s', etcd_key)
task.cancel()
if self._async_thread_executor is not None:
logger.debug('Shutting down thread pool executor.')
self._async_thread_executor.shutdown()
@coroutine @coroutine
def schedule(self, build_job): def schedule(self, build_job):
build_uuid = build_job.job_details['build_uuid'] build_uuid = build_job.job_details['build_uuid']
logger.debug('Calling schedule with job: %s', build_uuid) logger.debug('Calling schedule with job: %s', build_uuid)
# Check if there are worker slots available by checking the number of jobs in the orchestrator # Check if there are worker slots available by checking the number of jobs in etcd
allowed_worker_count = self._manager_config.get('ALLOWED_WORKER_COUNT', 1) allowed_worker_count = self._manager_config.get('ALLOWED_WORKER_COUNT', 1)
try: try:
active_jobs = yield From(self._orchestrator.get_prefixed_keys(self._job_prefix)) active_jobs = yield From(self._etcd_client.read(self._etcd_job_prefix, recursive=True))
workers_alive = len(active_jobs) workers_alive = sum(1 for child in active_jobs.children if not child.dir)
except KeyError: except (KeyError, etcd.EtcdKeyError):
workers_alive = 0 workers_alive = 0
except OrchestratorConnectionError: except etcd.EtcdException:
logger.exception('Could not read job count from orchestrator for job due to orchestrator being down') logger.exception('Exception when reading job count from etcd for job: %s', build_uuid)
raise Return(False, ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION) raise Return(False, RETRY_IMMEDIATELY_TIMEOUT)
except OrchestratorError:
logger.exception('Exception when reading job count from orchestrator for job: %s', build_uuid)
raise Return(False, RETRY_IMMEDIATELY_SLEEP_DURATION)
logger.debug('Total jobs (scheduling job %s): %s', build_uuid, workers_alive) logger.debug('Total jobs (scheduling job %s): %s', build_uuid, workers_alive)
if workers_alive >= allowed_worker_count: if workers_alive >= allowed_worker_count:
logger.info('Too many workers alive, unable to start new worker for build job: %s. %s >= %s', logger.info('Too many workers alive, unable to start new worker for build job: %s. %s >= %s',
build_uuid, workers_alive, allowed_worker_count) build_uuid, workers_alive, allowed_worker_count)
raise Return(False, TOO_MANY_WORKERS_SLEEP_DURATION) raise Return(False, NO_WORKER_AVAILABLE_TIMEOUT)
job_key = self._job_key(build_job) job_key = self._etcd_job_key(build_job)
# First try to take a lock for this job, meaning we will be responsible for its lifeline # First try to take a lock for this job, meaning we will be responsible for its lifeline
realm = str(uuid.uuid4()) realm = str(uuid.uuid4())
token = str(uuid.uuid4()) token = str(uuid.uuid4())
nonce = str(uuid.uuid4()) nonce = str(uuid.uuid4())
setup_time = self.setup_time()
machine_max_expiration = self._manager_config.get('MACHINE_MAX_TIME', 7200) machine_max_expiration = self._manager_config.get('MACHINE_MAX_TIME', 7200)
max_expiration = datetime.utcnow() + timedelta(seconds=machine_max_expiration) max_expiration = datetime.utcnow() + timedelta(seconds=machine_max_expiration)
@ -396,22 +427,17 @@ class EphemeralBuilderManager(BaseManager):
lock_payload = json.dumps(payload) lock_payload = json.dumps(payload)
logger.debug('Writing key for job %s with expiration in %s seconds', build_uuid, logger.debug('Writing key for job %s with expiration in %s seconds', build_uuid,
EPHEMERAL_SETUP_TIMEOUT) self._ephemeral_setup_timeout)
try: try:
yield From(self._orchestrator.set_key(job_key, lock_payload, overwrite=False, yield From(self._etcd_client.write(job_key, lock_payload, prevExist=False,
expiration=EPHEMERAL_SETUP_TIMEOUT)) ttl=self._ephemeral_setup_timeout))
except KeyError: except (KeyError, etcd.EtcdKeyError):
logger.warning('Job: %s already exists in orchestrator, timeout may be misconfigured', # The job was already taken by someone else, we are probably a retry
build_uuid) logger.warning('Job: %s already exists in etcd, timeout may be misconfigured', build_uuid)
raise Return(False, EPHEMERAL_API_TIMEOUT) raise Return(False, self._ephemeral_api_timeout)
except OrchestratorConnectionError: except etcd.EtcdException:
logger.exception('Exception when writing job %s to orchestrator; could not connect', logger.exception('Exception when writing job %s to etcd', build_uuid)
build_uuid) raise Return(False, RETRY_IMMEDIATELY_TIMEOUT)
raise Return(False, ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
except OrchestratorError:
logger.exception('Exception when writing job %s to orchestrator', build_uuid)
raise Return(False, RETRY_IMMEDIATELY_SLEEP_DURATION)
# Got a lock, now lets boot the job via one of the registered executors. # Got a lock, now lets boot the job via one of the registered executors.
started_with_executor = None started_with_executor = None
@ -440,24 +466,12 @@ class EphemeralBuilderManager(BaseManager):
try: try:
execution_id = yield From(executor.start_builder(realm, token, build_uuid)) execution_id = yield From(executor.start_builder(realm, token, build_uuid))
except: except:
try:
metric_queue.build_start_failure.Inc(labelvalues=[executor.name])
metric_queue.put_deprecated(('ExecutorFailure-%s' % executor.name), 1, unit='Count')
except:
logger.exception('Exception when writing failure metric for execution %s for job %s',
execution_id, build_uuid)
logger.exception('Exception when starting builder for job: %s', build_uuid) logger.exception('Exception when starting builder for job: %s', build_uuid)
continue continue
try: try:
metric_queue.build_start_success.Inc(labelvalues=[executor.name]) metric_queue.put_deprecated('EphemeralBuilderStarted', 1, unit='Count')
except: metric_queue.ephemeral_build_workers.Inc(labelvalues=[execution_id, build_uuid])
logger.exception('Exception when writing success metric for execution %s for job %s',
execution_id, build_uuid)
try:
metric_queue.ephemeral_build_workers.Inc()
except: except:
logger.exception('Exception when writing start metrics for execution %s for job %s', logger.exception('Exception when writing start metrics for execution %s for job %s',
execution_id, build_uuid) execution_id, build_uuid)
@ -472,8 +486,12 @@ class EphemeralBuilderManager(BaseManager):
logger.error('Could not start ephemeral worker for build %s', build_uuid) logger.error('Could not start ephemeral worker for build %s', build_uuid)
# Delete the associated build job record. # Delete the associated build job record.
yield From(self._orchestrator.delete_key(job_key)) try:
raise Return(False, EPHEMERAL_API_TIMEOUT) yield From(self._etcd_client.delete(job_key))
except (KeyError, etcd.EtcdKeyError):
logger.warning('Could not delete job key %s', job_key)
raise Return(False, self._ephemeral_api_timeout)
# Job was started! # Job was started!
logger.debug('Started execution with ID %s for job: %s with executor: %s', logger.debug('Started execution with ID %s for job: %s with executor: %s',
@ -484,16 +502,14 @@ class EphemeralBuilderManager(BaseManager):
'executor_name': started_with_executor.name, 'executor_name': started_with_executor.name,
'start_time': time.time(), 'start_time': time.time(),
}) })
try: try:
yield From(self._orchestrator.set_key(self._metric_key(realm), metric_spec, overwrite=False, yield From(self._etcd_client.write(self._etcd_metric_key(realm), metric_spec, prevExist=False,
expiration=machine_max_expiration + 10)) ttl=machine_max_expiration + 10))
except KeyError: except (KeyError, etcd.EtcdKeyError):
logger.error('Realm %s already exists in orchestrator for job %s ' + logger.error('Realm %s already exists in etcd for job %s ' +
'UUID collision or something is very very wrong.', realm, build_uuid) 'UUID collision or something is very very wrong.', realm, build_uuid)
except OrchestratorError: except etcd.EtcdException:
logger.exception('Exception when writing realm %s to orchestrator for job %s', logger.exception('Exception when writing realm %s to etcd for job %s', realm, build_uuid)
realm, build_uuid)
# Store the realm spec which will allow any manager to accept this builder when it connects # Store the realm spec which will allow any manager to accept this builder when it connects
realm_spec = json.dumps({ realm_spec = json.dumps({
@ -505,34 +521,30 @@ class EphemeralBuilderManager(BaseManager):
}) })
try: try:
setup_time = started_with_executor.setup_time or self.overall_setup_time() yield From(self._etcd_client.write(self._etcd_realm_key(realm), realm_spec, prevExist=False,
logger.debug('Writing job key for job %s using executor %s with ID %s and ttl %s', build_uuid, ttl=setup_time))
started_with_executor.name, execution_id, setup_time) except (KeyError, etcd.EtcdKeyError):
yield From(self._orchestrator.set_key(self._realm_key(realm), realm_spec, logger.error('Realm %s already exists in etcd for job %s ' +
expiration=setup_time)) 'UUID collision or something is very very wrong.', realm, build_uuid)
except OrchestratorConnectionError: raise Return(False, setup_time)
logger.exception('Exception when writing realm %s to orchestrator for job %s', except etcd.EtcdException:
realm, build_uuid) logger.exception('Exception when writing realm %s to etcd for job %s', realm, build_uuid)
raise Return(False, ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
except OrchestratorError:
logger.exception('Exception when writing realm %s to orchestrator for job %s',
realm, build_uuid)
raise Return(False, setup_time) raise Return(False, setup_time)
logger.debug('Builder spawn complete for job %s using executor %s with ID %s ', logger.debug('Builder spawn complete for job %s using executor %s with ID %s ', build_uuid,
build_uuid, started_with_executor.name, execution_id) started_with_executor.name, execution_id)
raise Return(True, None) raise Return(True, None)
@coroutine @coroutine
def build_component_ready(self, build_component): def build_component_ready(self, build_component):
logger.debug('Got component ready for component with realm %s', build_component.builder_realm) logger.debug('Got component ready for component with realm %s', build_component.builder_realm)
# Pop off the job for the component. # Pop off the job for the component. We do so before we send out the etcd watch below,
# We do so before we send out the watch below, as it will also remove this mapping. # as it will also remove this mapping.
job = self._component_to_job.pop(build_component, None) job = self._component_to_job.pop(build_component, None)
if job is None: if job is None:
# This will occur once the build finishes, so no need to worry about it. # This will occur once the build finishes, so no need to worry about it. We log in case it
# We log in case it happens outside of the expected flow. # happens outside of the expected flow.
logger.debug('Could not find job for the build component on realm %s; component is ready', logger.debug('Could not find job for the build component on realm %s; component is ready',
build_component.builder_realm) build_component.builder_realm)
raise Return() raise Return()
@ -545,10 +557,10 @@ class EphemeralBuilderManager(BaseManager):
yield From(self._write_duration_metric(metric_queue.builder_time_to_build, yield From(self._write_duration_metric(metric_queue.builder_time_to_build,
build_component.builder_realm)) build_component.builder_realm))
# Clean up the bookkeeping for allowing any manager to take the job.
try: try:
yield From(self._orchestrator.delete_key(self._realm_key(build_component.builder_realm))) # Clean up the bookkeeping for allowing any manager to take the job.
except KeyError: yield From(self._etcd_client.delete(self._etcd_realm_key(build_component.builder_realm)))
except (KeyError, etcd.EtcdKeyError):
logger.warning('Could not delete realm key %s', build_component.builder_realm) logger.warning('Could not delete realm key %s', build_component.builder_realm)
def build_component_disposed(self, build_component, timed_out): def build_component_disposed(self, build_component, timed_out):
@ -566,32 +578,24 @@ class EphemeralBuilderManager(BaseManager):
# to ask for the phase to be updated as well. # to ask for the phase to be updated as well.
build_info = self._build_uuid_to_info.get(build_job.build_uuid, None) build_info = self._build_uuid_to_info.get(build_job.build_uuid, None)
executor_name = build_info.executor_name if build_info else None executor_name = build_info.executor_name if build_info else None
yield From(self.job_complete_callback(build_job, job_status, executor_name, update_phase=False)) self.job_complete_callback(build_job, job_status, executor_name, update_phase=False)
# Kill the ephemeral builder. # Kill the ephemeral builder.
yield From(self.kill_builder_executor(build_job.build_uuid)) yield From(self.kill_builder_executor(build_job.build_uuid))
# Delete the build job from the orchestrator. # Delete the build job from etcd.
job_key = self._etcd_job_key(build_job)
try: try:
job_key = self._job_key(build_job) yield From(self._etcd_client.delete(job_key))
yield From(self._orchestrator.delete_key(job_key)) except (KeyError, etcd.EtcdKeyError):
except KeyError:
logger.debug('Builder is asking for job to be removed, but work already completed') logger.debug('Builder is asking for job to be removed, but work already completed')
except OrchestratorConnectionError:
logger.exception('Could not remove job key as orchestrator is not available')
yield From(sleep(ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION))
raise Return()
# Delete the metric from the orchestrator. # Delete the metric from etcd
metric_key = self._etcd_metric_key(build_component.builder_realm)
try: try:
metric_key = self._metric_key(build_component.builder_realm) yield From(self._etcd_client.delete(metric_key))
yield From(self._orchestrator.delete_key(metric_key)) except (KeyError, etcd.EtcdKeyError):
except KeyError:
logger.debug('Builder is asking for metric to be removed, but key not found') logger.debug('Builder is asking for metric to be removed, but key not found')
except OrchestratorConnectionError:
logger.exception('Could not remove metric key as orchestrator is not available')
yield From(sleep(ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION))
raise Return()
logger.debug('job_completed for job %s with status: %s', build_job.build_uuid, job_status) logger.debug('job_completed for job %s with status: %s', build_job.build_uuid, job_status)
@ -622,24 +626,19 @@ class EphemeralBuilderManager(BaseManager):
@coroutine @coroutine
def job_heartbeat(self, build_job): def job_heartbeat(self, build_job):
""" # Extend the queue item.
:param build_job: the build job whose heartbeat should be recorded
:type build_job: BuildJob
"""
self.job_heartbeat_callback(build_job) self.job_heartbeat_callback(build_job)
self._extend_job_in_orchestrator(build_job)
@coroutine # Extend the deadline in etcd.
def _extend_job_in_orchestrator(self, build_job): job_key = self._etcd_job_key(build_job)
try: try:
job_data = yield From(self._orchestrator.get_key(self._job_key(build_job))) build_job_metadata_response = yield From(self._etcd_client.read(job_key))
except KeyError: except (KeyError, etcd.EtcdKeyError):
logger.info('Job %s no longer exists in the orchestrator', build_job.build_uuid) logger.info('Job %s no longer exists in etcd', build_job.build_uuid)
raise Return() raise Return()
except OrchestratorConnectionError:
logger.exception('failed to connect when attempted to extend job')
build_job_metadata = json.loads(job_data) build_job_metadata = json.loads(build_job_metadata_response.value)
max_expiration = datetime.utcfromtimestamp(build_job_metadata['max_expiration']) max_expiration = datetime.utcfromtimestamp(build_job_metadata['max_expiration'])
max_expiration_remaining = max_expiration - datetime.utcnow() max_expiration_remaining = max_expiration - datetime.utcnow()
@ -652,22 +651,34 @@ class EphemeralBuilderManager(BaseManager):
'had_heartbeat': True, 'had_heartbeat': True,
} }
# Note: A TTL of < 0 in etcd results in the key *never being expired*. We use a max here
# to ensure that if the TTL is < 0, the key will expire immediately.
etcd_ttl = max(ttl, 0)
yield From(self._etcd_client.write(job_key, json.dumps(payload), ttl=etcd_ttl))
@coroutine
def _take_etcd_atomic_lock(self, path, *args):
""" Takes a lock for atomic operations via etcd over the given path. Returns true if the lock
was granted and false otherwise.
"""
pieces = [self._etcd_lock_prefix, path]
pieces.extend(args)
lock_key = os.path.join(*pieces)
try: try:
yield From(self._orchestrator.set_key(self._job_key(build_job), json.dumps(payload), yield From(self._etcd_client.write(lock_key, {}, prevExist=False, ttl=ETCD_ATOMIC_OP_TIMEOUT))
expiration=ttl)) raise Return(True)
except OrchestratorConnectionError: except (KeyError, etcd.EtcdKeyError):
logger.exception('Could not update heartbeat for job as the orchestrator is not available') raise Return(False)
yield From(sleep(ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION))
@coroutine @coroutine
def _write_duration_metric(self, metric, realm): def _write_duration_metric(self, metric, realm):
""" """ Returns true if the metric was written and and false otherwise.
:returns: True if the metric was written, otherwise False
:rtype: bool
""" """
try: try:
metric_data = yield From(self._orchestrator.get_key(self._metric_key(realm))) metric_data = yield From(self._etcd_client.read(self._etcd_metric_key(realm)))
parsed_metric_data = json.loads(metric_data) parsed_metric_data = json.loads(metric_data.value)
start_time = parsed_metric_data['start_time'] start_time = parsed_metric_data['start_time']
metric.Observe(time.time() - start_time, metric.Observe(time.time() - start_time,
labelvalues=[parsed_metric_data.get('executor_name', labelvalues=[parsed_metric_data.get('executor_name',
@ -675,36 +686,22 @@ class EphemeralBuilderManager(BaseManager):
except Exception: except Exception:
logger.exception("Could not write metric for realm %s", realm) logger.exception("Could not write metric for realm %s", realm)
def num_workers(self): def _etcd_metric_key(self, realm):
""" Create a key which is used to track a job in etcd.
""" """
The number of workers we're managing locally. return os.path.join(self._etcd_metric_prefix, realm)
:returns: the number of workers managed locally def _etcd_job_key(self, build_job):
:rtype: int """ Create a key which is used to track a job in etcd.
"""
return os.path.join(self._etcd_job_prefix, build_job.job_details['build_uuid'])
def _etcd_realm_key(self, realm):
""" Create a key which is used to track an incoming connection on a realm.
"""
return os.path.join(self._etcd_realm_prefix, realm)
def num_workers(self):
""" Return the number of workers we're managing locally.
""" """
return len(self._component_to_job) return len(self._component_to_job)
@coroutine
def _cancel_callback(self, key_change):
if key_change.event not in (KeyEvent.CREATE, KeyEvent.SET):
raise Return()
build_uuid = key_change.value
build_info = self._build_uuid_to_info.get(build_uuid, None)
if build_info is None:
logger.debug('No build info for "%s" job %s', key_change.event, build_uuid)
raise Return(False)
lock_key = slash_join(self._canceled_lock_prefix,
build_uuid, build_info.execution_id)
lock_acquired = yield From(self._orchestrator.lock(lock_key))
if lock_acquired:
builder_realm = build_info.component.builder_realm
yield From(self.kill_builder_executor(build_uuid))
yield From(self._orchestrator.delete_key(self._realm_key(builder_realm)))
yield From(self._orchestrator.delete_key(self._metric_key(builder_realm)))
yield From(self._orchestrator.delete_key(slash_join(self._job_prefix, build_uuid)))
# This is outside the lock so we can un-register the component wherever it is registered to.
yield From(build_info.component.cancel_build())

View file

@ -1,37 +0,0 @@
import logging
import etcd
logger = logging.getLogger(__name__)
class EtcdCanceller(object):
""" A class that sends a message to etcd to cancel a build """
def __init__(self, config):
etcd_host = config.get('ETCD_HOST', '127.0.0.1')
etcd_port = config.get('ETCD_PORT', 2379)
etcd_ca_cert = config.get('ETCD_CA_CERT', None)
etcd_auth = config.get('ETCD_CERT_AND_KEY', None)
if etcd_auth is not None:
etcd_auth = tuple(etcd_auth)
etcd_protocol = 'http' if etcd_auth is None else 'https'
logger.debug('Connecting to etcd on %s:%s', etcd_host, etcd_port)
self._cancel_prefix = config.get('ETCD_CANCEL_PREFIX', 'cancel/')
self._etcd_client = etcd.Client(
host=etcd_host,
port=etcd_port,
cert=etcd_auth,
ca_cert=etcd_ca_cert,
protocol=etcd_protocol,
read_timeout=5)
def try_cancel_build(self, build_uuid):
""" Writes etcd message to cancel build_uuid. """
logger.info("Cancelling build %s".format(build_uuid))
try:
self._etcd_client.write("{}{}".format(self._cancel_prefix, build_uuid), build_uuid, ttl=60)
return True
except etcd.EtcdException:
logger.exception("Failed to write to etcd client %s", build_uuid)
return False

View file

@ -1,29 +1,24 @@
import datetime
import hashlib
import logging import logging
import os import os
import socket
import subprocess
import threading
import uuid import uuid
import threading
from functools import partial
import boto.ec2 import boto.ec2
import cachetools.func
import requests import requests
import cachetools
import trollius import trollius
import datetime
import release
import socket
import hashlib
from container_cloud_config import CloudConfigContext
from jinja2 import FileSystemLoader, Environment from jinja2 import FileSystemLoader, Environment
from trollius import coroutine, From, Return, get_event_loop from trollius import coroutine, From, Return, get_event_loop
from functools import partial
import release
from buildman.asyncutil import AsyncWrapper from buildman.asyncutil import AsyncWrapper
from container_cloud_config import CloudConfigContext
from app import metric_queue, app from app import metric_queue, app
from util.metrics.metricqueue import duration_collector_async from util.metrics.metricqueue import duration_collector_async
from _init import ROOT_DIR
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -34,7 +29,7 @@ ONE_HOUR = 60*60
_TAG_RETRY_COUNT = 3 # Number of times to retry adding tags. _TAG_RETRY_COUNT = 3 # Number of times to retry adding tags.
_TAG_RETRY_SLEEP = 2 # Number of seconds to wait between tag retries. _TAG_RETRY_SLEEP = 2 # Number of seconds to wait between tag retries.
ENV = Environment(loader=FileSystemLoader(os.path.join(ROOT_DIR, "buildman/templates"))) ENV = Environment(loader=FileSystemLoader('buildman/templates'))
TEMPLATE = ENV.get_template('cloudconfig.yaml') TEMPLATE = ENV.get_template('cloudconfig.yaml')
CloudConfigContext().populate_jinja_environment(ENV) CloudConfigContext().populate_jinja_environment(ENV)
@ -60,13 +55,6 @@ class BuilderExecutor(object):
""" Name returns the unique name for this executor. """ """ Name returns the unique name for this executor. """
return self.executor_config.get('NAME') or self.__class__.__name__ return self.executor_config.get('NAME') or self.__class__.__name__
@property
def setup_time(self):
""" Returns the amount of time (in seconds) to wait for the execution to start for the build.
If None, the manager's default will be used.
"""
return self.executor_config.get('SETUP_TIME')
@coroutine @coroutine
def start_builder(self, realm, token, build_uuid): def start_builder(self, realm, token, build_uuid):
""" Create a builder with the specified config. Returns a unique id which can be used to manage """ Create a builder with the specified config. Returns a unique id which can be used to manage
@ -104,9 +92,8 @@ class BuilderExecutor(object):
none. """ none. """
return self.executor_config.get('MINIMUM_RETRY_THRESHOLD', 0) return self.executor_config.get('MINIMUM_RETRY_THRESHOLD', 0)
def generate_cloud_config(self, realm, token, build_uuid, coreos_channel, def generate_cloud_config(self, realm, token, coreos_channel, manager_hostname,
manager_hostname, quay_username=None, quay_username=None, quay_password=None):
quay_password=None):
if quay_username is None: if quay_username is None:
quay_username = self.executor_config['QUAY_USERNAME'] quay_username = self.executor_config['QUAY_USERNAME']
@ -114,20 +101,16 @@ class BuilderExecutor(object):
quay_password = self.executor_config['QUAY_PASSWORD'] quay_password = self.executor_config['QUAY_PASSWORD']
return TEMPLATE.render( return TEMPLATE.render(
realm=realm, realm=realm,
token=token, token=token,
build_uuid=build_uuid, quay_username=quay_username,
quay_username=quay_username, quay_password=quay_password,
quay_password=quay_password, manager_hostname=manager_hostname,
manager_hostname=manager_hostname, websocket_scheme=self.websocket_scheme,
websocket_scheme=self.websocket_scheme, coreos_channel=coreos_channel,
coreos_channel=coreos_channel, worker_tag=self.executor_config['WORKER_TAG'],
worker_image=self.executor_config.get('WORKER_IMAGE', 'quay.io/coreos/registry-build-worker'), logentries_token=self.executor_config.get('LOGENTRIES_TOKEN', None),
worker_tag=self.executor_config['WORKER_TAG'], volume_size=self.executor_config.get('VOLUME_SIZE', '42G'),
logentries_token=self.executor_config.get('LOGENTRIES_TOKEN', None),
volume_size=self.executor_config.get('VOLUME_SIZE', '42G'),
max_lifetime_s=self.executor_config.get('MAX_LIFETIME_S', 10800),
ssh_authorized_keys=self.executor_config.get('SSH_AUTHORIZED_KEYS', []),
) )
@ -151,7 +134,7 @@ class EC2Executor(BuilderExecutor):
)) ))
@classmethod @classmethod
@cachetools.func.ttl_cache(ttl=ONE_HOUR) @cachetools.ttl_cache(ttl=ONE_HOUR)
def _get_coreos_ami(cls, ec2_region, coreos_channel): def _get_coreos_ami(cls, ec2_region, coreos_channel):
""" Retrieve the CoreOS AMI id from the canonical listing. """ Retrieve the CoreOS AMI id from the canonical listing.
""" """
@ -170,7 +153,7 @@ class EC2Executor(BuilderExecutor):
get_ami_callable = partial(self._get_coreos_ami, region, channel) get_ami_callable = partial(self._get_coreos_ami, region, channel)
coreos_ami = yield From(self._loop.run_in_executor(None, get_ami_callable)) coreos_ami = yield From(self._loop.run_in_executor(None, get_ami_callable))
user_data = self.generate_cloud_config(realm, token, build_uuid, channel, self.manager_hostname) user_data = self.generate_cloud_config(realm, token, channel, self.manager_hostname)
logger.debug('Generated cloud config for build %s: %s', build_uuid, user_data) logger.debug('Generated cloud config for build %s: %s', build_uuid, user_data)
ec2_conn = self._get_conn() ec2_conn = self._get_conn()
@ -204,7 +187,8 @@ class EC2Executor(BuilderExecutor):
)) ))
except boto.exception.EC2ResponseError as ec2e: except boto.exception.EC2ResponseError as ec2e:
logger.exception('Unable to spawn builder instance') logger.exception('Unable to spawn builder instance')
metric_queue.ephemeral_build_worker_failure.Inc() metric_queue.put_deprecated('EC2BuildStartFailure', 1, unit='Count')
metric_queue.ephemeral_build_worker_failure.Inc(labelvalues=[build_uuid])
raise ec2e raise ec2e
if not reservation.instances: if not reservation.instances:
@ -273,6 +257,7 @@ class PopenExecutor(BuilderExecutor):
def start_builder(self, realm, token, build_uuid): def start_builder(self, realm, token, build_uuid):
# Now start a machine for this job, adding the machine id to the etcd information # Now start a machine for this job, adding the machine id to the etcd information
logger.debug('Forking process for build') logger.debug('Forking process for build')
import subprocess
ws_host = os.environ.get("BUILDMAN_WS_HOST", "localhost") ws_host = os.environ.get("BUILDMAN_WS_HOST", "localhost")
ws_port = os.environ.get("BUILDMAN_WS_PORT", "8787") ws_port = os.environ.get("BUILDMAN_WS_PORT", "8787")
@ -283,14 +268,10 @@ class PopenExecutor(BuilderExecutor):
'DOCKER_TLS_VERIFY': os.environ.get('DOCKER_TLS_VERIFY', ''), 'DOCKER_TLS_VERIFY': os.environ.get('DOCKER_TLS_VERIFY', ''),
'DOCKER_CERT_PATH': os.environ.get('DOCKER_CERT_PATH', ''), 'DOCKER_CERT_PATH': os.environ.get('DOCKER_CERT_PATH', ''),
'DOCKER_HOST': os.environ.get('DOCKER_HOST', ''), 'DOCKER_HOST': os.environ.get('DOCKER_HOST', ''),
'PATH': "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
} }
logpipe = LogPipe(logging.INFO) logpipe = LogPipe(logging.INFO)
spawned = subprocess.Popen(os.environ.get('BUILDER_BINARY_LOCATION', spawned = subprocess.Popen('/Users/jake/bin/quay-builder', stdout=logpipe, stderr=logpipe,
'/usr/local/bin/quay-builder'),
stdout=logpipe,
stderr=logpipe,
env=builder_env) env=builder_env)
builder_id = str(uuid.uuid4()) builder_id = str(uuid.uuid4())
@ -328,19 +309,13 @@ class KubernetesExecutor(BuilderExecutor):
tls_cert = self.executor_config.get('K8S_API_TLS_CERT') tls_cert = self.executor_config.get('K8S_API_TLS_CERT')
tls_key = self.executor_config.get('K8S_API_TLS_KEY') tls_key = self.executor_config.get('K8S_API_TLS_KEY')
tls_ca = self.executor_config.get('K8S_API_TLS_CA') tls_ca = self.executor_config.get('K8S_API_TLS_CA')
service_account_token = self.executor_config.get('SERVICE_ACCOUNT_TOKEN')
if 'timeout' not in request_options: if 'timeout' not in request_options:
request_options['timeout'] = self.executor_config.get("K8S_API_TIMEOUT", 20) request_options['timeout'] = self.executor_config.get("K8S_API_TIMEOUT", 20)
if service_account_token: if tls_cert and tls_key:
scheme = 'https'
request_options['headers'] = {'Authorization': 'Bearer ' + service_account_token}
logger.debug('Using service account token for Kubernetes authentication')
elif tls_cert and tls_key:
scheme = 'https' scheme = 'https'
request_options['cert'] = (tls_cert, tls_key) request_options['cert'] = (tls_cert, tls_key)
logger.debug('Using tls certificate and key for Kubernetes authentication')
if tls_ca: if tls_ca:
request_options['verify'] = tls_ca request_options['verify'] = tls_ca
else: else:
@ -361,75 +336,22 @@ class KubernetesExecutor(BuilderExecutor):
def _job_path(self, build_uuid): def _job_path(self, build_uuid):
return '%s/%s' % (self._jobs_path(), build_uuid) return '%s/%s' % (self._jobs_path(), build_uuid)
def _kubernetes_distribution(self): def _job_resource(self, build_uuid, user_data, coreos_channel='stable'):
return self.executor_config.get('KUBERNETES_DISTRIBUTION', 'basic').lower()
def _is_basic_kubernetes_distribution(self):
return self._kubernetes_distribution() == 'basic'
def _is_openshift_kubernetes_distribution(self):
return self._kubernetes_distribution() == 'openshift'
def _build_job_container_resources(self):
# Minimum acceptable free resources for this container to "fit" in a quota
# These may be lower than the absolute limits if the cluster is knowingly
# oversubscribed by some amount.
container_requests = {
'memory' : self.executor_config.get('CONTAINER_MEMORY_REQUEST', '3968Mi'),
}
container_limits = {
'memory' : self.executor_config.get('CONTAINER_MEMORY_LIMITS', '5120Mi'),
'cpu' : self.executor_config.get('CONTAINER_CPU_LIMITS', '1000m'),
}
resources = {
'requests': container_requests,
}
if self._is_openshift_kubernetes_distribution():
resources['requests']['cpu'] = self.executor_config.get('CONTAINER_CPU_REQUEST', '500m')
resources['limits'] = container_limits
return resources
def _build_job_containers(self, user_data):
vm_memory_limit = self.executor_config.get('VM_MEMORY_LIMIT', '4G') vm_memory_limit = self.executor_config.get('VM_MEMORY_LIMIT', '4G')
vm_volume_size = self.executor_config.get('VOLUME_SIZE', '32G') vm_volume_size = self.executor_config.get('VOLUME_SIZE', '32G')
container = { # Minimum acceptable free resources for this container to "fit" in a quota
'name': 'builder', # These may be lower than the absolute limits if the cluster is knowingly
'imagePullPolicy': 'IfNotPresent', # oversubscribed by some amount.
'image': self.image, container_requests = {
'securityContext': {'privileged': True}, 'memory' : self.executor_config.get('CONTAINER_MEMORY_REQUEST', '3968Mi'),
'env': [
{'name': 'USERDATA', 'value': user_data},
{'name': 'VM_MEMORY', 'value': vm_memory_limit},
{'name': 'VM_VOLUME_SIZE', 'value': vm_volume_size},
],
'resources': self._build_job_container_resources(),
}
if self._is_basic_kubernetes_distribution():
container['volumeMounts'] = [{'name': 'secrets-mask','mountPath': '/var/run/secrets/kubernetes.io/serviceaccount'}]
return container
def _job_resource(self, build_uuid, user_data, coreos_channel='stable'):
image_pull_secret_name = self.executor_config.get('IMAGE_PULL_SECRET_NAME', 'builder')
service_account = self.executor_config.get('SERVICE_ACCOUNT_NAME', 'quay-builder-sa')
node_selector_label_key = self.executor_config.get('NODE_SELECTOR_LABEL_KEY', 'beta.kubernetes.io/instance-type')
node_selector_label_value = self.executor_config.get('NODE_SELECTOR_LABEL_VALUE', '')
node_selector = {
node_selector_label_key : node_selector_label_value
} }
release_sha = release.GIT_HEAD or 'none' release_sha = release.GIT_HEAD or 'none'
if ' ' in release_sha: if ' ' in release_sha:
release_sha = 'HEAD' release_sha = 'HEAD'
job_resource = { return {
'apiVersion': 'batch/v1', 'apiVersion': 'batch/v1',
'kind': 'Job', 'kind': 'Job',
'metadata': { 'metadata': {
@ -454,50 +376,37 @@ class KubernetesExecutor(BuilderExecutor):
}, },
}, },
'spec': { 'spec': {
'imagePullSecrets': [{ 'name': image_pull_secret_name }], 'containers': [
{
'name': 'builder',
'imagePullPolicy': 'Always',
'image': self.image,
'securityContext': {'privileged': True},
'env': [
{'name': 'USERDATA', 'value': user_data},
{'name': 'VM_MEMORY', 'value': vm_memory_limit},
{'name': 'VM_VOLUME_SIZE', 'value': vm_volume_size},
],
'resources': {
'requests': container_requests,
},
},
],
'imagePullSecrets': [{'name': 'builder'}],
'restartPolicy': 'Never', 'restartPolicy': 'Never',
'dnsPolicy': 'Default', 'dnsPolicy': 'Default',
'containers': [self._build_job_containers(user_data)],
}, },
}, },
}, },
} }
if self._is_openshift_kubernetes_distribution():
# Setting `automountServiceAccountToken` to false will prevent automounting API credentials for a service account.
job_resource['spec']['template']['spec']['automountServiceAccountToken'] = False
# Use dedicated service account that has no authorization to any resources.
job_resource['spec']['template']['spec']['serviceAccount'] = service_account
# Setting `enableServiceLinks` to false prevents information about other services from being injected into pod's
# environment variables. Pod has no visibility into other services on the cluster.
job_resource['spec']['template']['spec']['enableServiceLinks'] = False
if node_selector_label_value.strip() != '':
job_resource['spec']['template']['spec']['nodeSelector'] = node_selector
if self._is_basic_kubernetes_distribution():
# This volume is a hack to mask the token for the namespace's
# default service account, which is placed in a file mounted under
# `/var/run/secrets/kubernetes.io/serviceaccount` in all pods.
# There's currently no other way to just disable the service
# account at either the pod or namespace level.
#
# https://github.com/kubernetes/kubernetes/issues/16779
#
job_resource['spec']['template']['spec']['volumes'] = [{'name': 'secrets-mask','emptyDir': {'medium': 'Memory'}}]
return job_resource
@coroutine @coroutine
@duration_collector_async(metric_queue.builder_time_to_start, ['k8s']) @duration_collector_async(metric_queue.builder_time_to_start, ['k8s'])
def start_builder(self, realm, token, build_uuid): def start_builder(self, realm, token, build_uuid):
# generate resource # generate resource
channel = self.executor_config.get('COREOS_CHANNEL', 'stable') channel = self.executor_config.get('COREOS_CHANNEL', 'stable')
user_data = self.generate_cloud_config(realm, token, build_uuid, channel, self.manager_hostname) user_data = self.generate_cloud_config(realm, token, channel, self.manager_hostname)
resource = self._job_resource(build_uuid, user_data, channel) resource = self._job_resource(build_uuid, user_data, channel)
logger.debug('Using Kubernetes Distribution: %s', self._kubernetes_distribution())
logger.debug('Generated kubernetes resource:\n%s', resource) logger.debug('Generated kubernetes resource:\n%s', resource)
# schedule # schedule

View file

@ -1,8 +0,0 @@
class NoopCanceller(object):
""" A class that can not cancel a build """
def __init__(self, config=None):
pass
def try_cancel_build(self, uuid):
""" Does nothing and fails to cancel build. """
return False

View file

@ -1,26 +0,0 @@
import logging
from buildman.orchestrator import orchestrator_from_config, OrchestratorError
from util import slash_join
logger = logging.getLogger(__name__)
CANCEL_PREFIX = 'cancel/'
class OrchestratorCanceller(object):
""" An asynchronous way to cancel a build with any Orchestrator. """
def __init__(self, config):
self._orchestrator = orchestrator_from_config(config, canceller_only=True)
def try_cancel_build(self, build_uuid):
logger.info('Cancelling build %s', build_uuid)
cancel_key = slash_join(CANCEL_PREFIX, build_uuid)
try:
self._orchestrator.set_key_sync(cancel_key, build_uuid, expiration=60)
return True
except OrchestratorError:
logger.exception('Failed to write cancel action to redis with uuid %s', build_uuid)
return False
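# Illustrative usage, not part of the original module: the ORCHESTRATOR block is whatever
# orchestrator_from_config accepts, and a reachable Redis is assumed here.
def _example_cancel(build_uuid):
  canceller = OrchestratorCanceller({'ORCHESTRATOR': {'REDIS_HOST': '127.0.0.1'}})
  if not canceller.try_cancel_build(build_uuid):
    logger.warning('Cancellation was not recorded for build %s', build_uuid)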

View file

@ -1,753 +0,0 @@
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import datetime
import json
import logging
import re
import time
from enum import IntEnum, unique
from six import add_metaclass, iteritems
from trollius import async, coroutine, From, Return
from urllib3.exceptions import ReadTimeoutError, ProtocolError
import etcd
import redis
from buildman.asyncutil import wrap_with_threadpool
from util import slash_join
from util.expiresdict import ExpiresDict
logger = logging.getLogger(__name__)
ONE_DAY = 60 * 60 * 24
ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION = 5
DEFAULT_LOCK_EXPIRATION = 10000
ETCD_READ_TIMEOUT = 5
ETCD_MAX_WATCH_TIMEOUT = 30
REDIS_EXPIRING_SUFFIX = '/expiring'
REDIS_DEFAULT_PUBSUB_KEY = 'orchestrator_events'
REDIS_EVENT_KIND_MESSAGE = 'message'
REDIS_EVENT_KIND_PMESSAGE = 'pmessage'
REDIS_NONEXPIRING_KEY = -1
# This constant defines the Redis configuration flags used to watch [K]eyspace and e[x]pired
# events on keys. For more info, see https://redis.io/topics/notifications#configuration
REDIS_KEYSPACE_EVENT_CONFIG_VALUE = 'Kx'
REDIS_KEYSPACE_EVENT_CONFIG_KEY = 'notify-keyspace-events'
REDIS_KEYSPACE_KEY_PATTERN = '__keyspace@%s__:%s'
REDIS_EXPIRED_KEYSPACE_PATTERN = slash_join(REDIS_KEYSPACE_KEY_PATTERN, REDIS_EXPIRING_SUFFIX)
REDIS_EXPIRED_KEYSPACE_REGEX = re.compile(REDIS_EXPIRED_KEYSPACE_PATTERN % (r'(\S+)', r'(\S+)'))
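# A standalone sketch, not part of the original module, of the keyspace-notification
# mechanism these constants configure: Redis is told to publish [K]eyspace events for
# e[x]pired keys, and a pattern subscription on the '/expiring' sentinel keys receives
# an 'expired' message once a sentinel's TTL lapses. Assumes a local Redis on db 0; the
# key name below is illustrative.
def _demo_expired_keyspace_events():
  client = redis.StrictRedis(host='127.0.0.1', port=6379, db=0)
  client.config_set(REDIS_KEYSPACE_EVENT_CONFIG_KEY, REDIS_KEYSPACE_EVENT_CONFIG_VALUE)
  pubsub = client.pubsub()
  pubsub.psubscribe(REDIS_EXPIRED_KEYSPACE_PATTERN % (0, '*'))
  # Write an expiring sentinel next to the "real" key, with a one-second TTL.
  client.set('job/some-build-uuid' + REDIS_EXPIRING_SUFFIX, 'payload', ex=1)
  for message in pubsub.listen():
    if message['type'] == REDIS_EVENT_KIND_PMESSAGE and message['data'] == 'expired':
      # message['channel'] is the keyspace channel naming the expired sentinel key.
      return message['channel']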
def orchestrator_from_config(manager_config, canceller_only=False):
"""
Allocates a new Orchestrator from the 'ORCHESTRATOR' block from provided manager config.
Checks for legacy configuration prefixed with 'ETCD_' when the 'ORCHESTRATOR' is not present.
:param manager_config: the configuration for the orchestrator
:type manager_config: dict
:rtype: :class: Orchestrator
"""
# Legacy codepath only knows how to configure etcd.
if manager_config.get('ORCHESTRATOR') is None:
manager_config['ORCHESTRATOR'] = {key: value
for (key, value) in iteritems(manager_config)
if key.startswith('ETCD_') and not key.endswith('_PREFIX')}
# Sanity check that legacy prefixes are no longer being used.
for key in manager_config['ORCHESTRATOR'].keys():
words = key.split('_')
if len(words) > 1 and words[-1].lower() == 'prefix':
raise AssertionError('legacy prefix used, use ORCHESTRATOR_PREFIX instead')
def _dict_key_prefix(d):
"""
:param d: the dict that has keys prefixed with underscore
:type d: {str: any}
:rtype: str
"""
return d.keys()[0].split('_', 1)[0].lower()
orchestrator_name = _dict_key_prefix(manager_config['ORCHESTRATOR'])
def format_key(key):
return key.lower().split('_', 1)[1]
orchestrator_kwargs = {format_key(key): value
for (key, value) in iteritems(manager_config['ORCHESTRATOR'])}
if manager_config.get('ORCHESTRATOR_PREFIX') is not None:
orchestrator_kwargs['orchestrator_prefix'] = manager_config['ORCHESTRATOR_PREFIX']
orchestrator_kwargs['canceller_only'] = canceller_only
logger.debug('attempting to create orchestrator %s with kwargs %s',
orchestrator_name, orchestrator_kwargs)
return orchestrator_by_name(orchestrator_name, **orchestrator_kwargs)
def orchestrator_by_name(name, **kwargs):
_ORCHESTRATORS = {
'etcd': Etcd2Orchestrator,
'mem': MemoryOrchestrator,
'redis': RedisOrchestrator,
}
return _ORCHESTRATORS.get(name, MemoryOrchestrator)(**kwargs)
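# A worked example, with hypothetical values, of the parsing above: the 'REDIS_' prefix
# on the keys selects RedisOrchestrator, and the remainder of each key is lower-cased
# into a constructor kwarg, so the config below resolves to
# RedisOrchestrator(host='127.0.0.1', port=6379, orchestrator_prefix='buildman/',
# canceller_only=False).
def _example_orchestrator_from_config():
  manager_config = {
    'ORCHESTRATOR_PREFIX': 'buildman/',
    'ORCHESTRATOR': {
      'REDIS_HOST': '127.0.0.1',
      'REDIS_PORT': 6379,
    },
  }
  return orchestrator_from_config(manager_config)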
class OrchestratorError(Exception):
pass
# TODO: replace with ConnectionError when this codebase is Python 3.
class OrchestratorConnectionError(OrchestratorError):
pass
@unique
class KeyEvent(IntEnum):
CREATE = 1
SET = 2
DELETE = 3
EXPIRE = 4
class KeyChange(namedtuple('KeyChange', ['event', 'key', 'value'])):
pass
@add_metaclass(ABCMeta)
class Orchestrator(object):
"""
Orchestrator is the interface that is used to synchronize the build states
across build managers.
This interface assumes that storage is being done by a key-value store
that supports watching for events on keys.
Missing keys should raise KeyError; all other errors should raise an
OrchestratorError.
:param key_prefix: the prefix of keys being watched
:type key_prefix: str
"""
@abstractmethod
def on_key_change(self, key, callback, restarter=None):
"""
The callback parameter takes in a KeyChange object as a parameter.
"""
pass
@abstractmethod
def get_prefixed_keys(self, prefix):
"""
:returns: a dict of key value pairs beginning with prefix
:rtype: {str: str}
"""
pass
@abstractmethod
def get_key(self, key):
"""
:returns: the value stored at the provided key
:rtype: str
"""
pass
@abstractmethod
def set_key(self, key, value, overwrite=False, expiration=None):
"""
:param key: the identifier for the value
:type key: str
:param value: the value being stored
:type value: str
:param overwrite: whether or not a KeyError is thrown if the key already exists
:type overwrite: bool
:param expiration: the duration in seconds that a key should be available
:type expiration: int
"""
pass
@abstractmethod
def set_key_sync(self, key, value, overwrite=False, expiration=None):
"""
set_key, but without trollius coroutines.
"""
pass
@abstractmethod
def delete_key(self, key):
"""
Deletes a key that has been set in the orchestrator.
:param key: the identifier for the key
:type key: str
"""
pass
@abstractmethod
def lock(self, key, expiration=DEFAULT_LOCK_EXPIRATION):
"""
Takes a lock for synchronizing exclusive operations cluster-wide.
:param key: the identifier for the lock
:type key: str
:param expiration: the duration until the lock expires
:type expiration: :class:`datetime.timedelta` or int (seconds)
:returns: whether or not the lock was acquired
:rtype: bool
"""
pass
@abstractmethod
def shutdown(self):
"""
This function should shut down and release any resources allocated by the Orchestrator.
"""
pass
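# A minimal consumer-side sketch, not from the original module, of the contract described
# above, in the same trollius style as the build managers that call into it: missing keys
# surface as KeyError and connectivity problems as OrchestratorConnectionError.
@coroutine
def _read_job_metadata(orchestrator, job_key):
  try:
    job_data = yield From(orchestrator.get_key(job_key))
  except KeyError:
    raise Return(None)
  except OrchestratorConnectionError:
    logger.exception('Orchestrator unavailable while reading job key %s', job_key)
    raise Return(None)
  raise Return(json.loads(job_data))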
def _sleep_orchestrator():
"""
This function blocks the trollius event loop by sleeping in order to back off when a failure
such as a ConnectionError has occurred.
"""
logger.exception('Connecting to the orchestrator failed; sleeping for %s and then trying again',
ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
time.sleep(ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
logger.exception('Connecting to the orchestrator failed; slept for %s and now trying again',
ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)
class EtcdAction(object):
""" Enumeration of the various kinds of etcd actions we can observe via a watch. """
GET = 'get'
SET = 'set'
EXPIRE = 'expire'
UPDATE = 'update'
DELETE = 'delete'
CREATE = 'create'
COMPARE_AND_SWAP = 'compareAndSwap'
COMPARE_AND_DELETE = 'compareAndDelete'
class Etcd2Orchestrator(Orchestrator):
def __init__(self, host='127.0.0.1', port=2379, cert_and_key=None, ca_cert=None,
client_threads=5, canceller_only=False, **kwargs):
self.is_canceller_only = canceller_only
logger.debug('initializing async etcd client')
self._sync_etcd_client = etcd.Client(
host=host,
port=port,
cert=tuple(cert_and_key) if cert_and_key is not None else None,
ca_cert=ca_cert,
protocol='http' if cert_and_key is None else 'https',
read_timeout=ETCD_READ_TIMEOUT,
)
if not self.is_canceller_only:
(self._etcd_client, self._async_executor) = wrap_with_threadpool(self._sync_etcd_client,
client_threads)
logger.debug('creating initial orchestrator state')
self._shutting_down = False
self._watch_tasks = {}
@staticmethod
def _sanity_check_ttl(ttl):
"""
A TTL of < 0 in etcd results in the key *never being expired*.
We use a max here to ensure that if the TTL is < 0, the key will expire immediately.
"""
return max(ttl, 0)
def _watch_etcd(self, key, callback, restarter=None, start_index=None):
def callback_wrapper(changed_key_future):
new_index = start_index
etcd_result = None
if not changed_key_future.cancelled():
try:
etcd_result = changed_key_future.result()
existing_index = getattr(etcd_result, 'etcd_index', None)
new_index = etcd_result.modifiedIndex + 1
logger.debug('Got watch of key: %s at #%s with result: %s',
key, existing_index, etcd_result)
except ReadTimeoutError:
logger.debug('Read-timeout on etcd watch %s, rescheduling', key)
except etcd.EtcdEventIndexCleared:
# This happens if etcd2 has moved forward too fast for us to start watching at the index
# we retrieved. We therefore start a new watch at HEAD and (if specified) call the
# restarter method which should conduct a read and reset the state of the manager.
logger.debug('Etcd moved forward too quickly. Restarting watch cycle.')
new_index = None
if restarter is not None:
async(restarter())
except (KeyError, etcd.EtcdKeyError):
logger.debug('Etcd key already cleared: %s', key)
return
except etcd.EtcdConnectionFailed:
_sleep_orchestrator()
except etcd.EtcdException as eex:
# TODO: This is a quick and dirty hack and should be replaced with a proper
# exception check.
if str(eex.message).find('Read timed out') >= 0:
logger.debug('Read-timeout on etcd watch %s, rescheduling', key)
else:
logger.exception('Exception on etcd watch: %s', key)
except ProtocolError:
logger.exception('Exception on etcd watch: %s', key)
if key not in self._watch_tasks or self._watch_tasks[key].done():
self._watch_etcd(key, callback, start_index=new_index, restarter=restarter)
if etcd_result and etcd_result.value is not None:
async(callback(self._etcd_result_to_keychange(etcd_result)))
if not self._shutting_down:
logger.debug('Scheduling watch of key: %s at start index %s', key, start_index)
watch_future = self._etcd_client.watch(key, recursive=True, index=start_index,
timeout=ETCD_MAX_WATCH_TIMEOUT)
watch_future.add_done_callback(callback_wrapper)
self._watch_tasks[key] = async(watch_future)
@staticmethod
def _etcd_result_to_keychange(etcd_result):
event = Etcd2Orchestrator._etcd_result_to_keyevent(etcd_result)
return KeyChange(event, etcd_result.key, etcd_result.value)
@staticmethod
def _etcd_result_to_keyevent(etcd_result):
if etcd_result.action == EtcdAction.CREATE:
return KeyEvent.CREATE
if etcd_result.action == EtcdAction.SET:
return KeyEvent.CREATE if etcd_result.createdIndex == etcd_result.modifiedIndex else KeyEvent.SET
if etcd_result.action == EtcdAction.DELETE:
return KeyEvent.DELETE
if etcd_result.action == EtcdAction.EXPIRE:
return KeyEvent.EXPIRE
raise AssertionError('etcd action must have equivalent keyevent')
def on_key_change(self, key, callback, restarter=None):
assert not self.is_canceller_only
logger.debug('creating watch on %s', key)
self._watch_etcd(key, callback, restarter=restarter)
@coroutine
def get_prefixed_keys(self, prefix):
assert not self.is_canceller_only
try:
etcd_result = yield From(self._etcd_client.read(prefix, recursive=True))
raise Return({leaf.key: leaf.value for leaf in etcd_result.leaves})
except etcd.EtcdKeyError:
raise KeyError
except etcd.EtcdConnectionFailed as ex:
raise OrchestratorConnectionError(ex)
except etcd.EtcdException as ex:
raise OrchestratorError(ex)
@coroutine
def get_key(self, key):
assert not self.is_canceller_only
try:
# Ignore pylint: the value property on EtcdResult is added dynamically using setattr.
etcd_result = yield From(self._etcd_client.read(key))
raise Return(etcd_result.value)
except etcd.EtcdKeyError:
raise KeyError
except etcd.EtcdConnectionFailed as ex:
raise OrchestratorConnectionError(ex)
except etcd.EtcdException as ex:
raise OrchestratorError(ex)
@coroutine
def set_key(self, key, value, overwrite=False, expiration=None):
assert not self.is_canceller_only
yield From(self._etcd_client.write(key, value, prevExists=overwrite,
ttl=self._sanity_check_ttl(expiration)))
def set_key_sync(self, key, value, overwrite=False, expiration=None):
self._sync_etcd_client.write(key, value, prevExists=overwrite,
ttl=self._sanity_check_ttl(expiration))
@coroutine
def delete_key(self, key):
assert not self.is_canceller_only
try:
yield From(self._etcd_client.delete(key))
except etcd.EtcdKeyError:
raise KeyError
except etcd.EtcdConnectionFailed as ex:
raise OrchestratorConnectionError(ex)
except etcd.EtcdException as ex:
raise OrchestratorError(ex)
@coroutine
def lock(self, key, expiration=DEFAULT_LOCK_EXPIRATION):
assert not self.is_canceller_only
try:
yield From(self._etcd_client.write(key, {}, prevExist=False,
ttl=self._sanity_check_ttl(expiration)))
raise Return(True)
except (KeyError, etcd.EtcdKeyError):
raise Return(False)
except etcd.EtcdConnectionFailed:
logger.exception('Could not get etcd atomic lock as etcd is down')
raise Return(False)
except etcd.EtcdException as ex:
raise OrchestratorError(ex)
def shutdown(self):
logger.debug('Shutting down etcd client.')
self._shutting_down = True
if self.is_canceller_only:
return
for (key, _), task in self._watch_tasks.items():
if not task.done():
logger.debug('Canceling watch task for %s', key)
task.cancel()
if self._async_executor is not None:
self._async_executor.shutdown()
class MemoryOrchestrator(Orchestrator):
def __init__(self, **kwargs):
self.state = ExpiresDict()
self.callbacks = {}
def _callbacks_prefixed(self, prefix):
return (callback for (key, callback) in iteritems(self.callbacks)
if key.startswith(prefix))
def on_key_change(self, key, callback, restarter=None):
self.callbacks[key] = callback
@coroutine
def get_prefixed_keys(self, prefix):
raise Return({k: value for (k, value) in self.state.items()
if k.startswith(prefix)})
@coroutine
def get_key(self, key):
raise Return(self.state[key])
@coroutine
def set_key(self, key, value, overwrite=False, expiration=None):
preexisting_key = key in self.state
if preexisting_key and not overwrite:
raise KeyError
absolute_expiration = None
if expiration is not None:
absolute_expiration = datetime.datetime.now() + datetime.timedelta(seconds=expiration)
self.state.set(key, value, expires=absolute_expiration)
event = KeyEvent.CREATE if not preexisting_key else KeyEvent.SET
for callback in self._callbacks_prefixed(key):
yield From(callback(KeyChange(event, key, value)))
def set_key_sync(self, key, value, overwrite=False, expiration=None):
"""
set_key, but without trollius coroutines.
"""
preexisting_key = key in self.state
if preexisting_key and not overwrite:
raise KeyError
absolute_expiration = None
if expiration is not None:
absolute_expiration = datetime.datetime.now() + datetime.timedelta(seconds=expiration)
self.state.set(key, value, expires=absolute_expiration)
event = KeyEvent.CREATE if not preexisting_key else KeyEvent.SET
for callback in self._callbacks_prefixed(key):
callback(KeyChange(event, key, value))
@coroutine
def delete_key(self, key):
value = self.state[key]
del self.state[key]
for callback in self._callbacks_prefixed(key):
yield From(callback(KeyChange(KeyEvent.DELETE, key, value)))
@coroutine
def lock(self, key, expiration=DEFAULT_LOCK_EXPIRATION):
if key in self.state:
raise Return(False)
self.state.set(key, None, expires=expiration)
raise Return(True)
def shutdown(self):
self.state = None
self.callbacks = None
class RedisOrchestrator(Orchestrator):
def __init__(self, host='127.0.0.1', port=6379, password=None, db=0, cert_and_key=None,
ca_cert=None, client_threads=5, ssl=False, skip_keyspace_event_setup=False,
canceller_only=False, **kwargs):
self.is_canceller_only = canceller_only
(cert, key) = tuple(cert_and_key) if cert_and_key is not None else (None, None)
self._sync_client = redis.StrictRedis(
host=host,
port=port,
password=password,
db=db,
ssl_certfile=cert,
ssl_keyfile=key,
ssl_ca_certs=ca_cert,
ssl=ssl,
)
self._shutting_down = False
self._tasks = {}
self._watched_keys = {}
self._pubsub_key = slash_join(kwargs.get('orchestrator_prefix', ''),
REDIS_DEFAULT_PUBSUB_KEY).lstrip('/')
if not self.is_canceller_only:
(self._client, self._async_executor) = wrap_with_threadpool(self._sync_client, client_threads)
# Configure a subscription to watch events that the orchestrator manually publishes.
logger.debug('creating pubsub with key %s', self._pubsub_key)
published_pubsub = self._sync_client.pubsub()
published_pubsub.subscribe(self._pubsub_key)
(self._pubsub, self._async_executor_pub) = wrap_with_threadpool(published_pubsub)
self._watch_published_key()
# Configure a subscription to watch expired keyspace events.
if not skip_keyspace_event_setup:
self._sync_client.config_set(REDIS_KEYSPACE_EVENT_CONFIG_KEY,
REDIS_KEYSPACE_EVENT_CONFIG_VALUE)
expiring_pubsub = self._sync_client.pubsub()
expiring_pubsub.psubscribe(REDIS_EXPIRED_KEYSPACE_PATTERN % (db, '*'))
(self._pubsub_expiring, self._async_executor_ex) = wrap_with_threadpool(expiring_pubsub)
self._watch_expiring_key()
def _watch_published_key(self):
def published_callback_wrapper(event_future):
logger.debug('published callback called')
event_result = None
if not event_future.cancelled():
try:
event_result = event_future.result()
(redis_event, event_key, event_value) = event_result
logger.debug('Got watch of key: (%s, %s, %s)', redis_event, event_key, event_value)
except redis.ConnectionError:
_sleep_orchestrator()
except redis.RedisError:
logger.exception('Exception watching redis publish: %s', event_key)
# Schedule creating a new future if this one has been consumed.
if 'pub' not in self._tasks or self._tasks['pub'].done():
self._watch_published_key()
if event_result is not None and redis_event == REDIS_EVENT_KIND_MESSAGE:
keychange = self._publish_to_keychange(event_value)
for watched_key, callback in iteritems(self._watched_keys):
if keychange.key.startswith(watched_key):
async(callback(keychange))
if not self._shutting_down:
logger.debug('Scheduling watch of publish stream')
watch_future = self._pubsub.parse_response()
watch_future.add_done_callback(published_callback_wrapper)
self._tasks['pub'] = async(watch_future)
def _watch_expiring_key(self):
def expiring_callback_wrapper(event_future):
logger.debug('expiring callback called')
event_result = None
if not event_future.cancelled():
try:
event_result = event_future.result()
if self._is_expired_keyspace_event(event_result):
# Get the value of the original key before the expiration happened.
key = self._key_from_expiration(event_future)
expired_value = yield From(self._client.get(key))
# $KEY/expiring is gone, but the original key still remains, set an expiration for it
# so that other managers have time to get the event and still read the expired value.
yield From(self._client.expire(key, ONE_DAY))
except redis.ConnectionError:
_sleep_orchestrator()
except redis.RedisError:
logger.exception('Exception watching redis expirations: %s', key)
# Schedule creating a new future if this one has been consumed.
if 'expire' not in self._tasks or self._tasks['expire'].done():
self._watch_expiring_key()
if self._is_expired_keyspace_event(event_result) and expired_value is not None:
for watched_key, callback in iteritems(self._watched_keys):
if key.startswith(watched_key):
async(callback(KeyChange(KeyEvent.EXPIRE, key, expired_value)))
if not self._shutting_down:
logger.debug('Scheduling watch of expiration')
watch_future = self._pubsub_expiring.parse_response()
watch_future.add_done_callback(expiring_callback_wrapper)
self._tasks['expire'] = async(watch_future)
def on_key_change(self, key, callback, restarter=None):
assert not self.is_canceller_only
logger.debug('watching key: %s', key)
self._watched_keys[key] = callback
@staticmethod
def _is_expired_keyspace_event(event_result):
"""
Sanity check that this isn't an unrelated keyspace event.
There could be a more efficient keyspace event config to avoid this client-side filter.
"""
if event_result is None:
return False
(redis_event, _pattern, matched_key, expired) = event_result
return (redis_event == REDIS_EVENT_KIND_PMESSAGE and
expired == 'expired' and
REDIS_EXPIRED_KEYSPACE_REGEX.match(matched_key) is not None)
@staticmethod
def _key_from_expiration(event_result):
(_redis_event, _pattern, matched_key, _expired) = event_result
return REDIS_EXPIRED_KEYSPACE_REGEX.match(matched_key).groups()[1]
@staticmethod
def _publish_to_keychange(event_value):
e = json.loads(event_value)
return KeyChange(KeyEvent(e['event']), e['key'], e['value'])
@coroutine
def get_prefixed_keys(self, prefix):
assert not self.is_canceller_only
# TODO: This can probably be done with redis pipelines to make it transactional.
keys = yield From(self._client.keys(prefix + '*'))
# Yielding to the event loop is required, thus this cannot be written as a dict comprehension.
results = {}
for key in keys:
if key.endswith(REDIS_EXPIRING_SUFFIX):
continue
ttl = yield From(self._client.ttl(key))
if ttl != REDIS_NONEXPIRING_KEY:
# Only redis keys without expirations are live build manager keys.
value = yield From(self._client.get(key))
results.update({key: value})
raise Return(results)
@coroutine
def get_key(self, key):
assert not self.is_canceller_only
value = yield From(self._client.get(key))
raise Return(value)
@coroutine
def set_key(self, key, value, overwrite=False, expiration=None):
assert not self.is_canceller_only
already_exists = yield From(self._client.exists(key))
yield From(self._client.set(key, value, xx=overwrite))
if expiration is not None:
yield From(self._client.set(slash_join(key, REDIS_EXPIRING_SUFFIX), value,
xx=overwrite, ex=expiration))
key_event = KeyEvent.SET if already_exists else KeyEvent.CREATE
yield From(self._publish(event=key_event, key=key, value=value))
def set_key_sync(self, key, value, overwrite=False, expiration=None):
already_exists = self._sync_client.exists(key)
self._sync_client.set(key, value, xx=overwrite)
if expiration is not None:
self._sync_client.set(slash_join(key, REDIS_EXPIRING_SUFFIX), value,
xx=overwrite, ex=expiration)
self._sync_client.publish(self._pubsub_key, json.dumps({
'event': int(KeyEvent.SET if already_exists else KeyEvent.CREATE),
'key': key,
'value': value,
}))
@coroutine
def _publish(self, **kwargs):
kwargs['event'] = int(kwargs['event'])
event_json = json.dumps(kwargs)
logger.debug('publishing event: %s', event_json)
yield From(self._client.publish(self._pubsub_key, event_json))
@coroutine
def delete_key(self, key):
assert not self.is_canceller_only
value = yield From(self._client.get(key))
yield From(self._client.delete(key))
yield From(self._client.delete(slash_join(key, REDIS_EXPIRING_SUFFIX)))
yield From(self._publish(event=KeyEvent.DELETE, key=key, value=value))
@coroutine
def lock(self, key, expiration=DEFAULT_LOCK_EXPIRATION):
assert not self.is_canceller_only
yield From(self.set_key(key, '', expiration=expiration))
raise Return(True)
@coroutine
def shutdown(self):
logger.debug('Shutting down redis client.')
self._shutting_down = True
if self.is_canceller_only:
return
for key, task in iteritems(self._tasks):
if not task.done():
logger.debug('Canceling watch task for %s', key)
task.cancel()
if self._async_executor is not None:
self._async_executor.shutdown()
if self._async_executor_ex is not None:
self._async_executor_ex.shutdown()
if self._async_executor_pub is not None:
self._async_executor_pub.shutdown()

View file

@ -1,7 +1,6 @@
FROM debian FROM debian
RUN apt-get clean && apt-get update && apt-get upgrade -y # 03APR2017 RUN apt-get clean && apt-get update && apt-get install -y \
RUN apt-get install -y \
bzip2 \ bzip2 \
curl \ curl \
openssh-client \ openssh-client \

View file

@ -1,23 +1,23 @@
import logging import logging
import json
import trollius import trollius
import json
from threading import Event
from datetime import timedelta
from trollius.coroutines import From
from autobahn.asyncio.wamp import RouterFactory, RouterSessionFactory from autobahn.asyncio.wamp import RouterFactory, RouterSessionFactory
from autobahn.asyncio.websocket import WampWebSocketServerFactory from autobahn.asyncio.websocket import WampWebSocketServerFactory
from autobahn.wamp import types from autobahn.wamp import types
from aiowsgi import create_server as create_wsgi_server from aiowsgi import create_server as create_wsgi_server
from flask import Flask from flask import Flask
from threading import Event
from trollius.coroutines import From
from datetime import timedelta
from buildman.enums import BuildJobResult, BuildServerStatus, RESULT_PHASES from buildman.enums import BuildJobResult, BuildServerStatus, RESULT_PHASES
from buildman.jobutil.buildstatus import StatusHandler from buildman.jobutil.buildstatus import StatusHandler
from buildman.jobutil.buildjob import BuildJob, BuildJobLoadException from buildman.jobutil.buildjob import BuildJob, BuildJobLoadException
from data import database, model from data import database
from app import app, metric_queue from app import app, metric_queue
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
WORK_CHECK_TIMEOUT = 10 WORK_CHECK_TIMEOUT = 10
@ -28,7 +28,6 @@ MINIMUM_JOB_EXTENSION = timedelta(minutes=1)
HEARTBEAT_PERIOD_SEC = 30 HEARTBEAT_PERIOD_SEC = 30
class BuilderServer(object): class BuilderServer(object):
""" Server which handles both HTTP and WAMP requests, managing the full state of the build """ Server which handles both HTTP and WAMP requests, managing the full state of the build
controller. controller.
@ -47,12 +46,12 @@ class BuilderServer(object):
self._build_logs = build_logs self._build_logs = build_logs
self._user_files = user_files self._user_files = user_files
self._lifecycle_manager = lifecycle_manager_klass( self._lifecycle_manager = lifecycle_manager_klass(
self._register_component, self._register_component,
self._unregister_component, self._unregister_component,
self._job_heartbeat, self._job_heartbeat,
self._job_complete, self._job_complete,
manager_hostname, manager_hostname,
HEARTBEAT_PERIOD_SEC, HEARTBEAT_PERIOD_SEC,
) )
self._lifecycle_manager_config = lifecycle_manager_config self._lifecycle_manager_config = lifecycle_manager_config
@ -132,35 +131,25 @@ class BuilderServer(object):
def _unregister_component(self, component): def _unregister_component(self, component):
logger.debug('Unregistering component with realm %s and token %s', logger.debug('Unregistering component with realm %s and token %s',
component.builder_realm, component.expected_token) component.builder_realm, component.expected_token)
self._realm_map.pop(component.builder_realm, None) self._realm_map.pop(component.builder_realm)
self._current_components.remove(component)
if component in self._current_components: self._session_factory.remove(component)
self._current_components.remove(component)
self._session_factory.remove(component)
def _job_heartbeat(self, build_job): def _job_heartbeat(self, build_job):
self._queue.extend_processing(build_job.job_item, seconds_from_now=JOB_TIMEOUT_SECONDS, self._queue.extend_processing(build_job.job_item, seconds_from_now=JOB_TIMEOUT_SECONDS,
minimum_extension=MINIMUM_JOB_EXTENSION) minimum_extension=MINIMUM_JOB_EXTENSION)
@trollius.coroutine
def _job_complete(self, build_job, job_status, executor_name=None, update_phase=False): def _job_complete(self, build_job, job_status, executor_name=None, update_phase=False):
if job_status == BuildJobResult.INCOMPLETE: if job_status == BuildJobResult.INCOMPLETE:
logger.warning('[BUILD INCOMPLETE: job complete] Build ID: %s. No retry restore.',
build_job.repo_build.uuid)
self._queue.incomplete(build_job.job_item, restore_retry=False, retry_after=30) self._queue.incomplete(build_job.job_item, restore_retry=False, retry_after=30)
else: else:
self._queue.complete(build_job.job_item) self._queue.complete(build_job.job_item)
# Update the trigger failure tracking (if applicable).
if build_job.repo_build.trigger is not None:
model.build.update_trigger_disable_status(build_job.repo_build.trigger,
RESULT_PHASES[job_status])
if update_phase: if update_phase:
status_handler = StatusHandler(self._build_logs, build_job.repo_build.uuid) status_handler = StatusHandler(self._build_logs, build_job.repo_build.uuid)
yield From(status_handler.set_phase(RESULT_PHASES[job_status])) status_handler.set_phase(RESULT_PHASES[job_status])
self._job_count = self._job_count - 1 self._job_count = self._job_count - 1
@ -179,7 +168,7 @@ class BuilderServer(object):
logger.debug('Checking for more work for %d active workers', logger.debug('Checking for more work for %d active workers',
self._lifecycle_manager.num_workers()) self._lifecycle_manager.num_workers())
processing_time = self._lifecycle_manager.overall_setup_time() + SETUP_LEEWAY_SECONDS processing_time = self._lifecycle_manager.setup_time() + SETUP_LEEWAY_SECONDS
job_item = self._queue.get(processing_time=processing_time, ordering_required=True) job_item = self._queue.get(processing_time=processing_time, ordering_required=True)
if job_item is None: if job_item is None:
logger.debug('No additional work found. Going to sleep for %s seconds', WORK_CHECK_TIMEOUT) logger.debug('No additional work found. Going to sleep for %s seconds', WORK_CHECK_TIMEOUT)
@ -188,8 +177,6 @@ class BuilderServer(object):
try: try:
build_job = BuildJob(job_item) build_job = BuildJob(job_item)
except BuildJobLoadException as irbe: except BuildJobLoadException as irbe:
logger.warning('[BUILD INCOMPLETE: job load exception] Job data: %s. No retry restore.',
job_item.body)
logger.exception(irbe) logger.exception(irbe)
self._queue.incomplete(job_item, restore_retry=False) self._queue.incomplete(job_item, restore_retry=False)
continue continue
@ -200,8 +187,6 @@ class BuilderServer(object):
try: try:
schedule_success, retry_timeout = yield From(self._lifecycle_manager.schedule(build_job)) schedule_success, retry_timeout = yield From(self._lifecycle_manager.schedule(build_job))
except: except:
logger.warning('[BUILD INCOMPLETE: scheduling] Build ID: %s. Retry restored.',
build_job.repo_build.uuid)
logger.exception('Exception when scheduling job: %s', build_job.repo_build.uuid) logger.exception('Exception when scheduling job: %s', build_job.repo_build.uuid)
self._current_status = BuildServerStatus.EXCEPTION self._current_status = BuildServerStatus.EXCEPTION
self._queue.incomplete(job_item, restore_retry=True, retry_after=WORK_CHECK_TIMEOUT) self._queue.incomplete(job_item, restore_retry=True, retry_after=WORK_CHECK_TIMEOUT)
@ -210,14 +195,12 @@ class BuilderServer(object):
if schedule_success: if schedule_success:
logger.debug('Marking build %s as scheduled', build_job.repo_build.uuid) logger.debug('Marking build %s as scheduled', build_job.repo_build.uuid)
status_handler = StatusHandler(self._build_logs, build_job.repo_build.uuid) status_handler = StatusHandler(self._build_logs, build_job.repo_build.uuid)
yield From(status_handler.set_phase(database.BUILD_PHASE.BUILD_SCHEDULED)) status_handler.set_phase(database.BUILD_PHASE.BUILD_SCHEDULED)
self._job_count = self._job_count + 1 self._job_count = self._job_count + 1
logger.debug('Build job %s scheduled. Running: %s', build_job.repo_build.uuid, logger.debug('Build job %s scheduled. Running: %s', build_job.repo_build.uuid,
self._job_count) self._job_count)
else: else:
logger.warning('[BUILD INCOMPLETE: no schedule] Build ID: %s. Retry restored.',
build_job.repo_build.uuid)
logger.debug('All workers are busy for job %s Requeuing after %s seconds.', logger.debug('All workers are busy for job %s Requeuing after %s seconds.',
build_job.repo_build.uuid, retry_timeout) build_job.repo_build.uuid, retry_timeout)
self._queue.incomplete(job_item, restore_retry=True, retry_after=retry_timeout) self._queue.incomplete(job_item, restore_retry=True, retry_after=retry_timeout)

View file

@ -1,32 +1,21 @@
#cloud-config #cloud-config
hostname: {{ build_uuid | default('quay-builder', True) }}
users: users:
groups: groups:
- sudo - sudo
- docker - docker
{% if ssh_authorized_keys -%}
ssh_authorized_keys: ssh_authorized_keys:
{% for ssh_key in ssh_authorized_keys -%} - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCC0m+hVmyR3vn/xoxJe9+atRWBxSK+YXgyufNVDMcb7H00Jfnc341QH3kDVYZamUbhVh/nyc2RP7YbnZR5zORFtgOaNSdkMYrPozzBvxjnvSUokkCCWbLqXDHvIKiR12r+UTSijPJE/Yk702Mb2ejAFuae1C3Ec+qKAoOCagDjpQ3THyb5oaKE7VPHdwCWjWIQLRhC+plu77ObhoXIFJLD13gCi01L/rp4mYVCxIc2lX5A8rkK+bZHnIZwWUQ4t8SIjWxIaUo0FE7oZ83nKuNkYj5ngmLHQLY23Nx2WhE9H6NBthUpik9SmqQPtVYbhIG+bISPoH9Xs8CLrFb0VRjz JS Key
- {{ ssh_key }} - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCo6FhAP7mFFOAzM91gtaKW7saahtaN4lur42FMMztz6aqUycIltCmvxo+3FmrXgCG30maMNU36Vm1+9QRtVQEd+eRuoIWP28t+8MT01Fh4zPuE2Wca3pOHSNo3X81FfWJLzmwEHiQKs9HPQqUhezR9PcVWVkbMyAzw85c0UycGmHGFNb0UiRd9HFY6XbgbxhZv/mvKLZ99xE3xkOzS1PNsdSNvjUKwZR7pSUPqNS5S/1NXyR4GhFTU24VPH/bTATOv2ATH+PSzsZ7Qyz9UHj38tKC+ALJHEDJ4HXGzobyOUP78cHGZOfCB5FYubq0zmOudAjKIAhwI8XTFvJ2DX1P3 JZ Key
{%- endfor %} - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDNvw8qo9m8np7yQ/Smv/oklM8bo8VyNRZriGYBDuolWDL/mZpYCQnZJXphQo7RFdNABYistikjJlBuuwUohLf2uSq0iKoFa2TgwI43wViWzvuzU4nA02/ITD5BZdmWAFNyIoqeB50Ol4qUgDwLAZ+7Kv7uCi6chcgr9gTi99jY3GHyZjrMiXMHGVGi+FExFuzhVC2drKjbz5q6oRfQeLtNfG4psl5GU3MQU6FkX4fgoCx0r9R48/b7l4+TT7pWblJQiRfeldixu6308vyoTUEHasdkU3/X0OTaGz/h5XqTKnGQc6stvvoED3w+L3QFp0H5Z8sZ9stSsitmCBrmbcKZ JM Key
{%- endif %} - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAgEAo/JkbGO6R7g1ZxARi0xWVM7FOfN02snRAcIO6vT9M7xMUkWVLgD+hM/o91lk+UFiYdql0CATobpFWncRL36KaUqsbw9/1BlI40wg296XHXSSnxhxZ4L7ytf6G1tyN319HXlI2kh9vAf/fy++yDvkH8dI3k1oLoW+mZPET6Pff04/6AXXrRlS5mhmGv9irGwiDHtVKpj6lU8DN/UtOrv1tiQ0pgwEJq05fLGoQfgPNaBCnW2z4Ubpn2gyMcMBMpSwo4hCqJePd349e4bLmFcT+gXYg7Mnup1DoTDlowFFN56wpxQbdp96IxWzU+jYPaIAuRo+BJzCyOS8qBv0Z4RZrgop0qp2JYiVwmViO6TZhIDz6loQJXUOIleQmNgTbiZx8Bwv5GY2jMYoVwlBp7yy5bRjxfbFsJ0vU7TVzNAG7oEJy/74HmHmWzRQlSlQjesr8gRbm9zgR8wqc/L107UOWFg7Cgh8ZNjKuADbXqYuda1Y9m2upcfS26UPz5l5PW5uFRMHZSi8pb1XV6/0Z8H8vwsh37Ur6aLi/5jruRmKhdlsNrB1IiDicBsPW3yg7HHSIdPU4oBNPC77yDCT3l4CKr4el81RrZt7FbJPfY+Ig9Q5O+05f6I8+ZOlJGyZ/Qfyl2aVm1HnlJKuBqPxeic8tMng/9B5N7uZL6Y3k5jFU8c= QM Key
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC964SY8ojXZVfWknF+Pz+pTHpyb66VBH7OLYnGP+Tm452YKJVFb/rXCpZYHFlzSQtzz9hko8qBoEFXuD2humojx0P7nEtTy8wUClnKcifIqD5b/V1r7ZDa/5hL9Xog11gOXZ17TW1qjN+00qgXwoSh+jM8mAxD7V2ZLnanIDqmpYamT3ZlICz1k4bwYj35gnpSFpijAXeF9LXOEUfDtzNBjeaCvyniYlQyKzpKr8x+oIHumPlxwkFOzGhBMRGrCQ1Kzija8vVZQ6/Tjvxl19jwfgcNT0Zd9vLbHNowJPWQZhLYXdGIb3NxEfAqkGPvGCsaLfsfETYhcFwxr2g+zvf4xvyKgK35PHA/5t7TQryDSKDrQ1qTDUp3dAjzwsBFwEoQ0x68shGC661n/+APMNtj8qR5M9ueIH5WEqdRW10kKzlEm/ESvjyjEVRhXiwWyKkPch/OIUPKexKaEeOBdKocSnNx1+5ntk8OXWRQgjfwtQvm1NE/qD7fViBVUlTRk0c1SVpZaybIZkiMWmA1hzsdUbDP2mzPek1ydsVffw0I8z/dRo5gXQSPq06WfNIKpsiQF8LqP+KU+462A2tbHxFzq9VozI9PeFV+xO59wlJogv6q2yA0Jfv9BFgVgNzItIsUMvStrfkUBTYgaG9djp/vAm+SwMdnLSXILJtMO/3eRQ== EC Key
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3Q9+JcjEck8CylGEekvskypE8lT3hYnCCfGUoMTAURokD8STtEaVxr197efitQkvwSYxOnDXo2Qr59FqlQ6QtFeCynX87VerN49LJ0pUA1NoYBUCvWRzwpaa8CXGhYPRpfku12mJ0qjqmGFaR5jqhXTNfXmRcWePsXqS+b3FFEqw8BhKg6By1z7NLvKeaEno4Kd0wPpxzs+hFRnk38k2p+1YO1vZzZ2mgEVp9/2577t4TmP8ucnsb9X4vURRpOJwjG8HIgmmQFUVxHRST8Zu3zOXfg9Yv/n3JYhXhvvPxkV4JB6ZbVq0cLHasexFAxz7nTmF1gDWaPbGxmdZtaDe/ CH Key
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDfuDFmwNaY2WlwVlGeG1pvGiU5KfqMbTwo38hO5bm3KutJtNe9Q2GgKXKbD4WCrpsa3QZPENzGWvkctORzaZNxQ8S4FxUV5M5NEVMs0vKa4a8TksqhoARP7eetvRF6leYtVYhtUyDmj1YzxJEMRbbs3SFhcSkA7HyWDAIi8rc4WCg+BDpmCyEshuuBE26+1g2R5lJTwVwmgMHs7p59Gop1Hbn33DNQyj9S8u24DxCJpnzkjegWiU4GA+pesgeWymxYhAKDfb2yWR6aBAvnZEn10evIfe9ORpnexmko4/DBgeweISCm16ffVhya4qNBrUxThKJU4286zwq/d0mDDU8x BI Key
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4uDl4WGNgsIjGeJbUYFKSn3nhdiZJHUE47JK3W6VTfWpd1JNWNZ1CW0mJ+y7dQl0vmQq5DHQguYQLdTP4m8Waswh/9ckoX7tErA2FEZUQTmUrpeXrc8n2E8OeHh1ooqyWXP5Oup3MKA7qwMrkktM+m/MEhVhg0GUgsMd5BriePDgpdlOblEVZx+5IY3/PJc3ng+PmJbHfCds6+HgPR2tY2n6y4Ir7+15mZWjpLo6BOZlHmWAsqb8lfyp+8hrkfr4bKhY3AE2SQCqkF5LGgI84gJ5ooFN0bL9rl3bW4UNCqSiYH/QjLH+yzs55/BRBpV89mqDyDFHvsJUXta/Vz/UJ CA Key
write_files: write_files:
- path: /root/disable-aws-metadata.sh
permission: '0755'
content: |
iptables -t nat -I PREROUTING -p tcp -d 169.254.169.254 --dport 80 -j DNAT --to-destination 1.1.1.1
- path: /etc/docker/daemon.json
permission: '0644'
content: |
{
"storage-driver": "overlay2"
}
- path: /root/overrides.list - path: /root/overrides.list
permission: '0644' permission: '0644'
content: | content: |
@ -43,10 +32,6 @@ coreos:
group: {{ coreos_channel }} group: {{ coreos_channel }}
units: units:
- name: update-engine.service
command: stop
- name: locksmithd.service
command: stop
- name: systemd-journal-gatewayd.socket - name: systemd-journal-gatewayd.socket
command: start command: start
enable: yes enable: yes
@ -59,7 +44,7 @@ coreos:
[Install] [Install]
WantedBy=sockets.target WantedBy=sockets.target
{{ dockersystemd('quay-builder', {{ dockersystemd('quay-builder',
worker_image, 'quay.io/coreos/registry-build-worker',
quay_username, quay_username,
quay_password, quay_password,
worker_tag, worker_tag,
@ -69,28 +54,37 @@ coreos:
restart_policy='no' restart_policy='no'
) | indent(4) }} ) | indent(4) }}
{% if logentries_token -%} {% if logentries_token -%}
# https://github.com/kelseyhightower/journal-2-logentries/pull/11 so moved journal-2-logentries to coreos
{{ dockersystemd('builder-logs', {{ dockersystemd('builder-logs',
'quay.io/coreos/journal-2-logentries', 'quay.io/kelseyhightower/journal-2-logentries',
extra_args='--env-file /root/overrides.list -v /run/journald.sock:/run/journald.sock', extra_args='--env-file /root/overrides.list -v /run/journald.sock:/run/journald.sock',
flattened=True, flattened=True,
after_units=['quay-builder.service'] after_units=['quay-builder.service']
) | indent(4) }} ) | indent(4) }}
{%- endif %} {%- endif %}
- name: disable-aws-metadata.service - name: format-var-lib-docker.service
command: start command: start
enable: yes
content: | content: |
[Unit] [Unit]
Description=Disable AWS metadata service Before=docker.service var-lib-docker.mount
Before=network-pre.target ConditionPathExists=!/var/lib/docker.btrfs
Wants=network-pre.target
[Service] [Service]
Type=oneshot Type=oneshot
ExecStart=/root/disable-aws-metadata.sh ExecStart=/usr/bin/truncate --size={{ volume_size }} /var/lib/docker.btrfs
RemainAfterExit=yes ExecStart=/usr/sbin/mkfs.btrfs /var/lib/docker.btrfs
- name: var-lib-docker.mount
enable: true
content: |
[Unit]
Before=docker.service
After=format-var-lib-docker.service
Requires=format-var-lib-docker.service
[Install] [Install]
WantedBy=multi-user.target RequiredBy=docker.service
[Mount]
What=/var/lib/docker.btrfs
Where=/var/lib/docker
Type=btrfs
Options=loop,discard
- name: machine-lifetime.service - name: machine-lifetime.service
command: start command: start
enable: yes enable: yes
@ -99,4 +93,4 @@ coreos:
Description=Machine Lifetime Service Description=Machine Lifetime Service
[Service] [Service]
Type=oneshot Type=oneshot
ExecStart=/bin/sh -xc "/bin/sleep {{ max_lifetime_s }}; /usr/bin/systemctl --no-block poweroff" ExecStart=/bin/sh -xc "/bin/sleep 10800; /usr/bin/systemctl --no-block poweroff"
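For reference, a minimal sketch of rendering a builder cloud-config template such as the one above with Jinja2; the template path and every variable value are illustrative assumptions, and the real template additionally needs the dockersystemd macro in scope.
# Hedged sketch: rendering a builder cloud-config template with Jinja2.
# The template path and all values below are illustrative assumptions.
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader('templates'))
template = env.get_template('cloudconfig.yaml')   # assumed file name

rendered = template.render(
    build_uuid='example-build-uuid',     # falls back to 'quay-builder' when empty
    ssh_authorized_keys=[],              # optional block, guarded by {% if %}
    coreos_channel='stable',
    worker_image='quay.io/coreos/registry-build-worker',
    worker_tag='latest',
    quay_username='builder',
    quay_password='example-password',
    logentries_token=None,
    max_lifetime_s=10800,                # 3 hours, matching the older hardcoded value
)
print(rendered)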

View file

@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="123" height="20"><linearGradient id="b" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><mask id="a"><rect width="123" height="20" rx="3" fill="#fff"/></mask><g mask="url(#a)"><path fill="#555" d="M0 0h63v20H0z"/><path fill="#9f9f9f" d="M63 0h60v20H63z"/><path fill="url(#b)" d="M0 0h123v20H0z"/></g><g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="11"><text x="32.5" y="15" fill="#010101" fill-opacity=".3">container</text><text x="32.5" y="14">container</text><text x="92" y="15" fill="#010101" fill-opacity=".3">cancelled</text><text x="92" y="14">cancelled</text></g></svg>

View file

@ -1,85 +1,7 @@
import os
from abc import ABCMeta, abstractmethod
from jsonschema import validate
from six import add_metaclass
from active_migration import ActiveDataMigration, ERTMigrationFlags
from endpoints.building import PreparedBuild from endpoints.building import PreparedBuild
from data import model from data import model
from buildtrigger.triggerutil import get_trigger_config, InvalidServiceException from buildtrigger.triggerutil import get_trigger_config, InvalidServiceException
from jsonschema import validate
NAMESPACES_SCHEMA = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'personal': {
'type': 'boolean',
'description': 'True if the namespace is the user\'s personal namespace',
},
'score': {
'type': 'number',
'description': 'Score of the relevance of the namespace',
},
'avatar_url': {
'type': ['string', 'null'],
'description': 'URL of the avatar for this namespace',
},
'url': {
'type': 'string',
'description': 'URL of the website to view the namespace',
},
'id': {
'type': 'string',
'description': 'Trigger-internal ID of the namespace',
},
'title': {
'type': 'string',
'description': 'Human-readable title of the namespace',
},
},
'required': ['personal', 'score', 'avatar_url', 'id', 'title'],
},
}
BUILD_SOURCES_SCHEMA = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'name': {
'type': 'string',
'description': 'The name of the repository, without its namespace',
},
'full_name': {
'type': 'string',
'description': 'The name of the repository, with its namespace',
},
'description': {
'type': 'string',
'description': 'The description of the repository. May be an empty string',
},
'last_updated': {
'type': 'number',
'description': 'The date/time when the repository was last updated, since epoch in UTC',
},
'url': {
'type': 'string',
'description': 'The URL at which to view the repository in the browser',
},
'has_admin_permissions': {
'type': 'boolean',
'description': 'True if the current user has admin permissions on the repository',
},
'private': {
'type': 'boolean',
'description': 'True if the repository is private',
},
},
'required': ['name', 'full_name', 'description', 'last_updated',
'has_admin_permissions', 'private'],
},
}
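For reference, a minimal sketch of validating a sources list against the BUILD_SOURCES_SCHEMA defined above (assumed to be in scope); the sample repository entry is illustrative.
# Hedged sketch: jsonschema validation against BUILD_SOURCES_SCHEMA (defined above).
# The sample entry is illustrative, not taken from a real trigger.
from jsonschema import validate, ValidationError

sources = [{
    'name': 'example-repo',
    'full_name': 'example-namespace/example-repo',
    'description': '',
    'last_updated': 0,
    'url': 'https://bitbucket.org/example-namespace/example-repo',
    'has_admin_permissions': True,
    'private': False,
}]

validate(sources, BUILD_SOURCES_SCHEMA)      # passes: every required key is present

try:
    validate([{'name': 'incomplete'}], BUILD_SOURCES_SCHEMA)
except ValidationError as ve:
    print(ve.message)                        # fails: required keys such as 'full_name' are missing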
METADATA_SCHEMA = { METADATA_SCHEMA = {
'type': 'object', 'type': 'object',
@ -96,7 +18,7 @@ METADATA_SCHEMA = {
'ref': { 'ref': {
'type': 'string', 'type': 'string',
'description': 'git reference for a git commit', 'description': 'git reference for a git commit',
'pattern': r'^refs\/(heads|tags|remotes)\/(.+)$', 'pattern': '^refs\/(heads|tags|remotes)\/(.+)$',
}, },
'default_branch': { 'default_branch': {
'type': 'string', 'type': 'string',
@ -157,14 +79,13 @@ METADATA_SCHEMA = {
'required': ['username'], 'required': ['username'],
}, },
}, },
'required': ['message'], 'required': ['url', 'message', 'date'],
}, },
}, },
'required': ['commit', 'git_url'], 'required': ['commit', 'git_url'],
} }
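For reference, a short check of the 'ref' pattern from the schema above against a few illustrative refs.
# Worked example for the 'ref' pattern above; the sample refs are illustrative.
import re

REF_PATTERN = r'^refs\/(heads|tags|remotes)\/(.+)$'
assert re.match(REF_PATTERN, 'refs/heads/master') is not None
assert re.match(REF_PATTERN, 'refs/tags/v1.0') is not None
assert re.match(REF_PATTERN, 'refs/notes/commits') is None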
@add_metaclass(ABCMeta)
class BuildTriggerHandler(object): class BuildTriggerHandler(object):
def __init__(self, trigger, override_config=None): def __init__(self, trigger, override_config=None):
self.trigger = trigger self.trigger = trigger
@ -173,108 +94,74 @@ class BuildTriggerHandler(object):
@property @property
def auth_token(self): def auth_token(self):
""" Returns the auth token for the trigger. """ """ Returns the auth token for the trigger. """
# NOTE: This check is for testing. return self.trigger.auth_token
if isinstance(self.trigger.auth_token, str):
return self.trigger.auth_token
# TODO(remove-unenc): Remove legacy field.
if self.trigger.secure_auth_token is not None:
return self.trigger.secure_auth_token.decrypt()
if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS):
return self.trigger.auth_token
return None
@abstractmethod
def load_dockerfile_contents(self): def load_dockerfile_contents(self):
""" """
Loads the Dockerfile found for the trigger's config and returns them or None if none could Loads the Dockerfile found for the trigger's config and returns them or None if none could
be found/loaded. be found/loaded.
""" """
pass raise NotImplementedError
@abstractmethod def list_build_sources(self):
def list_build_source_namespaces(self):
""" """
Take the auth information for the specific trigger type and load the Take the auth information for the specific trigger type and load the
list of namespaces that can contain build sources. list of build sources(repositories).
""" """
pass raise NotImplementedError
@abstractmethod
def list_build_sources_for_namespace(self, namespace):
"""
Take the auth information for the specific trigger type and load the
list of repositories under the given namespace.
"""
pass
@abstractmethod
def list_build_subdirs(self): def list_build_subdirs(self):
""" """
Take the auth information and the specified config so far and list all of Take the auth information and the specified config so far and list all of
the possible subdirs containing dockerfiles. the possible subdirs containing dockerfiles.
""" """
pass raise NotImplementedError
@abstractmethod def handle_trigger_request(self):
def handle_trigger_request(self, request):
""" """
Transform the incoming request data into a set of actions. Returns a PreparedBuild. Transform the incoming request data into a set of actions. Returns a PreparedBuild.
""" """
pass raise NotImplementedError
@abstractmethod
def is_active(self): def is_active(self):
""" """
Returns True if the current build trigger is active. Inactive means further Returns True if the current build trigger is active. Inactive means further
setup is needed. setup is needed.
""" """
pass raise NotImplementedError
@abstractmethod
def activate(self, standard_webhook_url): def activate(self, standard_webhook_url):
""" """
Activates the trigger for the service, with the given new configuration. Activates the trigger for the service, with the given new configuration.
Returns new public and private config that should be stored if successful. Returns new public and private config that should be stored if successful.
""" """
pass raise NotImplementedError
@abstractmethod
def deactivate(self): def deactivate(self):
""" """
Deactivates the trigger for the service, removing any hooks installed in Deactivates the trigger for the service, removing any hooks installed in
the remote service. Returns the new config that should be stored if this the remote service. Returns the new config that should be stored if this
trigger is going to be re-activated. trigger is going to be re-activated.
""" """
pass raise NotImplementedError
@abstractmethod
def manual_start(self, run_parameters=None): def manual_start(self, run_parameters=None):
""" """
Manually creates a repository build for this trigger. Returns a PreparedBuild. Manually creates a repository build for this trigger. Returns a PreparedBuild.
""" """
pass raise NotImplementedError
@abstractmethod
def list_field_values(self, field_name, limit=None): def list_field_values(self, field_name, limit=None):
""" """
Lists all values for the given custom trigger field. For example, a trigger might have a Lists all values for the given custom trigger field. For example, a trigger might have a
field named "branches", and this method would return all branches. field named "branches", and this method would return all branches.
""" """
pass raise NotImplementedError
@abstractmethod
def get_repository_url(self): def get_repository_url(self):
""" Returns the URL of the current trigger's repository. Note that this operation """ Returns the URL of the current trigger's repository. Note that this operation
can be called in a loop, so it should be as fast as possible. """ can be called in a loop, so it should be as fast as possible. """
pass raise NotImplementedError
@classmethod
def filename_is_dockerfile(cls, file_name):
""" Returns whether the file is named Dockerfile or follows the convention <name>.Dockerfile"""
return file_name.endswith(".Dockerfile") or u"Dockerfile" == file_name
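For reference, how the filename_is_dockerfile check above behaves on a few illustrative names.
# Worked example for filename_is_dockerfile; the file names are illustrative.
BuildTriggerHandler.filename_is_dockerfile('Dockerfile')           # True
BuildTriggerHandler.filename_is_dockerfile('server.Dockerfile')    # True
BuildTriggerHandler.filename_is_dockerfile('Dockerfile.old')       # False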
@classmethod @classmethod
def service_name(cls): def service_name(cls):
@ -303,10 +190,14 @@ class BuildTriggerHandler(object):
def get_dockerfile_path(self): def get_dockerfile_path(self):
""" Returns the normalized path to the Dockerfile found in the subdirectory """ Returns the normalized path to the Dockerfile found in the subdirectory
in the config. """ in the config. """
dockerfile_path = self.config.get('dockerfile_path') or 'Dockerfile' subdirectory = self.config.get('subdir', '')
if dockerfile_path[0] == '/': if subdirectory == '/':
dockerfile_path = dockerfile_path[1:] subdirectory = ''
return dockerfile_path else:
if not subdirectory.endswith('/'):
subdirectory = subdirectory + '/'
return subdirectory + 'Dockerfile'
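For reference, the effect of the new get_dockerfile_path on a few illustrative config values.
# Worked example for the new get_dockerfile_path; the config values are illustrative.
#   {'dockerfile_path': '/subdir/Dockerfile'}  ->  'subdir/Dockerfile'   (leading '/' stripped)
#   {'dockerfile_path': 'subdir/Dockerfile'}   ->  'subdir/Dockerfile'
#   {}                                         ->  'Dockerfile'          (default)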
def prepare_build(self, metadata, is_manual=False): def prepare_build(self, metadata, is_manual=False):
# Ensure that the metadata meets the scheme. # Ensure that the metadata meets the scheme.
@ -316,10 +207,10 @@ class BuildTriggerHandler(object):
ref = metadata.get('ref', None) ref = metadata.get('ref', None)
commit_sha = metadata['commit'] commit_sha = metadata['commit']
default_branch = metadata.get('default_branch', None) default_branch = metadata.get('default_branch', None)
prepared = PreparedBuild(self.trigger) prepared = PreparedBuild(self.trigger)
prepared.name_from_sha(commit_sha) prepared.name_from_sha(commit_sha)
prepared.subdirectory = config.get('dockerfile_path', None) prepared.subdirectory = config.get('subdir', None)
prepared.context = config.get('context', None)
prepared.is_manual = is_manual prepared.is_manual = is_manual
prepared.metadata = metadata prepared.metadata = metadata
@ -329,39 +220,3 @@ class BuildTriggerHandler(object):
prepared.tags = [commit_sha[:7]] prepared.tags = [commit_sha[:7]]
return prepared return prepared
@classmethod
def build_sources_response(cls, sources):
validate(sources, BUILD_SOURCES_SCHEMA)
return sources
@classmethod
def build_namespaces_response(cls, namespaces_dict):
namespaces = list(namespaces_dict.values())
validate(namespaces, NAMESPACES_SCHEMA)
return namespaces
@classmethod
def get_parent_directory_mappings(cls, dockerfile_path, current_paths=None):
""" Returns a map of dockerfile_paths to it's possible contexts. """
if dockerfile_path == "":
return {}
if dockerfile_path[0] != os.path.sep:
dockerfile_path = os.path.sep + dockerfile_path
dockerfile_path = os.path.normpath(dockerfile_path)
all_paths = set()
path, _ = os.path.split(dockerfile_path)
if path == "":
path = os.path.sep
all_paths.add(path)
for i in range(1, len(path.split(os.path.sep))):
path, _ = os.path.split(path)
all_paths.add(path)
if current_paths:
return dict({dockerfile_path: list(all_paths)}, **current_paths)
return {dockerfile_path: list(all_paths)}
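For reference, a worked example of get_parent_directory_mappings on an illustrative path; a POSIX path separator is assumed, and the ordering of the contexts can vary because a set is used internally.
# Worked example; the input path is illustrative and os.path.sep is assumed to be '/'.
BuildTriggerHandler.get_parent_directory_mappings('a/b/Dockerfile')
# -> {'/a/b/Dockerfile': ['/a/b', '/a', '/']}   (context order may differ)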

View file

@ -1,22 +1,19 @@
import logging import logging
import os
import re import re
from calendar import timegm
import dateutil.parser
from bitbucket import BitBucket
from jsonschema import validate from jsonschema import validate
from app import app, get_app_url
from buildtrigger.basehandler import BuildTriggerHandler
from buildtrigger.triggerutil import (RepositoryReadException, TriggerActivationException, from buildtrigger.triggerutil import (RepositoryReadException, TriggerActivationException,
TriggerDeactivationException, TriggerStartException, TriggerDeactivationException, TriggerStartException,
InvalidPayloadException, TriggerProviderException, InvalidPayloadException, TriggerProviderException,
SkipRequestException,
determine_build_ref, raise_if_skipped_build, determine_build_ref, raise_if_skipped_build,
find_matching_branches) find_matching_branches)
from util.dict_wrappers import JSONPathDict, SafeDictSetter
from buildtrigger.basehandler import BuildTriggerHandler
from app import app, get_app_url
from bitbucket import BitBucket
from util.security.ssh import generate_ssh_keypair from util.security.ssh import generate_ssh_keypair
from util.dict_wrappers import JSONPathDict, SafeDictSetter
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -34,7 +31,7 @@ BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = {
}, },
}, },
'required': ['full_name'], 'required': ['full_name'],
}, # /Repository },
'push': { 'push': {
'type': 'object', 'type': 'object',
'properties': { 'properties': {
@ -64,15 +61,21 @@ BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = {
'user': { 'user': {
'type': 'object', 'type': 'object',
'properties': { 'properties': {
'display_name': { 'username': {
'type': 'string',
},
'account_id': {
'type': 'string', 'type': 'string',
}, },
'links': { 'links': {
'type': 'object', 'type': 'object',
'properties': { 'properties': {
'html': {
'type': 'object',
'properties': {
'href': {
'type': 'string',
},
},
'required': ['href'],
},
'avatar': { 'avatar': {
'type': 'object', 'type': 'object',
'properties': { 'properties': {
@ -83,37 +86,59 @@ BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = {
'required': ['href'], 'required': ['href'],
}, },
}, },
'required': ['avatar'], 'required': ['html', 'avatar'],
}, # /User },
}, },
}, # /Author 'required': ['username'],
},
}, },
}, },
'links': {
'type': 'object',
'properties': {
'html': {
'type': 'object',
'properties': {
'href': {
'type': 'string',
},
},
'required': ['href'],
},
},
'required': ['html'],
},
}, },
'required': ['hash', 'message', 'date'], 'required': ['hash', 'message', 'date'],
}, # /Target },
}, },
'required': ['name', 'target'], 'required': ['target'],
}, # /New },
}, },
}, # /Changes item },
}, # /Changes },
}, },
'required': ['changes'], 'required': ['changes'],
}, # / Push },
}, },
'actor': { 'actor': {
'type': 'object', 'type': 'object',
'properties': { 'properties': {
'account_id': { 'username': {
'type': 'string',
},
'display_name': {
'type': 'string', 'type': 'string',
}, },
'links': { 'links': {
'type': 'object', 'type': 'object',
'properties': { 'properties': {
'html': {
'type': 'object',
'properties': {
'href': {
'type': 'string',
},
},
'required': ['href'],
},
'avatar': { 'avatar': {
'type': 'object', 'type': 'object',
'properties': { 'properties': {
@ -124,12 +149,13 @@ BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = {
'required': ['href'], 'required': ['href'],
}, },
}, },
'required': ['avatar'], 'required': ['html', 'avatar'],
}, },
}, },
}, # /Actor 'required': ['username'],
},
'required': ['push', 'repository'], 'required': ['push', 'repository'],
} # /Root }
BITBUCKET_COMMIT_INFO_SCHEMA = { BITBUCKET_COMMIT_INFO_SCHEMA = {
'type': 'object', 'type': 'object',
@ -177,7 +203,8 @@ def get_transformed_commit_info(bb_commit, ref, default_branch, repository_name,
author = lookup_author(match.group(1)) author = lookup_author(match.group(1))
author_info = JSONPathDict(author) if author is not None else None author_info = JSONPathDict(author) if author is not None else None
if author_info: if author_info:
config['commit_info.author.username'] = author_info['user.display_name'] config['commit_info.author.username'] = author_info['user.username']
config['commit_info.author.url'] = 'https://bitbucket.org/%s/' % author_info['user.username']
config['commit_info.author.avatar_url'] = author_info['user.avatar'] config['commit_info.author.avatar_url'] = author_info['user.avatar']
return config.dict_value() return config.dict_value()
@ -190,17 +217,16 @@ def get_transformed_webhook_payload(bb_payload, default_branch=None):
try: try:
validate(bb_payload, BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA) validate(bb_payload, BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA)
except Exception as exc: except Exception as exc:
logger.exception('Exception when validating Bitbucket webhook payload: %s from %s', exc.message, logger.exception('Exception when validating Bitbucket webhook payload: %s from %s', exc.message, bb_payload)
bb_payload)
raise InvalidPayloadException(exc.message) raise InvalidPayloadException(exc.message)
payload = JSONPathDict(bb_payload) payload = JSONPathDict(bb_payload)
change = payload['push.changes[-1].new'] change = payload['push.changes[-1].new']
if not change: if not change:
raise SkipRequestException return None
is_branch = change['type'] == 'branch' ref = ('refs/heads/' + change['name'] if change['type'] == 'branch'
ref = 'refs/heads/' + change['name'] if is_branch else 'refs/tags/' + change['name'] else 'refs/tags/' + change['name'])
repository_name = payload['repository.full_name'] repository_name = payload['repository.full_name']
target = change['target'] target = change['target']
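For reference, how the ref is constructed above for illustrative branch and tag changes.
# Worked example for the ref construction above; the change values are illustrative.
#   {'type': 'branch', 'name': 'master'}  ->  'refs/heads/master'
#   {'type': 'tag',    'name': 'v1.0'}    ->  'refs/tags/v1.0'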
@ -211,14 +237,16 @@ def get_transformed_webhook_payload(bb_payload, default_branch=None):
config['default_branch'] = default_branch config['default_branch'] = default_branch
config['git_url'] = 'git@bitbucket.org:%s.git' % repository_name config['git_url'] = 'git@bitbucket.org:%s.git' % repository_name
config['commit_info.url'] = target['links.html.href'] or '' config['commit_info.url'] = target['links.html.href']
config['commit_info.message'] = target['message'] config['commit_info.message'] = target['message']
config['commit_info.date'] = target['date'] config['commit_info.date'] = target['date']
config['commit_info.author.username'] = target['author.user.display_name'] config['commit_info.author.username'] = target['author.user.username']
config['commit_info.author.url'] = target['author.user.links.html.href']
config['commit_info.author.avatar_url'] = target['author.user.links.avatar.href'] config['commit_info.author.avatar_url'] = target['author.user.links.avatar.href']
config['commit_info.committer.username'] = payload['actor.display_name'] config['commit_info.committer.username'] = payload['actor.username']
config['commit_info.committer.url'] = payload['actor.links.html.href']
config['commit_info.committer.avatar_url'] = payload['actor.links.avatar.href'] config['commit_info.committer.avatar_url'] = payload['actor.links.avatar.href']
return config.dict_value() return config.dict_value()
@ -296,8 +324,8 @@ class BitbucketBuildTrigger(BuildTriggerHandler):
if not result: if not result:
return False return False
self.put_config_key('account_id', data['user']['account_id']) username = data['user']['username']
self.put_config_key('nickname', data['user']['nickname']) self.put_config_key('username', username)
return True return True
def is_active(self): def is_active(self):
@ -362,7 +390,7 @@ class BitbucketBuildTrigger(BuildTriggerHandler):
return config return config
def list_build_source_namespaces(self): def list_build_sources(self):
bitbucket_client = self._get_authorized_client() bitbucket_client = self._get_authorized_client()
(result, data, err_msg) = bitbucket_client.get_visible_repositories() (result, data, err_msg) = bitbucket_client.get_visible_repositories()
if not result: if not result:
@ -370,43 +398,22 @@ class BitbucketBuildTrigger(BuildTriggerHandler):
namespaces = {} namespaces = {}
for repo in data: for repo in data:
owner = repo['owner'] if not repo['scm'] == 'git':
continue
if owner in namespaces: owner = repo['owner']
namespaces[owner]['score'] = namespaces[owner]['score'] + 1 if not owner in namespaces:
else:
namespaces[owner] = { namespaces[owner] = {
'personal': owner == self.config.get('nickname', self.config.get('username')), 'personal': owner == self.config.get('username'),
'id': owner, 'repos': [],
'title': owner, 'info': {
'avatar_url': repo['logo'], 'name': owner
'url': 'https://bitbucket.org/%s' % (owner), }
'score': 1,
} }
return BuildTriggerHandler.build_namespaces_response(namespaces) namespaces[owner]['repos'].append(owner + '/' + repo['slug'])
def list_build_sources_for_namespace(self, namespace): return namespaces.values()
def repo_view(repo):
last_modified = dateutil.parser.parse(repo['utc_last_updated'])
return {
'name': repo['slug'],
'full_name': '%s/%s' % (repo['owner'], repo['slug']),
'description': repo['description'] or '',
'last_updated': timegm(last_modified.utctimetuple()),
'url': 'https://bitbucket.org/%s/%s' % (repo['owner'], repo['slug']),
'has_admin_permissions': repo['read_only'] is False,
'private': repo['is_private'],
}
bitbucket_client = self._get_authorized_client()
(result, data, err_msg) = bitbucket_client.get_visible_repositories()
if not result:
raise RepositoryReadException('Could not read repository list: ' + err_msg)
repos = [repo_view(repo) for repo in data if repo['owner'] == namespace]
return BuildTriggerHandler.build_sources_response(repos)
def list_build_subdirs(self): def list_build_subdirs(self):
config = self.config config = self.config
@ -423,7 +430,10 @@ class BitbucketBuildTrigger(BuildTriggerHandler):
raise RepositoryReadException(err_msg) raise RepositoryReadException(err_msg)
files = set([f['path'] for f in data['files']]) files = set([f['path'] for f in data['files']])
return ["/" + file_path for file_path in files if self.filename_is_dockerfile(os.path.basename(file_path))] if 'Dockerfile' in files:
return ['/']
return []
def load_dockerfile_contents(self): def load_dockerfile_contents(self):
repository = self._get_repository_client() repository = self._get_repository_client()
@ -431,14 +441,11 @@ class BitbucketBuildTrigger(BuildTriggerHandler):
(result, data, err_msg) = repository.get_raw_path_contents(path, revision='master') (result, data, err_msg) = repository.get_raw_path_contents(path, revision='master')
if not result: if not result:
return None raise RepositoryReadException(err_msg)
return data return data
def list_field_values(self, field_name, limit=None): def list_field_values(self, field_name, limit=None):
if 'build_source' not in self.config:
return None
source = self.config['build_source'] source = self.config['build_source']
(namespace, name) = source.split('/') (namespace, name) = source.split('/')
@ -487,9 +494,6 @@ class BitbucketBuildTrigger(BuildTriggerHandler):
def handle_trigger_request(self, request): def handle_trigger_request(self, request):
payload = request.get_json() payload = request.get_json()
if payload is None:
raise InvalidPayloadException('Missing payload')
logger.debug('Got BitBucket request: %s', payload) logger.debug('Got BitBucket request: %s', payload)
repository = self._get_repository_client() repository = self._get_repository_client()
@ -511,7 +515,7 @@ class BitbucketBuildTrigger(BuildTriggerHandler):
# Lookup the commit SHA for the branch. # Lookup the commit SHA for the branch.
(result, data, _) = repository.get_branch(branch_name) (result, data, _) = repository.get_branch(branch_name)
if not result: if not result:
raise TriggerStartException('Could not find branch in repository') raise TriggerStartException('Could not find branch commit SHA')
return data['target']['hash'] return data['target']['hash']
@ -519,7 +523,7 @@ class BitbucketBuildTrigger(BuildTriggerHandler):
# Lookup the commit SHA for the tag. # Lookup the commit SHA for the tag.
(result, data, _) = repository.get_tag(tag_name) (result, data, _) = repository.get_tag(tag_name)
if not result: if not result:
raise TriggerStartException('Could not find tag in repository') raise TriggerStartException('Could not find tag commit SHA')
return data['target']['hash'] return data['target']['hash']

View file

@ -16,6 +16,9 @@ from buildtrigger.bitbuckethandler import (BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA as b
from buildtrigger.githubhandler import (GITHUB_WEBHOOK_PAYLOAD_SCHEMA as gh_schema, from buildtrigger.githubhandler import (GITHUB_WEBHOOK_PAYLOAD_SCHEMA as gh_schema,
get_transformed_webhook_payload as gh_payload) get_transformed_webhook_payload as gh_payload)
from buildtrigger.bitbuckethandler import (BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA as bb_schema,
get_transformed_webhook_payload as bb_payload)
from buildtrigger.gitlabhandler import (GITLAB_WEBHOOK_PAYLOAD_SCHEMA as gl_schema, from buildtrigger.gitlabhandler import (GITLAB_WEBHOOK_PAYLOAD_SCHEMA as gl_schema,
get_transformed_webhook_payload as gl_payload) get_transformed_webhook_payload as gl_payload)
@ -159,7 +162,7 @@ class CustomBuildTrigger(BuildTriggerHandler):
def handle_trigger_request(self, request): def handle_trigger_request(self, request):
payload = request.data payload = request.data
if not payload: if not payload:
raise InvalidPayloadException('Missing expected payload') raise InvalidPayloadException()
logger.debug('Payload %s', payload) logger.debug('Payload %s', payload)
@ -183,10 +186,7 @@ class CustomBuildTrigger(BuildTriggerHandler):
'git_url': config['build_source'], 'git_url': config['build_source'],
} }
try: return self.prepare_build(metadata, is_manual=True)
return self.prepare_build(metadata, is_manual=True)
except ValidationError as ve:
raise TriggerStartException(ve.message)
def activate(self, standard_webhook_url): def activate(self, standard_webhook_url):
config = self.config config = self.config
@ -212,18 +212,3 @@ class CustomBuildTrigger(BuildTriggerHandler):
def get_repository_url(self): def get_repository_url(self):
return None return None
def list_build_source_namespaces(self):
raise NotImplementedError
def list_build_sources_for_namespace(self, namespace):
raise NotImplementedError
def list_build_subdirs(self):
raise NotImplementedError
def list_field_values(self, field_name, limit=None):
raise NotImplementedError
def load_dockerfile_contents(self):
raise NotImplementedError

View file

@ -1,29 +1,25 @@
import logging import logging
import os.path import os.path
import base64 import base64
import re
from calendar import timegm
from functools import wraps
from ssl import SSLError
from github import (Github, UnknownObjectException, GithubException,
BadCredentialsException as GitHubBadCredentialsException)
from jsonschema import validate
from app import app, github_trigger from app import app, github_trigger
from jsonschema import validate
from buildtrigger.triggerutil import (RepositoryReadException, TriggerActivationException, from buildtrigger.triggerutil import (RepositoryReadException, TriggerActivationException,
TriggerDeactivationException, TriggerStartException, TriggerDeactivationException, TriggerStartException,
EmptyRepositoryException, ValidationRequestException, EmptyRepositoryException, ValidationRequestException,
SkipRequestException, InvalidPayloadException, SkipRequestException, InvalidPayloadException,
determine_build_ref, raise_if_skipped_build, determine_build_ref, raise_if_skipped_build,
find_matching_branches) find_matching_branches)
from buildtrigger.basehandler import BuildTriggerHandler from buildtrigger.basehandler import BuildTriggerHandler
from endpoints.exception import ExternalServiceError
from util.security.ssh import generate_ssh_keypair from util.security.ssh import generate_ssh_keypair
from util.dict_wrappers import JSONPathDict, SafeDictSetter from util.dict_wrappers import JSONPathDict, SafeDictSetter
from github import (Github, UnknownObjectException, GithubException,
BadCredentialsException as GitHubBadCredentialsException)
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
GITHUB_WEBHOOK_PAYLOAD_SCHEMA = { GITHUB_WEBHOOK_PAYLOAD_SCHEMA = {
@ -33,7 +29,7 @@ GITHUB_WEBHOOK_PAYLOAD_SCHEMA = {
'type': 'string', 'type': 'string',
}, },
'head_commit': { 'head_commit': {
'type': ['object', 'null'], 'type': 'object',
'properties': { 'properties': {
'id': { 'id': {
'type': 'string', 'type': 'string',
@ -102,13 +98,10 @@ def get_transformed_webhook_payload(gh_payload, default_branch=None, lookup_user
payload = JSONPathDict(gh_payload) payload = JSONPathDict(gh_payload)
if payload['head_commit'] is None:
raise SkipRequestException
config = SafeDictSetter() config = SafeDictSetter()
config['commit'] = payload['head_commit.id'] config['commit'] = payload['head_commit.id']
config['ref'] = payload['ref'] config['ref'] = payload['ref']
config['default_branch'] = payload['repository.default_branch'] or default_branch config['default_branch'] = default_branch
config['git_url'] = payload['repository.ssh_url'] config['git_url'] = payload['repository.ssh_url']
config['commit_info.url'] = payload['head_commit.url'] config['commit_info.url'] = payload['head_commit.url']
@ -143,18 +136,6 @@ def get_transformed_webhook_payload(gh_payload, default_branch=None, lookup_user
return config.dict_value() return config.dict_value()
def _catch_ssl_errors(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except SSLError as se:
msg = 'Request to the GitHub API failed: %s' % se.message
logger.exception(msg)
raise ExternalServiceError(msg)
return wrapper
class GithubBuildTrigger(BuildTriggerHandler): class GithubBuildTrigger(BuildTriggerHandler):
""" """
BuildTrigger for GitHub that uses the archive API and buildpacks. BuildTrigger for GitHub that uses the archive API and buildpacks.
@ -185,7 +166,6 @@ class GithubBuildTrigger(BuildTriggerHandler):
return default_msg return default_msg
@_catch_ssl_errors
def activate(self, standard_webhook_url): def activate(self, standard_webhook_url):
config = self.config config = self.config
new_build_source = config['build_source'] new_build_source = config['build_source']
@ -233,7 +213,6 @@ class GithubBuildTrigger(BuildTriggerHandler):
return config, {'private_key': private_key} return config, {'private_key': private_key}
@_catch_ssl_errors
def deactivate(self): def deactivate(self):
config = self.config config = self.config
gh_client = self._get_client() gh_client = self._get_client()
@ -262,84 +241,68 @@ class GithubBuildTrigger(BuildTriggerHandler):
raise TriggerDeactivationException(msg) raise TriggerDeactivationException(msg)
# Remove the webhook. # Remove the webhook.
if 'hook_id' in config: try:
try: hook = repo.get_hook(config['hook_id'])
hook = repo.get_hook(config['hook_id']) hook.delete()
hook.delete() except GithubException as ghe:
except GithubException as ghe: default_msg = 'Unable to remove hook: %s' % config['hook_id']
default_msg = 'Unable to remove hook: %s' % config['hook_id'] msg = GithubBuildTrigger._get_error_message(ghe, default_msg)
msg = GithubBuildTrigger._get_error_message(ghe, default_msg) raise TriggerDeactivationException(msg)
raise TriggerDeactivationException(msg)
config.pop('hook_id', None) config.pop('hook_id', None)
self.config = config self.config = config
return config return config
@_catch_ssl_errors def list_build_sources(self):
def list_build_source_namespaces(self):
gh_client = self._get_client() gh_client = self._get_client()
usr = gh_client.get_user() usr = gh_client.get_user()
# Build the full set of namespaces for the user, starting with their own.
namespaces = {}
namespaces[usr.login] = {
'personal': True,
'id': usr.login,
'title': usr.name or usr.login,
'avatar_url': usr.avatar_url,
'url': usr.html_url,
'score': usr.plan.private_repos if usr.plan else 0,
}
for org in usr.get_orgs():
organization = org.login if org.login else org.name
# NOTE: We don't load the organization's html_url nor its plan, because doing
# so requires loading *each organization* via its own API call in this tight
# loop, which was massively slowing down the load time for users when setting
# up triggers.
namespaces[organization] = {
'personal': False,
'id': organization,
'title': organization,
'avatar_url': org.avatar_url,
'url': '',
'score': 0,
}
return BuildTriggerHandler.build_namespaces_response(namespaces)
@_catch_ssl_errors
def list_build_sources_for_namespace(self, namespace):
def repo_view(repo):
return {
'name': repo.name,
'full_name': repo.full_name,
'description': repo.description or '',
'last_updated': timegm(repo.pushed_at.utctimetuple()) if repo.pushed_at else 0,
'url': repo.html_url,
'has_admin_permissions': repo.permissions.admin,
'private': repo.private,
}
gh_client = self._get_client()
usr = gh_client.get_user()
if namespace == usr.login:
repos = [repo_view(repo) for repo in usr.get_repos(type='owner', sort='updated')]
return BuildTriggerHandler.build_sources_response(repos)
try: try:
org = gh_client.get_organization(namespace) repos = usr.get_repos()
if org is None:
return []
except GithubException: except GithubException:
return [] raise RepositoryReadException('Unable to list user repositories')
repos = [repo_view(repo) for repo in org.get_repos(type='member')] namespaces = {}
return BuildTriggerHandler.build_sources_response(repos) has_non_personal = False
for repository in repos:
namespace = repository.owner.login
if not namespace in namespaces:
is_personal_repo = namespace == usr.login
namespaces[namespace] = {
'personal': is_personal_repo,
'repos': [],
'info': {
'name': namespace,
'avatar_url': repository.owner.avatar_url
}
}
if not is_personal_repo:
has_non_personal = True
namespaces[namespace]['repos'].append(repository.full_name)
# In older versions of GitHub Enterprise, the get_repos call above does not
# return any non-personal repositories. In that case, we need to lookup the
# repositories manually.
# TODO: Remove this once we no longer support GHE versions <= 2.1
if not has_non_personal:
for org in usr.get_orgs():
repo_list = [repo.full_name for repo in org.get_repos(type='member')]
namespaces[org.name] = {
'personal': False,
'repos': repo_list,
'info': {
'name': org.name or org.login,
'avatar_url': org.avatar_url
}
}
entries = list(namespaces.values())
entries.sort(key=lambda e: e['info']['name'])
return entries
@_catch_ssl_errors
def list_build_subdirs(self): def list_build_subdirs(self):
config = self.config config = self.config
gh_client = self._get_client() gh_client = self._get_client()
@ -355,8 +318,9 @@ class GithubBuildTrigger(BuildTriggerHandler):
default_commit = repo.get_branch(branches[0]).commit default_commit = repo.get_branch(branches[0]).commit
commit_tree = repo.get_git_tree(default_commit.sha, recursive=True) commit_tree = repo.get_git_tree(default_commit.sha, recursive=True)
return [elem.path for elem in commit_tree.tree return [os.path.dirname(elem.path) for elem in commit_tree.tree
if (elem.type == u'blob' and self.filename_is_dockerfile(os.path.basename(elem.path)))] if (elem.type == u'blob' and
os.path.basename(elem.path) == u'Dockerfile')]
except GithubException as ghe: except GithubException as ghe:
message = ghe.data.get('message', 'Unable to list contents of repository: %s' % source) message = ghe.data.get('message', 'Unable to list contents of repository: %s' % source)
if message == 'Branch not found': if message == 'Branch not found':
@ -364,41 +328,27 @@ class GithubBuildTrigger(BuildTriggerHandler):
raise RepositoryReadException(message) raise RepositoryReadException(message)
@_catch_ssl_errors
def load_dockerfile_contents(self): def load_dockerfile_contents(self):
config = self.config config = self.config
gh_client = self._get_client() gh_client = self._get_client()
source = config['build_source']
source = config['build_source']
path = self.get_dockerfile_path()
try: try:
repo = gh_client.get_repo(source) repo = gh_client.get_repo(source)
file_info = repo.get_file_contents(path)
if file_info is None:
return None
content = file_info.content
if file_info.encoding == 'base64':
content = base64.b64decode(content)
return content
except GithubException as ghe: except GithubException as ghe:
message = ghe.data.get('message', 'Unable to list contents of repository: %s' % source) message = ghe.data.get('message', 'Unable to read Dockerfile: %s' % source)
raise RepositoryReadException(message) raise RepositoryReadException(message)
path = self.get_dockerfile_path()
if not path:
return None
try:
file_info = repo.get_contents(path)
# TypeError is needed because directory inputs cause a TypeError
except (GithubException, TypeError) as ghe:
logger.error("got error from trying to find github file %s" % ghe)
return None
if file_info is None:
return None
if isinstance(file_info, list):
return None
content = file_info.content
if file_info.encoding == 'base64':
content = base64.b64decode(content)
return content
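For reference, the base64 branch above applied to an illustrative GitHub contents response.
# Worked example of the base64 decoding step; the encoded string is illustrative.
import base64
base64.b64decode('RlJPTSBweXRob246Mi43')    # -> 'FROM python:2.7' (bytes on Python 3)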
@_catch_ssl_errors
def list_field_values(self, field_name, limit=None): def list_field_values(self, field_name, limit=None):
if field_name == 'refs': if field_name == 'refs':
branches = self.list_field_values('branch_name') branches = self.list_field_values('branch_name')
@ -408,13 +358,10 @@ class GithubBuildTrigger(BuildTriggerHandler):
[{'kind': 'tag', 'name': tag} for tag in tags]) [{'kind': 'tag', 'name': tag} for tag in tags])
config = self.config config = self.config
source = config.get('build_source')
if source is None:
return []
if field_name == 'tag_name': if field_name == 'tag_name':
try: try:
gh_client = self._get_client() gh_client = self._get_client()
source = config['build_source']
repo = gh_client.get_repo(source) repo = gh_client.get_repo(source)
gh_tags = repo.get_tags() gh_tags = repo.get_tags()
if limit: if limit:
@ -431,6 +378,7 @@ class GithubBuildTrigger(BuildTriggerHandler):
if field_name == 'branch_name': if field_name == 'branch_name':
try: try:
gh_client = self._get_client() gh_client = self._get_client()
source = config['build_source']
repo = gh_client.get_repo(source) repo = gh_client.get_repo(source)
gh_branches = repo.get_branches() gh_branches = repo.get_branches()
if limit: if limit:
@ -491,7 +439,6 @@ class GithubBuildTrigger(BuildTriggerHandler):
'commit_info': commit_info 'commit_info': commit_info
} }
@_catch_ssl_errors
def manual_start(self, run_parameters=None): def manual_start(self, run_parameters=None):
config = self.config config = self.config
source = config['build_source'] source = config['build_source']
@ -505,11 +452,8 @@ class GithubBuildTrigger(BuildTriggerHandler):
raise TriggerStartException(msg) raise TriggerStartException(msg)
def get_branch_sha(branch_name): def get_branch_sha(branch_name):
try: branch = repo.get_branch(branch_name)
branch = repo.get_branch(branch_name) return branch.commit.sha
return branch.commit.sha
except GithubException:
raise TriggerStartException('Could not find branch in repository')
def get_tag_sha(tag_name): def get_tag_sha(tag_name):
tags = {tag.name: tag for tag in repo.get_tags()} tags = {tag.name: tag for tag in repo.get_tags()}
@ -525,7 +469,6 @@ class GithubBuildTrigger(BuildTriggerHandler):
metadata = GithubBuildTrigger._build_metadata_for_commit(commit_sha, ref, repo) metadata = GithubBuildTrigger._build_metadata_for_commit(commit_sha, ref, repo)
return self.prepare_build(metadata, is_manual=True) return self.prepare_build(metadata, is_manual=True)
@_catch_ssl_errors
def lookup_user(self, username): def lookup_user(self, username):
try: try:
gh_client = self._get_client() gh_client = self._get_client()
@ -537,30 +480,15 @@ class GithubBuildTrigger(BuildTriggerHandler):
except GithubException: except GithubException:
return None return None
@_catch_ssl_errors
def handle_trigger_request(self, request): def handle_trigger_request(self, request):
# Check the payload to see if we should skip it based on the lack of a head_commit. # Check the payload to see if we should skip it based on the lack of a head_commit.
payload = request.get_json() payload = request.get_json()
if payload is None:
raise InvalidPayloadException('Missing payload')
# This is for GitHub's probing/testing. # This is for GitHub's probing/testing.
if 'zen' in payload: if 'zen' in payload:
raise SkipRequestException() raise ValidationRequestException()
# Lookup the default branch for the repository. # Lookup the default branch for the repository.
if 'repository' not in payload:
raise InvalidPayloadException("Missing 'repository' on request")
if 'owner' not in payload['repository']:
raise InvalidPayloadException("Missing 'owner' on repository")
if 'name' not in payload['repository']['owner']:
raise InvalidPayloadException("Missing owner 'name' on repository")
if 'name' not in payload['repository']:
raise InvalidPayloadException("Missing 'name' on repository")
default_branch = None default_branch = None
lookup_user = None lookup_user = None
try: try:
@ -579,7 +507,7 @@ class GithubBuildTrigger(BuildTriggerHandler):
logger.debug('GitHub trigger payload %s', payload) logger.debug('GitHub trigger payload %s', payload)
metadata = get_transformed_webhook_payload(payload, default_branch=default_branch, metadata = get_transformed_webhook_payload(payload, default_branch=default_branch,
lookup_user=lookup_user) lookup_user=lookup_user)
prepared = self.prepare_build(metadata) prepared = self.prepare_build(metadata)
# Check if we should skip this build. # Check if we should skip this build.

View file

@ -1,26 +1,23 @@
import os.path
import logging import logging
from calendar import timegm
from functools import wraps from functools import wraps
from app import app, gitlab_trigger
import dateutil.parser
import gitlab
import requests
from jsonschema import validate from jsonschema import validate
from app import app, gitlab_trigger
from buildtrigger.triggerutil import (RepositoryReadException, TriggerActivationException, from buildtrigger.triggerutil import (RepositoryReadException, TriggerActivationException,
TriggerDeactivationException, TriggerStartException, TriggerDeactivationException, TriggerStartException,
SkipRequestException, InvalidPayloadException, SkipRequestException, InvalidPayloadException,
TriggerAuthException,
determine_build_ref, raise_if_skipped_build, determine_build_ref, raise_if_skipped_build,
find_matching_branches) find_matching_branches)
from buildtrigger.basehandler import BuildTriggerHandler from buildtrigger.basehandler import BuildTriggerHandler
from endpoints.exception import ExternalServiceError
from util.security.ssh import generate_ssh_keypair from util.security.ssh import generate_ssh_keypair
from util.dict_wrappers import JSONPathDict, SafeDictSetter from util.dict_wrappers import JSONPathDict, SafeDictSetter
from endpoints.exception import ExternalServiceTimeout
import gitlab
import requests
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -47,11 +44,8 @@ GITLAB_WEBHOOK_PAYLOAD_SCHEMA = {
'items': { 'items': {
'type': 'object', 'type': 'object',
'properties': { 'properties': {
'id': {
'type': 'string',
},
'url': { 'url': {
'type': ['string', 'null'], 'type': 'string',
}, },
'message': { 'message': {
'type': 'string', 'type': 'string',
@ -69,25 +63,14 @@ GITLAB_WEBHOOK_PAYLOAD_SCHEMA = {
'required': ['email'], 'required': ['email'],
}, },
}, },
'required': ['id', 'message', 'timestamp'], 'required': ['url', 'message', 'timestamp'],
}, },
}, },
}, },
'required': ['ref', 'checkout_sha', 'repository'], 'required': ['ref', 'checkout_sha', 'repository'],
} }
_ACCESS_LEVEL_MAP = { def _catch_timeouts(func):
50: ("owner", True),
40: ("master", True),
30: ("developer", False),
20: ("reporter", False),
10: ("guest", False),
}
_PER_PAGE_COUNT = 20
def _catch_timeouts_and_errors(func):
@wraps(func) @wraps(func)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
try: try:
@ -95,37 +78,11 @@ def _catch_timeouts_and_errors(func):
except requests.exceptions.Timeout: except requests.exceptions.Timeout:
msg = 'Request to the GitLab API timed out' msg = 'Request to the GitLab API timed out'
logger.exception(msg) logger.exception(msg)
raise ExternalServiceError(msg) raise ExternalServiceTimeout(msg)
except gitlab.GitlabError:
msg = 'GitLab API error. Please contact support.'
logger.exception(msg)
raise ExternalServiceError(msg)
return wrapper return wrapper
def _paginated_iterator(func, exc, **kwargs): def get_transformed_webhook_payload(gl_payload, default_branch=None, lookup_user=None):
""" Returns an iterator over invocations of the given function, automatically handling
pagination.
"""
page = 1
while True:
result = func(page=page, per_page=_PER_PAGE_COUNT, **kwargs)
if result is None or result is False:
raise exc
counter = 0
for item in result:
yield item
counter = counter + 1
if counter < _PER_PAGE_COUNT:
break
page = page + 1
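For reference, a minimal usage sketch of the _paginated_iterator helper above, assuming the helper and the RepositoryReadException import from this file are in scope; the fake_list fetcher and the item count are illustrative.
# Hedged sketch: any callable accepting page/per_page keyword arguments works here.
def fake_list(page=1, per_page=20):
    data = list(range(45))                   # pretend server-side collection
    start = (page - 1) * per_page
    return data[start:start + per_page]      # empty slice once exhausted

items = list(_paginated_iterator(fake_list, RepositoryReadException))
assert len(items) == 45                      # pages 1-3 fetched; iteration stops on the short page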
def get_transformed_webhook_payload(gl_payload, default_branch=None, lookup_user=None,
lookup_commit=None):
""" Returns the Gitlab webhook JSON payload transformed into our own payload """ Returns the Gitlab webhook JSON payload transformed into our own payload
format. If the gl_payload is not valid, returns None. format. If the gl_payload is not valid, returns None.
""" """
@ -136,44 +93,27 @@ def get_transformed_webhook_payload(gl_payload, default_branch=None, lookup_user
payload = JSONPathDict(gl_payload) payload = JSONPathDict(gl_payload)
if payload['object_kind'] != 'push' and payload['object_kind'] != 'tag_push':
# Unknown kind of webhook.
raise SkipRequestException
# Check for empty commits. The commits list will be empty if the branch is deleted. # Check for empty commits. The commits list will be empty if the branch is deleted.
commits = payload['commits'] commits = payload['commits']
if payload['object_kind'] == 'push' and not commits: if not commits:
raise SkipRequestException
# Check for missing commit information.
commit_sha = payload['checkout_sha'] or payload['after']
if commit_sha is None or commit_sha == '0000000000000000000000000000000000000000':
raise SkipRequestException raise SkipRequestException
config = SafeDictSetter() config = SafeDictSetter()
config['commit'] = commit_sha config['commit'] = payload['checkout_sha']
config['ref'] = payload['ref'] config['ref'] = payload['ref']
config['default_branch'] = default_branch config['default_branch'] = default_branch
config['git_url'] = payload['repository.git_ssh_url'] config['git_url'] = payload['repository.git_ssh_url']
found_commit = JSONPathDict({}) # Find the commit associated with the checkout_sha. Gitlab doesn't (necessarily) send this in
if payload['object_kind'] == 'push' or payload['object_kind'] == 'tag_push': # any order, so we cannot simply index into the commits list.
# Find the commit associated with the checkout_sha. Gitlab doesn't (necessarily) send this in found_commit = None
# any order, so we cannot simply index into the commits list. for commit in commits:
found_commit = None if commit['id'] == payload['checkout_sha']:
if commits is not None: found_commit = JSONPathDict(commit)
for commit in commits: break
if commit['id'] == payload['checkout_sha']:
found_commit = JSONPathDict(commit)
break
if found_commit is None and lookup_commit: if found_commit is None:
checkout_sha = payload['checkout_sha'] or payload['after'] raise SkipRequestException
found_commit_info = lookup_commit(payload['project_id'], checkout_sha)
found_commit = JSONPathDict(dict(found_commit_info) if found_commit_info else {})
if found_commit is None:
raise SkipRequestException
config['commit_info.url'] = found_commit['url'] config['commit_info.url'] = found_commit['url']
config['commit_info.message'] = found_commit['message'] config['commit_info.message'] = found_commit['message']
@ -181,7 +121,7 @@ def get_transformed_webhook_payload(gl_payload, default_branch=None, lookup_user
# Note: Gitlab does not send full user information with the payload, so we have to # Note: Gitlab does not send full user information with the payload, so we have to
# (optionally) look it up. # (optionally) look it up.
author_email = found_commit['author.email'] or found_commit['author_email'] author_email = found_commit['author.email']
if lookup_user and author_email: if lookup_user and author_email:
author_info = lookup_user(author_email) author_info = lookup_user(author_email)
if author_info: if author_info:
@ -202,28 +142,20 @@ class GitLabBuildTrigger(BuildTriggerHandler):
def _get_authorized_client(self): def _get_authorized_client(self):
auth_token = self.auth_token or 'invalid' auth_token = self.auth_token or 'invalid'
api_version = self.config.get('API_VERSION', '4') return gitlab.Gitlab(gitlab_trigger.api_endpoint(), oauth_token=auth_token, timeout=5)
client = gitlab.Gitlab(gitlab_trigger.api_endpoint(), oauth_token=auth_token, timeout=20,
api_version=api_version)
try:
client.auth()
except gitlab.GitlabGetError as ex:
raise TriggerAuthException(ex.message)
return client
def is_active(self): def is_active(self):
return 'hook_id' in self.config return 'hook_id' in self.config
@_catch_timeouts_and_errors @_catch_timeouts
def activate(self, standard_webhook_url): def activate(self, standard_webhook_url):
config = self.config config = self.config
new_build_source = config['build_source'] new_build_source = config['build_source']
gl_client = self._get_authorized_client() gl_client = self._get_authorized_client()
# Find the GitLab repository. # Find the GitLab repository.
gl_project = gl_client.projects.get(new_build_source) repository = gl_client.getproject(new_build_source)
if not gl_project: if repository is False:
msg = 'Unable to find GitLab repository for source: %s' % new_build_source msg = 'Unable to find GitLab repository for source: %s' % new_build_source
raise TriggerActivationException(msg) raise TriggerActivationException(msg)
@ -235,31 +167,20 @@ class GitLabBuildTrigger(BuildTriggerHandler):
'value': public_key, 'value': public_key,
}, },
] ]
key = gl_client.adddeploykey(repository['id'], '%s Builder' % app.config['REGISTRY_TITLE'],
key = gl_project.keys.create({ public_key)
'title': '%s Builder' % app.config['REGISTRY_TITLE'], if key is False:
'key': public_key,
})
if not key:
msg = 'Unable to add deploy key to repository: %s' % new_build_source msg = 'Unable to add deploy key to repository: %s' % new_build_source
raise TriggerActivationException(msg) raise TriggerActivationException(msg)
config['key_id'] = key['id']
config['key_id'] = key.get_id()
# Add the webhook to the GitLab repository. # Add the webhook to the GitLab repository.
hook = gl_project.hooks.create({ hook = gl_client.addprojecthook(repository['id'], standard_webhook_url, push=True)
'url': standard_webhook_url, if hook is False:
'push': True,
'tag_push': True,
'push_events': True,
'tag_push_events': True,
})
if not hook:
msg = 'Unable to create webhook on repository: %s' % new_build_source msg = 'Unable to create webhook on repository: %s' % new_build_source
raise TriggerActivationException(msg) raise TriggerActivationException(msg)
config['hook_id'] = hook.get_id() config['hook_id'] = hook['id']
self.config = config self.config = config
return config, {'private_key': private_key} return config, {'private_key': private_key}
@ -268,169 +189,94 @@ class GitLabBuildTrigger(BuildTriggerHandler):
gl_client = self._get_authorized_client() gl_client = self._get_authorized_client()
# Find the GitLab repository. # Find the GitLab repository.
try: repository = gl_client.getproject(config['build_source'])
gl_project = gl_client.projects.get(config['build_source']) if repository is False:
if not gl_project: msg = 'Unable to find GitLab repository for source: %s' % config['build_source']
config.pop('key_id', None) raise TriggerDeactivationException(msg)
config.pop('hook_id', None)
self.config = config
return config
except gitlab.GitlabGetError as ex:
if ex.response_code != 404:
raise
# Remove the webhook. # Remove the webhook.
try: success = gl_client.deleteprojecthook(repository['id'], config['hook_id'])
gl_project.hooks.delete(config['hook_id']) if success is False:
except gitlab.GitlabDeleteError as ex: msg = 'Unable to remove hook: %s' % config['hook_id']
if ex.response_code != 404: raise TriggerDeactivationException(msg)
raise
config.pop('hook_id', None) config.pop('hook_id', None)
# Remove the key # Remove the key
try: success = gl_client.deletedeploykey(repository['id'], config['key_id'])
gl_project.keys.delete(config['key_id']) if success is False:
except gitlab.GitlabDeleteError as ex: msg = 'Unable to remove deploy key: %s' % config['key_id']
if ex.response_code != 404: raise TriggerDeactivationException(msg)
raise
config.pop('key_id', None) config.pop('key_id', None)
self.config = config self.config = config
return config return config
@_catch_timeouts_and_errors @_catch_timeouts
def list_build_source_namespaces(self): def list_build_sources(self):
gl_client = self._get_authorized_client() gl_client = self._get_authorized_client()
current_user = gl_client.user current_user = gl_client.currentuser()
if not current_user: if current_user is False:
raise RepositoryReadException('Unable to get current user') raise RepositoryReadException('Unable to get current user')
repositories = gl_client.getprojects()
if repositories is False:
raise RepositoryReadException('Unable to list user repositories')
namespaces = {} namespaces = {}
for namespace in _paginated_iterator(gl_client.namespaces.list, RepositoryReadException): for repo in repositories:
namespace_id = namespace.get_id() owner = repo['namespace']['name']
if namespace_id in namespaces: if not owner in namespaces:
namespaces[namespace_id]['score'] = namespaces[namespace_id]['score'] + 1 namespaces[owner] = {
else: 'personal': owner == current_user['username'],
owner = namespace.attributes['name'] 'repos': [],
namespaces[namespace_id] = { 'info': {
'personal': namespace.attributes['kind'] == 'user', 'name': owner,
'id': str(namespace_id), }
'title': namespace.attributes['name'],
'avatar_url': namespace.attributes.get('avatar_url'),
'score': 1,
'url': namespace.attributes.get('web_url') or '',
} }
return BuildTriggerHandler.build_namespaces_response(namespaces) namespaces[owner]['repos'].append(repo['path_with_namespace'])
def _get_namespace(self, gl_client, gl_namespace, lazy=False): return namespaces.values()
try:
if gl_namespace.attributes['kind'] == 'group':
return gl_client.groups.get(gl_namespace.attributes['id'], lazy=lazy)
if gl_namespace.attributes['kind'] == 'user': @_catch_timeouts
return gl_client.users.get(gl_client.user.attributes['id'], lazy=lazy)
# Note: This doesn't seem to work for IDs retrieved via the namespaces API; the IDs are
# different.
return gl_client.users.get(gl_namespace.attributes['id'], lazy=lazy)
except gitlab.GitlabGetError:
return None
@_catch_timeouts_and_errors
def list_build_sources_for_namespace(self, namespace_id):
if not namespace_id:
return []
def repo_view(repo):
# Because *anything* can be None in GitLab API!
permissions = repo.attributes.get('permissions') or {}
group_access = permissions.get('group_access') or {}
project_access = permissions.get('project_access') or {}
missing_group_access = permissions.get('group_access') is None
missing_project_access = permissions.get('project_access') is None
access_level = max(group_access.get('access_level') or 0,
project_access.get('access_level') or 0)
has_admin_permission = _ACCESS_LEVEL_MAP.get(access_level, ("", False))[1]
if missing_group_access or missing_project_access:
# Default to has permission if we cannot check the permissions. This will allow our users
# to select the repository and then GitLab's own checks will ensure that the webhook is
# added only if allowed.
# TODO: Do we want to display this differently in the UI?
has_admin_permission = True
view = {
'name': repo.attributes['path'],
'full_name': repo.attributes['path_with_namespace'],
'description': repo.attributes.get('description') or '',
'url': repo.attributes.get('web_url'),
'has_admin_permissions': has_admin_permission,
'private': repo.attributes.get('visibility') == 'private',
}
if repo.attributes.get('last_activity_at'):
try:
last_modified = dateutil.parser.parse(repo.attributes['last_activity_at'])
view['last_updated'] = timegm(last_modified.utctimetuple())
except ValueError:
logger.exception('Gitlab gave us an invalid last_activity_at: %s', last_modified)
return view
gl_client = self._get_authorized_client()
try:
gl_namespace = gl_client.namespaces.get(namespace_id)
except gitlab.GitlabGetError:
return []
namespace_obj = self._get_namespace(gl_client, gl_namespace, lazy=True)
repositories = _paginated_iterator(namespace_obj.projects.list, RepositoryReadException)
try:
return BuildTriggerHandler.build_sources_response([repo_view(repo) for repo in repositories])
except gitlab.GitlabGetError:
return []
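Both namespace listing methods above lean on _paginated_iterator, whose body is outside this hunk. One plausible shape for such a helper, assuming a python-gitlab list() endpoint that accepts page/per_page keyword arguments; the name and error message are hypothetical, not the handler's actual code:

# Hypothetical pagination helper; `list_call` would be e.g. gl_client.namespaces.list or
# namespace_obj.projects.list, and `error_cls` the exception to raise on failure
# (RepositoryReadException in the calls above).
import gitlab

def paginated_iterator(list_call, error_cls, per_page=50):
    page = 1
    while True:
        try:
            batch = list_call(page=page, per_page=per_page)
        except gitlab.GitlabError as ex:
            raise error_cls('Unable to list resources: %s' % ex)
        if not batch:
            return
        for item in batch:
            yield item
        page += 1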
@_catch_timeouts_and_errors
def list_build_subdirs(self): def list_build_subdirs(self):
config = self.config config = self.config
gl_client = self._get_authorized_client() gl_client = self._get_authorized_client()
new_build_source = config['build_source'] new_build_source = config['build_source']
gl_project = gl_client.projects.get(new_build_source) repository = gl_client.getproject(new_build_source)
if not gl_project: if repository is False:
msg = 'Unable to find GitLab repository for source: %s' % new_build_source msg = 'Unable to find GitLab repository for source: %s' % new_build_source
raise RepositoryReadException(msg) raise RepositoryReadException(msg)
repo_branches = gl_project.branches.list() repo_branches = gl_client.getbranches(repository['id'])
if not repo_branches: if repo_branches is False:
msg = 'Unable to find GitLab branches for source: %s' % new_build_source msg = 'Unable to find GitLab branches for source: %s' % new_build_source
raise RepositoryReadException(msg) raise RepositoryReadException(msg)
branches = [branch.attributes['name'] for branch in repo_branches] branches = [branch['name'] for branch in repo_branches]
branches = find_matching_branches(config, branches) branches = find_matching_branches(config, branches)
branches = branches or [gl_project.attributes['default_branch'] or 'master'] branches = branches or [repository['default_branch'] or 'master']
repo_tree = gl_project.repository_tree(ref=branches[0]) repo_tree = gl_client.getrepositorytree(repository['id'], ref_name=branches[0])
if not repo_tree: if repo_tree is False:
msg = 'Unable to find GitLab repository tree for source: %s' % new_build_source msg = 'Unable to find GitLab repository tree for source: %s' % new_build_source
raise RepositoryReadException(msg) raise RepositoryReadException(msg)
return [node['name'] for node in repo_tree if self.filename_is_dockerfile(node['name'])] for node in repo_tree:
if node['name'] == 'Dockerfile':
return ['/']
@_catch_timeouts_and_errors return []
@_catch_timeouts
def load_dockerfile_contents(self): def load_dockerfile_contents(self):
gl_client = self._get_authorized_client() gl_client = self._get_authorized_client()
path = self.get_dockerfile_path() path = self.get_dockerfile_path()
gl_project = gl_client.projects.get(self.config['build_source']) repository = gl_client.getproject(self.config['build_source'])
if not gl_project: if repository is False:
return None return None
branches = self.list_field_values('branch_name') branches = self.list_field_values('branch_name')
@ -439,15 +285,16 @@ class GitLabBuildTrigger(BuildTriggerHandler):
return None return None
branch_name = branches[0] branch_name = branches[0]
if gl_project.attributes['default_branch'] in branches: if repository['default_branch'] in branches:
branch_name = gl_project.attributes['default_branch'] branch_name = repository['default_branch']
try: contents = gl_client.getrawfile(repository['id'], branch_name, path)
return gl_project.files.get(path, branch_name).decode() if contents is False:
except gitlab.GitlabGetError:
return None return None
@_catch_timeouts_and_errors return contents
@_catch_timeouts
def list_field_values(self, field_name, limit=None): def list_field_values(self, field_name, limit=None):
if field_name == 'refs': if field_name == 'refs':
branches = self.list_field_values('branch_name') branches = self.list_field_values('branch_name')
@ -457,163 +304,141 @@ class GitLabBuildTrigger(BuildTriggerHandler):
[{'kind': 'tag', 'name': t} for t in tags]) [{'kind': 'tag', 'name': t} for t in tags])
gl_client = self._get_authorized_client() gl_client = self._get_authorized_client()
gl_project = gl_client.projects.get(self.config['build_source']) repo = gl_client.getproject(self.config['build_source'])
if not gl_project: if repo is False:
return [] return []
if field_name == 'tag_name': if field_name == 'tag_name':
tags = gl_project.tags.list() tags = gl_client.getrepositorytags(repo['id'])
if not tags: if tags is False:
return [] return []
if limit: if limit:
tags = tags[0:limit] tags = tags[0:limit]
return [tag.attributes['name'] for tag in tags] return [tag['name'] for tag in tags]
if field_name == 'branch_name': if field_name == 'branch_name':
branches = gl_project.branches.list() branches = gl_client.getbranches(repo['id'])
if not branches: if branches is False:
return [] return []
if limit: if limit:
branches = branches[0:limit] branches = branches[0:limit]
return [branch.attributes['name'] for branch in branches] return [branch['name'] for branch in branches]
return None return None
def get_repository_url(self): def get_repository_url(self):
return gitlab_trigger.get_public_url(self.config['build_source']) return gitlab_trigger.get_public_url(self.config['build_source'])
@_catch_timeouts_and_errors @_catch_timeouts
def lookup_commit(self, repo_id, commit_sha):
if repo_id is None:
return None
gl_client = self._get_authorized_client()
gl_project = gl_client.projects.get(self.config['build_source'], lazy=True)
commit = gl_project.commits.get(commit_sha)
if not commit:
return None
return commit
@_catch_timeouts_and_errors
def lookup_user(self, email): def lookup_user(self, email):
gl_client = self._get_authorized_client() gl_client = self._get_authorized_client()
try: try:
result = gl_client.users.list(search=email) [user] = gl_client.getusers(search=email)
if not result:
return None
[user] = result
return { return {
'username': user.attributes['username'], 'username': user['username'],
'html_url': user.attributes['web_url'], 'html_url': gl_client.host + '/' + user['username'],
'avatar_url': user.attributes['avatar_url'] 'avatar_url': user['avatar_url']
} }
except ValueError: except ValueError:
return None return None
@_catch_timeouts_and_errors @_catch_timeouts
def get_metadata_for_commit(self, commit_sha, ref, repo): def get_metadata_for_commit(self, commit_sha, ref, repo):
commit = self.lookup_commit(repo.get_id(), commit_sha) gl_client = self._get_authorized_client()
if commit is None: commit = gl_client.getrepositorycommit(repo['id'], commit_sha)
return None
metadata = { metadata = {
'commit': commit.attributes['id'], 'commit': commit['id'],
'ref': ref, 'ref': ref,
'default_branch': repo.attributes['default_branch'], 'default_branch': repo['default_branch'],
'git_url': repo.attributes['ssh_url_to_repo'], 'git_url': repo['ssh_url_to_repo'],
'commit_info': { 'commit_info': {
'url': os.path.join(repo.attributes['web_url'], 'commit', commit.attributes['id']), 'url': gl_client.host + '/' + repo['path_with_namespace'] + '/commit/' + commit['id'],
'message': commit.attributes['message'], 'message': commit['message'],
'date': commit.attributes['committed_date'], 'date': commit['committed_date'],
}, },
} }
committer = None committer = None
if 'committer_email' in commit.attributes: if 'committer_email' in commit:
committer = self.lookup_user(commit.attributes['committer_email']) committer = self.lookup_user(commit['committer_email'])
author = None author = None
if 'author_email' in commit.attributes: if 'author_email' in commit:
author = self.lookup_user(commit.attributes['author_email']) author = self.lookup_user(commit['author_email'])
if committer is not None: if committer is not None:
metadata['commit_info']['committer'] = { metadata['commit_info']['committer'] = {
'username': committer['username'], 'username': committer['username'],
'avatar_url': committer['avatar_url'], 'avatar_url': committer['avatar_url'],
'url': committer.get('http_url', ''), 'url': gl_client.host + '/' + committer['username'],
} }
if author is not None: if author is not None:
metadata['commit_info']['author'] = { metadata['commit_info']['author'] = {
'username': author['username'], 'username': author['username'],
'avatar_url': author['avatar_url'], 'avatar_url': author['avatar_url'],
'url': author.get('http_url', ''), 'url': gl_client.host + '/' + author['username']
} }
return metadata return metadata
@_catch_timeouts_and_errors @_catch_timeouts
def manual_start(self, run_parameters=None): def manual_start(self, run_parameters=None):
gl_client = self._get_authorized_client() gl_client = self._get_authorized_client()
gl_project = gl_client.projects.get(self.config['build_source'])
if not gl_project: repo = gl_client.getproject(self.config['build_source'])
if repo is False:
raise TriggerStartException('Could not find repository') raise TriggerStartException('Could not find repository')
def get_tag_sha(tag_name): def get_tag_sha(tag_name):
try: tags = gl_client.getrepositorytags(repo['id'])
tag = gl_project.tags.get(tag_name) if tags is False:
except gitlab.GitlabGetError: raise TriggerStartException('Could not find tags')
raise TriggerStartException('Could not find tag in repository')
return tag.attributes['commit']['id'] for tag in tags:
if tag['name'] == tag_name:
return tag['commit']['id']
raise TriggerStartException('Could not find commit')
def get_branch_sha(branch_name): def get_branch_sha(branch_name):
try: branch = gl_client.getbranch(repo['id'], branch_name)
branch = gl_project.branches.get(branch_name) if branch is False:
except gitlab.GitlabGetError: raise TriggerStartException('Could not find branch')
raise TriggerStartException('Could not find branch in repository')
return branch.attributes['commit']['id'] return branch['commit']['id']
# Find the branch or tag to build. # Find the branch or tag to build.
(commit_sha, ref) = determine_build_ref(run_parameters, get_branch_sha, get_tag_sha, (commit_sha, ref) = determine_build_ref(run_parameters, get_branch_sha, get_tag_sha,
gl_project.attributes['default_branch']) repo['default_branch'])
metadata = self.get_metadata_for_commit(commit_sha, ref, gl_project) metadata = self.get_metadata_for_commit(commit_sha, ref, repo)
return self.prepare_build(metadata, is_manual=True) return self.prepare_build(metadata, is_manual=True)
@_catch_timeouts_and_errors @_catch_timeouts
def handle_trigger_request(self, request): def handle_trigger_request(self, request):
payload = request.get_json() payload = request.get_json()
if not payload: if not payload:
raise InvalidPayloadException() raise SkipRequestException()
logger.debug('GitLab trigger payload %s', payload) logger.debug('GitLab trigger payload %s', payload)
# Lookup the default branch. # Lookup the default branch.
gl_client = self._get_authorized_client() gl_client = self._get_authorized_client()
gl_project = gl_client.projects.get(self.config['build_source']) repo = gl_client.getproject(self.config['build_source'])
if not gl_project: if repo is False:
logger.debug('Skipping GitLab build; project %s not found', self.config['build_source']) logger.debug('Skipping GitLab build; project %s not found', self.config['build_source'])
raise InvalidPayloadException() raise SkipRequestException()
def lookup_commit(repo_id, commit_sha): default_branch = repo['default_branch']
commit = self.lookup_commit(repo_id, commit_sha)
if commit is None:
return None
return dict(commit.attributes)
default_branch = gl_project.attributes['default_branch']
metadata = get_transformed_webhook_payload(payload, default_branch=default_branch, metadata = get_transformed_webhook_payload(payload, default_branch=default_branch,
lookup_user=self.lookup_user, lookup_user=self.lookup_user)
lookup_commit=lookup_commit)
prepared = self.prepare_build(metadata) prepared = self.prepare_build(metadata)
# Check if we should skip this build. # Check if we should skip this build.

View file

@ -1,159 +0,0 @@
from datetime import datetime
from mock import Mock
from buildtrigger.bitbuckethandler import BitbucketBuildTrigger
from util.morecollections import AttrDict
def get_bitbucket_trigger(dockerfile_path=''):
trigger_obj = AttrDict(dict(auth_token='foobar', id='sometrigger'))
trigger = BitbucketBuildTrigger(trigger_obj, {
'build_source': 'foo/bar',
'dockerfile_path': dockerfile_path,
'nickname': 'knownuser',
'account_id': 'foo',
})
trigger._get_client = get_mock_bitbucket
return trigger
def get_repo_path_contents(path, revision):
data = {
'files': [{'path': 'Dockerfile'}],
}
return (True, data, None)
def get_raw_path_contents(path, revision):
if path == 'Dockerfile':
return (True, 'hello world', None)
if path == 'somesubdir/Dockerfile':
return (True, 'hi universe', None)
return (False, None, None)
def get_branches_and_tags():
data = {
'branches': [{'name': 'master'}, {'name': 'otherbranch'}],
'tags': [{'name': 'sometag'}, {'name': 'someothertag'}],
}
return (True, data, None)
def get_branches():
return (True, {'master': {}, 'otherbranch': {}}, None)
def get_tags():
return (True, {'sometag': {}, 'someothertag': {}}, None)
def get_branch(branch_name):
if branch_name != 'master':
return (False, None, None)
data = {
'target': {
'hash': 'aaaaaaa',
},
}
return (True, data, None)
def get_tag(tag_name):
if tag_name != 'sometag':
return (False, None, None)
data = {
'target': {
'hash': 'aaaaaaa',
},
}
return (True, data, None)
def get_changeset_mock(commit_sha):
if commit_sha != 'aaaaaaa':
return (False, None, 'Not found')
data = {
'node': 'aaaaaaa',
'message': 'some message',
'timestamp': 'now',
'raw_author': 'foo@bar.com',
}
return (True, data, None)
def get_changesets():
changesets_mock = Mock()
changesets_mock.get = Mock(side_effect=get_changeset_mock)
return changesets_mock
def get_deploykeys():
deploykeys_mock = Mock()
deploykeys_mock.create = Mock(return_value=(True, {'pk': 'someprivatekey'}, None))
deploykeys_mock.delete = Mock(return_value=(True, {}, None))
return deploykeys_mock
def get_webhooks():
webhooks_mock = Mock()
webhooks_mock.create = Mock(return_value=(True, {'uuid': 'someuuid'}, None))
webhooks_mock.delete = Mock(return_value=(True, {}, None))
return webhooks_mock
def get_repo_mock(name):
if name != 'bar':
return None
repo_mock = Mock()
repo_mock.get_main_branch = Mock(return_value=(True, {'name': 'master'}, None))
repo_mock.get_path_contents = Mock(side_effect=get_repo_path_contents)
repo_mock.get_raw_path_contents = Mock(side_effect=get_raw_path_contents)
repo_mock.get_branches_and_tags = Mock(side_effect=get_branches_and_tags)
repo_mock.get_branches = Mock(side_effect=get_branches)
repo_mock.get_tags = Mock(side_effect=get_tags)
repo_mock.get_branch = Mock(side_effect=get_branch)
repo_mock.get_tag = Mock(side_effect=get_tag)
repo_mock.changesets = Mock(side_effect=get_changesets)
repo_mock.deploykeys = Mock(side_effect=get_deploykeys)
repo_mock.webhooks = Mock(side_effect=get_webhooks)
return repo_mock
def get_repositories_mock():
repos_mock = Mock()
repos_mock.get = Mock(side_effect=get_repo_mock)
return repos_mock
def get_namespace_mock(namespace):
namespace_mock = Mock()
namespace_mock.repositories = Mock(side_effect=get_repositories_mock)
return namespace_mock
def get_repo(namespace, name):
return {
'owner': namespace,
'logo': 'avatarurl',
'slug': name,
'description': 'some %s repo' % (name),
'utc_last_updated': str(datetime.utcfromtimestamp(0)),
'read_only': namespace != 'knownuser',
'is_private': name == 'somerepo',
}
def get_visible_repos():
repos = [
get_repo('knownuser', 'somerepo'),
get_repo('someorg', 'somerepo'),
get_repo('someorg', 'anotherrepo'),
]
return (True, repos, None)
def get_authed_mock(token, secret):
authed_mock = Mock()
authed_mock.for_namespace = Mock(side_effect=get_namespace_mock)
authed_mock.get_visible_repositories = Mock(side_effect=get_visible_repos)
return authed_mock
def get_mock_bitbucket():
bitbucket_mock = Mock()
bitbucket_mock.get_authorized_client = Mock(side_effect=get_authed_mock)
return bitbucket_mock

View file

@ -1,178 +0,0 @@
from datetime import datetime
from mock import Mock
from github import GithubException
from buildtrigger.githubhandler import GithubBuildTrigger
from util.morecollections import AttrDict
def get_github_trigger(dockerfile_path=''):
trigger_obj = AttrDict(dict(auth_token='foobar', id='sometrigger'))
trigger = GithubBuildTrigger(trigger_obj, {'build_source': 'foo', 'dockerfile_path': dockerfile_path})
trigger._get_client = get_mock_github
return trigger
def get_mock_github():
def get_commit_mock(commit_sha):
if commit_sha == 'aaaaaaa':
commit_mock = Mock()
commit_mock.sha = commit_sha
commit_mock.html_url = 'http://url/to/commit'
commit_mock.last_modified = 'now'
commit_mock.commit = Mock()
commit_mock.commit.message = 'some cool message'
commit_mock.committer = Mock()
commit_mock.committer.login = 'someuser'
commit_mock.committer.avatar_url = 'avatarurl'
commit_mock.committer.html_url = 'htmlurl'
commit_mock.author = Mock()
commit_mock.author.login = 'someuser'
commit_mock.author.avatar_url = 'avatarurl'
commit_mock.author.html_url = 'htmlurl'
return commit_mock
raise GithubException(None, None)
def get_branch_mock(branch_name):
if branch_name == 'master':
branch_mock = Mock()
branch_mock.commit = Mock()
branch_mock.commit.sha = 'aaaaaaa'
return branch_mock
raise GithubException(None, None)
def get_repo_mock(namespace, name):
repo_mock = Mock()
repo_mock.owner = Mock()
repo_mock.owner.login = namespace
repo_mock.full_name = '%s/%s' % (namespace, name)
repo_mock.name = name
repo_mock.description = 'some %s repo' % (name)
if name != 'anotherrepo':
repo_mock.pushed_at = datetime.utcfromtimestamp(0)
else:
repo_mock.pushed_at = None
repo_mock.html_url = 'https://bitbucket.org/%s/%s' % (namespace, name)
repo_mock.private = name == 'somerepo'
repo_mock.permissions = Mock()
repo_mock.permissions.admin = namespace == 'knownuser'
return repo_mock
def get_user_repos_mock(type='all', sort='created'):
return [get_repo_mock('knownuser', 'somerepo')]
def get_org_repos_mock(type='all'):
return [get_repo_mock('someorg', 'somerepo'), get_repo_mock('someorg', 'anotherrepo')]
def get_orgs_mock():
return [get_org_mock('someorg')]
def get_user_mock(username='knownuser'):
if username == 'knownuser':
user_mock = Mock()
user_mock.name = username
user_mock.plan = Mock()
user_mock.plan.private_repos = 1
user_mock.login = username
user_mock.html_url = 'https://bitbucket.org/%s' % (username)
user_mock.avatar_url = 'avatarurl'
user_mock.get_repos = Mock(side_effect=get_user_repos_mock)
user_mock.get_orgs = Mock(side_effect=get_orgs_mock)
return user_mock
raise GithubException(None, None)
def get_org_mock(namespace):
if namespace == 'someorg':
org_mock = Mock()
org_mock.get_repos = Mock(side_effect=get_org_repos_mock)
org_mock.login = namespace
org_mock.html_url = 'https://bitbucket.org/%s' % (namespace)
org_mock.avatar_url = 'avatarurl'
org_mock.name = namespace
org_mock.plan = Mock()
org_mock.plan.private_repos = 2
return org_mock
raise GithubException(None, None)
def get_tags_mock():
sometag = Mock()
sometag.name = 'sometag'
sometag.commit = get_commit_mock('aaaaaaa')
someothertag = Mock()
someothertag.name = 'someothertag'
someothertag.commit = get_commit_mock('aaaaaaa')
return [sometag, someothertag]
def get_branches_mock():
master = Mock()
master.name = 'master'
master.commit = get_commit_mock('aaaaaaa')
otherbranch = Mock()
otherbranch.name = 'otherbranch'
otherbranch.commit = get_commit_mock('aaaaaaa')
return [master, otherbranch]
def get_contents_mock(filepath):
if filepath == 'Dockerfile':
m = Mock()
m.content = 'hello world'
return m
if filepath == 'somesubdir/Dockerfile':
m = Mock()
m.content = 'hi universe'
return m
raise GithubException(None, None)
def get_git_tree_mock(commit_sha, recursive=False):
first_file = Mock()
first_file.type = 'blob'
first_file.path = 'Dockerfile'
second_file = Mock()
second_file.type = 'other'
second_file.path = '/some/Dockerfile'
third_file = Mock()
third_file.type = 'blob'
third_file.path = 'somesubdir/Dockerfile'
t = Mock()
if commit_sha == 'aaaaaaa':
t.tree = [
first_file, second_file, third_file,
]
else:
t.tree = []
return t
repo_mock = Mock()
repo_mock.default_branch = 'master'
repo_mock.ssh_url = 'ssh_url'
repo_mock.get_branch = Mock(side_effect=get_branch_mock)
repo_mock.get_tags = Mock(side_effect=get_tags_mock)
repo_mock.get_branches = Mock(side_effect=get_branches_mock)
repo_mock.get_commit = Mock(side_effect=get_commit_mock)
repo_mock.get_contents = Mock(side_effect=get_contents_mock)
repo_mock.get_git_tree = Mock(side_effect=get_git_tree_mock)
gh_mock = Mock()
gh_mock.get_repo = Mock(return_value=repo_mock)
gh_mock.get_user = Mock(side_effect=get_user_mock)
gh_mock.get_organization = Mock(side_effect=get_org_mock)
return gh_mock

View file

@ -1,598 +0,0 @@
import base64
import json
from contextlib import contextmanager
import gitlab
from httmock import urlmatch, HTTMock
from buildtrigger.gitlabhandler import GitLabBuildTrigger
from util.morecollections import AttrDict
@urlmatch(netloc=r'fakegitlab')
def catchall_handler(url, request):
return {'status_code': 404}
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/users$')
def users_handler(url, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
if url.query.find('knownuser') < 0:
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps([]),
}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps([
{
"id": 1,
"username": "knownuser",
"name": "Known User",
"state": "active",
"avatar_url": "avatarurl",
"web_url": "https://bitbucket.org/knownuser",
},
]),
}
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/user$')
def user_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps({
"id": 1,
"username": "john_smith",
"email": "john@example.com",
"name": "John Smith",
"state": "active",
}),
}
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/foo%2Fbar$')
def project_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps({
"id": 4,
"description": None,
"default_branch": "master",
"visibility": "private",
"path_with_namespace": "someorg/somerepo",
"ssh_url_to_repo": "git@example.com:someorg/somerepo.git",
"web_url": "http://example.com/someorg/somerepo",
}),
}
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/tree$')
def project_tree_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps([
{
"id": "a1e8f8d745cc87e3a9248358d9352bb7f9a0aeba",
"name": "Dockerfile",
"type": "tree",
"path": "files/Dockerfile",
"mode": "040000",
},
]),
}
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/tags$')
def project_tags_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps([
{
'name': 'sometag',
'commit': {
'id': '60a8ff033665e1207714d6670fcd7b65304ec02f',
},
},
{
'name': 'someothertag',
'commit': {
'id': '60a8ff033665e1207714d6670fcd7b65304ec02f',
},
},
]),
}
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/branches$')
def project_branches_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps([
{
'name': 'master',
'commit': {
'id': '60a8ff033665e1207714d6670fcd7b65304ec02f',
},
},
{
'name': 'otherbranch',
'commit': {
'id': '60a8ff033665e1207714d6670fcd7b65304ec02f',
},
},
]),
}
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/branches/master$')
def project_branch_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps({
"name": "master",
"merged": True,
"protected": True,
"developers_can_push": False,
"developers_can_merge": False,
"commit": {
"author_email": "john@example.com",
"author_name": "John Smith",
"authored_date": "2012-06-27T05:51:39-07:00",
"committed_date": "2012-06-28T03:44:20-07:00",
"committer_email": "john@example.com",
"committer_name": "John Smith",
"id": "60a8ff033665e1207714d6670fcd7b65304ec02f",
"short_id": "7b5c3cc",
"title": "add projects API",
"message": "add projects API",
"parent_ids": [
"4ad91d3c1144c406e50c7b33bae684bd6837faf8",
],
},
}),
}
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/namespaces/someorg$')
def namespace_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps({
"id": 2,
"name": "someorg",
"path": "someorg",
"kind": "group",
"full_path": "someorg",
"parent_id": None,
"members_count_with_descendants": 2
}),
}
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/namespaces/knownuser$')
def user_namespace_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps({
"id": 1,
"name": "knownuser",
"path": "knownuser",
"kind": "user",
"full_path": "knownuser",
"parent_id": None,
"members_count_with_descendants": 2
}),
}
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/namespaces(/)?$')
def namespaces_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps([{
"id": 2,
"name": "someorg",
"path": "someorg",
"kind": "group",
"full_path": "someorg",
"parent_id": None,
"web_url": "http://gitlab.com/groups/someorg",
"members_count_with_descendants": 2
}]),
}
def get_projects_handler(add_permissions_block):
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/groups/2/projects$')
def projects_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
permissions_block = {
"project_access": {
"access_level": 10,
"notification_level": 3
},
"group_access": {
"access_level": 20,
"notification_level": 3
},
}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps([{
"id": 4,
"name": "Some project",
"description": None,
"default_branch": "master",
"visibility": "private",
"path": "someproject",
"path_with_namespace": "someorg/someproject",
"last_activity_at": "2013-09-30T13:46:02Z",
"web_url": "http://example.com/someorg/someproject",
"permissions": permissions_block if add_permissions_block else None,
},
{
"id": 5,
"name": "Another project",
"description": None,
"default_branch": "master",
"visibility": "public",
"path": "anotherproject",
"path_with_namespace": "someorg/anotherproject",
"last_activity_at": "2013-09-30T13:46:02Z",
"web_url": "http://example.com/someorg/anotherproject",
}]),
}
return projects_handler
def get_group_handler(null_avatar):
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/groups/2$')
def group_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps({
"id": 1,
"name": "SomeOrg Group",
"path": "someorg",
"description": "An interesting group",
"visibility": "public",
"lfs_enabled": True,
"avatar_url": 'avatar_url' if not null_avatar else None,
"web_url": "http://gitlab.com/groups/someorg",
"request_access_enabled": False,
"full_name": "SomeOrg Group",
"full_path": "someorg",
"parent_id": None,
}),
}
return group_handler
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/files/Dockerfile$')
def dockerfile_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps({
"file_name": "Dockerfile",
"file_path": "Dockerfile",
"size": 10,
"encoding": "base64",
"content": base64.b64encode('hello world'),
"ref": "master",
"blob_id": "79f7bbd25901e8334750839545a9bd021f0e4c83",
"commit_id": "d5a3ff139356ce33e37e73add446f16869741b50",
"last_commit_id": "570e7b2abdd848b95f2f578043fc23bd6f6fd24d"
}),
}
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/files/somesubdir%2FDockerfile$')
def sub_dockerfile_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps({
"file_name": "Dockerfile",
"file_path": "somesubdir/Dockerfile",
"size": 10,
"encoding": "base64",
"content": base64.b64encode('hi universe'),
"ref": "master",
"blob_id": "79f7bbd25901e8334750839545a9bd021f0e4c83",
"commit_id": "d5a3ff139356ce33e37e73add446f16869741b50",
"last_commit_id": "570e7b2abdd848b95f2f578043fc23bd6f6fd24d"
}),
}
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/tags/sometag$')
def tag_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps({
"name": "sometag",
"message": "some cool message",
"target": "60a8ff033665e1207714d6670fcd7b65304ec02f",
"commit": {
"id": "60a8ff033665e1207714d6670fcd7b65304ec02f",
"short_id": "60a8ff03",
"title": "Initial commit",
"created_at": "2017-07-26T11:08:53.000+02:00",
"parent_ids": [
"f61c062ff8bcbdb00e0a1b3317a91aed6ceee06b"
],
"message": "v5.0.0\n",
"author_name": "Arthur Verschaeve",
"author_email": "contact@arthurverschaeve.be",
"authored_date": "2015-02-01T21:56:31.000+01:00",
"committer_name": "Arthur Verschaeve",
"committer_email": "contact@arthurverschaeve.be",
"committed_date": "2015-02-01T21:56:31.000+01:00"
},
"release": None,
}),
}
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/foo%2Fbar/repository/commits/60a8ff033665e1207714d6670fcd7b65304ec02f$')
def commit_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps({
"id": "60a8ff033665e1207714d6670fcd7b65304ec02f",
"short_id": "60a8ff03366",
"title": "Sanitize for network graph",
"author_name": "someguy",
"author_email": "some.guy@gmail.com",
"committer_name": "Some Guy",
"committer_email": "some.guy@gmail.com",
"created_at": "2012-09-20T09:06:12+03:00",
"message": "Sanitize for network graph",
"committed_date": "2012-09-20T09:06:12+03:00",
"authored_date": "2012-09-20T09:06:12+03:00",
"parent_ids": [
"ae1d9fb46aa2b07ee9836d49862ec4e2c46fbbba"
],
"last_pipeline" : {
"id": 8,
"ref": "master",
"sha": "2dc6aa325a317eda67812f05600bdf0fcdc70ab0",
"status": "created",
},
"stats": {
"additions": 15,
"deletions": 10,
"total": 25
},
"status": "running"
}),
}
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/deploy_keys$', method='POST')
def create_deploykey_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps({
"id": 1,
"title": "Public key",
"key": "ssh-rsa some stuff",
"created_at": "2013-10-02T10:12:29Z",
"can_push": False,
}),
}
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/hooks$', method='POST')
def create_hook_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps({
"id": 1,
"url": "http://example.com/hook",
"project_id": 4,
"push_events": True,
"issues_events": True,
"confidential_issues_events": True,
"merge_requests_events": True,
"tag_push_events": True,
"note_events": True,
"job_events": True,
"pipeline_events": True,
"wiki_page_events": True,
"enable_ssl_verification": True,
"created_at": "2012-10-12T17:04:47Z",
}),
}
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/hooks/1$', method='DELETE')
def delete_hook_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps({}),
}
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/deploy_keys/1$', method='DELETE')
def delete_deploykey_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps({}),
}
@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/users/1/projects$')
def user_projects_list_handler(_, request):
if not request.headers.get('Authorization') == 'Bearer foobar':
return {'status_code': 401}
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json',
},
'content': json.dumps([
{
"id": 2,
"name": "Another project",
"description": None,
"default_branch": "master",
"visibility": "public",
"path": "anotherproject",
"path_with_namespace": "knownuser/anotherproject",
"last_activity_at": "2013-09-30T13:46:02Z",
"web_url": "http://example.com/knownuser/anotherproject",
}
]),
}
@contextmanager
def get_gitlab_trigger(dockerfile_path='', add_permissions=True, missing_avatar_url=False):
handlers = [user_handler, users_handler, project_branches_handler, project_tree_handler,
project_handler, get_projects_handler(add_permissions), tag_handler,
project_branch_handler, get_group_handler(missing_avatar_url), dockerfile_handler,
sub_dockerfile_handler, namespace_handler, user_namespace_handler, namespaces_handler,
commit_handler, create_deploykey_handler, delete_deploykey_handler,
create_hook_handler, delete_hook_handler, project_tags_handler,
user_projects_list_handler, catchall_handler]
with HTTMock(*handlers):
trigger_obj = AttrDict(dict(auth_token='foobar', id='sometrigger'))
trigger = GitLabBuildTrigger(trigger_obj, {
'build_source': 'foo/bar',
'dockerfile_path': dockerfile_path,
'username': 'knownuser'
})
client = gitlab.Gitlab('http://fakegitlab', oauth_token='foobar', timeout=20, api_version=4)
client.auth()
trigger._get_authorized_client = lambda: client
yield trigger
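These urlmatch handlers stub out the GitLab v4 API on the fake host fakegitlab; inside an HTTMock context, any requests call whose URL and method match a handler is answered by that handler's dict. A small illustration using the user_handler and catchall_handler defined above (the header value mirrors the 'Bearer foobar' check in every handler):

# Sketch: exercising the stubbed API directly with requests, outside the trigger object.
import requests
from httmock import HTTMock

with HTTMock(user_handler, catchall_handler):
    resp = requests.get('http://fakegitlab/api/v4/user',
                        headers={'Authorization': 'Bearer foobar'})
    assert resp.status_code == 200
    assert resp.json()['username'] == 'john_smith'

    # Any path without a dedicated handler falls through to catchall_handler and returns 404.
    assert requests.get('http://fakegitlab/api/v4/unknown').status_code == 404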

View file

@ -1,55 +0,0 @@
import pytest
from buildtrigger.basehandler import BuildTriggerHandler
@pytest.mark.parametrize('input,output', [
("Dockerfile", True),
("server.Dockerfile", True),
(u"Dockerfile", True),
(u"server.Dockerfile", True),
("bad file name", False),
(u"bad file name", False),
])
def test_path_is_dockerfile(input, output):
assert BuildTriggerHandler.filename_is_dockerfile(input) == output
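The cases above pin down what counts as a Dockerfile name: bare Dockerfile or a dotted prefix such as server.Dockerfile, but not arbitrary strings. A regex consistent with those cases; this is a sketch, not necessarily the implementation behind filename_is_dockerfile:

import re

_DOCKERFILE_NAME = re.compile(r'^(.+\.)?Dockerfile$')

def looks_like_dockerfile(name):
    # Accepts 'Dockerfile' and '<prefix>.Dockerfile'; rejects anything else, e.g. 'bad file name'.
    return bool(_DOCKERFILE_NAME.match(name))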
@pytest.mark.parametrize('input,output', [
("", {}),
("/a", {"/a": ["/"]}),
("a", {"/a": ["/"]}),
("/b/a", {"/b/a": ["/b", "/"]}),
("b/a", {"/b/a": ["/b", "/"]}),
("/c/b/a", {"/c/b/a": ["/c/b", "/c", "/"]}),
("/a//b//c", {"/a/b/c": ["/", "/a", "/a/b"]}),
("/a", {"/a": ["/"]}),
])
def test_subdir_path_map_no_previous(input, output):
actual_mapping = BuildTriggerHandler.get_parent_directory_mappings(input)
for key in actual_mapping:
value = actual_mapping[key]
actual_mapping[key] = value.sort()
for key in output:
value = output[key]
output[key] = value.sort()
assert actual_mapping == output
@pytest.mark.parametrize('new_path,original_dictionary,output', [
("/a", {}, {"/a": ["/"]}),
("b", {"/a": ["some_path", "another_path"]}, {"/a": ["some_path", "another_path"], "/b": ["/"]}),
("/a/b/c/d", {"/e": ["some_path", "another_path"]},
{"/e": ["some_path", "another_path"], "/a/b/c/d": ["/", "/a", "/a/b", "/a/b/c"]}),
])
def test_subdir_path_map(new_path, original_dictionary, output):
actual_mapping = BuildTriggerHandler.get_parent_directory_mappings(new_path, original_dictionary)
for key in actual_mapping:
value = actual_mapping[key]
actual_mapping[key] = value.sort()
for key in output:
value = output[key]
output[key] = value.sort()
assert actual_mapping == output
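The expected dictionaries above map a normalized Dockerfile path to every ancestor directory, merged into any previously known mapping. A self-contained sketch that reproduces those cases; the helper name is hypothetical and the real BuildTriggerHandler method may differ in details:

import os

def parent_directory_mappings(dockerfile_path, existing=None):
    # Map '/c/b/a' -> ['/c/b', '/c', '/'], merging into an existing mapping if given.
    mapping = dict(existing or {})
    if not dockerfile_path:
        return mapping
    path = os.path.normpath('/' + dockerfile_path.strip('/'))   # 'a//b' -> '/a/b'
    parents = []
    parent = os.path.dirname(path)
    while True:
        parents.append(parent)
        if parent == '/':
            break
        parent = os.path.dirname(parent)
    mapping[path] = parents
    return mapping

assert parent_directory_mappings('/c/b/a') == {'/c/b/a': ['/c/b', '/c', '/']}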

View file

@ -1,91 +0,0 @@
import json
import pytest
from buildtrigger.test.bitbucketmock import get_bitbucket_trigger
from buildtrigger.triggerutil import (SkipRequestException, ValidationRequestException,
InvalidPayloadException)
from endpoints.building import PreparedBuild
from util.morecollections import AttrDict
@pytest.fixture
def bitbucket_trigger():
return get_bitbucket_trigger()
def test_list_build_subdirs(bitbucket_trigger):
assert bitbucket_trigger.list_build_subdirs() == ["/Dockerfile"]
@pytest.mark.parametrize('dockerfile_path, contents', [
('/Dockerfile', 'hello world'),
('somesubdir/Dockerfile', 'hi universe'),
('unknownpath', None),
])
def test_load_dockerfile_contents(dockerfile_path, contents):
trigger = get_bitbucket_trigger(dockerfile_path)
assert trigger.load_dockerfile_contents() == contents
@pytest.mark.parametrize('payload, expected_error, expected_message', [
('{}', InvalidPayloadException, "'push' is a required property"),
# Valid payload:
('''{
"push": {
"changes": [{
"new": {
"name": "somechange",
"target": {
"hash": "aaaaaaa",
"message": "foo",
"date": "now",
"links": {
"html": {
"href": "somelink"
}
}
}
}
}]
},
"repository": {
"full_name": "foo/bar"
}
}''', None, None),
# Skip message:
('''{
"push": {
"changes": [{
"new": {
"name": "somechange",
"target": {
"hash": "aaaaaaa",
"message": "[skip build] foo",
"date": "now",
"links": {
"html": {
"href": "somelink"
}
}
}
}
}]
},
"repository": {
"full_name": "foo/bar"
}
}''', SkipRequestException, ''),
])
def test_handle_trigger_request(bitbucket_trigger, payload, expected_error, expected_message):
def get_payload():
return json.loads(payload)
request = AttrDict(dict(get_json=get_payload))
if expected_error is not None:
with pytest.raises(expected_error) as ipe:
bitbucket_trigger.handle_trigger_request(request)
assert str(ipe.value) == expected_message
else:
assert isinstance(bitbucket_trigger.handle_trigger_request(request), PreparedBuild)

View file

@ -1,51 +0,0 @@
import pytest
from buildtrigger.customhandler import CustomBuildTrigger
from buildtrigger.triggerutil import (InvalidPayloadException, SkipRequestException,
TriggerStartException)
from endpoints.building import PreparedBuild
from util.morecollections import AttrDict
@pytest.mark.parametrize('payload, expected_error, expected_message', [
('', InvalidPayloadException, 'Missing expected payload'),
('{}', InvalidPayloadException, "'commit' is a required property"),
('{"commit": "foo", "ref": "refs/heads/something", "default_branch": "baz"}',
InvalidPayloadException, "u'foo' does not match '^([A-Fa-f0-9]{7,})$'"),
('{"commit": "11d6fbc", "ref": "refs/heads/something", "default_branch": "baz"}', None, None),
('''{
"commit": "11d6fbc",
"ref": "refs/heads/something",
"default_branch": "baz",
"commit_info": {
"message": "[skip build]",
"url": "http://foo.bar",
"date": "NOW"
}
}''', SkipRequestException, ''),
])
def test_handle_trigger_request(payload, expected_error, expected_message):
trigger = CustomBuildTrigger(None, {'build_source': 'foo'})
request = AttrDict(dict(data=payload))
if expected_error is not None:
with pytest.raises(expected_error) as ipe:
trigger.handle_trigger_request(request)
assert str(ipe.value) == expected_message
else:
assert isinstance(trigger.handle_trigger_request(request), PreparedBuild)
@pytest.mark.parametrize('run_parameters, expected_error, expected_message', [
({}, TriggerStartException, 'missing required parameter'),
({'commit_sha': 'foo'}, TriggerStartException, "'foo' does not match '^([A-Fa-f0-9]{7,})$'"),
({'commit_sha': '11d6fbc'}, None, None),
])
def test_manual_start(run_parameters, expected_error, expected_message):
trigger = CustomBuildTrigger(None, {'build_source': 'foo'})
if expected_error is not None:
with pytest.raises(expected_error) as ipe:
trigger.manual_start(run_parameters)
assert str(ipe.value) == expected_message
else:
assert isinstance(trigger.manual_start(run_parameters), PreparedBuild)

View file

@ -1,121 +0,0 @@
import pytest
from buildtrigger.triggerutil import TriggerStartException
from buildtrigger.test.bitbucketmock import get_bitbucket_trigger
from buildtrigger.test.githubmock import get_github_trigger
from endpoints.building import PreparedBuild
# Note: This test suite executes a common set of tests against all the trigger types specified
# in this fixture. Each trigger's mock is expected to return the same data for all of these calls.
@pytest.fixture(params=[get_github_trigger(), get_bitbucket_trigger()])
def githost_trigger(request):
return request.param
@pytest.mark.parametrize('run_parameters, expected_error, expected_message', [
# No branch or tag specified: use the commit of the default branch.
({}, None, None),
# Invalid branch.
({'refs': {'kind': 'branch', 'name': 'invalid'}}, TriggerStartException,
'Could not find branch in repository'),
# Invalid tag.
({'refs': {'kind': 'tag', 'name': 'invalid'}}, TriggerStartException,
'Could not find tag in repository'),
# Valid branch.
({'refs': {'kind': 'branch', 'name': 'master'}}, None, None),
# Valid tag.
({'refs': {'kind': 'tag', 'name': 'sometag'}}, None, None),
])
def test_manual_start(run_parameters, expected_error, expected_message, githost_trigger):
if expected_error is not None:
with pytest.raises(expected_error) as ipe:
githost_trigger.manual_start(run_parameters)
assert str(ipe.value) == expected_message
else:
assert isinstance(githost_trigger.manual_start(run_parameters), PreparedBuild)
@pytest.mark.parametrize('name, expected', [
('refs', [
{'kind': 'branch', 'name': 'master'},
{'kind': 'branch', 'name': 'otherbranch'},
{'kind': 'tag', 'name': 'sometag'},
{'kind': 'tag', 'name': 'someothertag'},
]),
('tag_name', set(['sometag', 'someothertag'])),
('branch_name', set(['master', 'otherbranch'])),
('invalid', None)
])
def test_list_field_values(name, expected, githost_trigger):
if expected is None:
assert githost_trigger.list_field_values(name) is None
elif isinstance(expected, set):
assert set(githost_trigger.list_field_values(name)) == set(expected)
else:
assert githost_trigger.list_field_values(name) == expected
def test_list_build_source_namespaces():
namespaces_expected = [
{
'personal': True,
'score': 1,
'avatar_url': 'avatarurl',
'id': 'knownuser',
'title': 'knownuser',
'url': 'https://bitbucket.org/knownuser',
},
{
'score': 2,
'title': 'someorg',
'personal': False,
'url': 'https://bitbucket.org/someorg',
'avatar_url': 'avatarurl',
'id': 'someorg'
}
]
found = get_bitbucket_trigger().list_build_source_namespaces()
found.sort()
namespaces_expected.sort()
assert found == namespaces_expected
@pytest.mark.parametrize('namespace, expected', [
('', []),
('unknown', []),
('knownuser', [
{
'last_updated': 0, 'name': 'somerepo',
'url': 'https://bitbucket.org/knownuser/somerepo', 'private': True,
'full_name': 'knownuser/somerepo', 'has_admin_permissions': True,
'description': 'some somerepo repo'
}]),
('someorg', [
{
'last_updated': 0, 'name': 'somerepo',
'url': 'https://bitbucket.org/someorg/somerepo', 'private': True,
'full_name': 'someorg/somerepo', 'has_admin_permissions': False,
'description': 'some somerepo repo'
},
{
'last_updated': 0, 'name': 'anotherrepo',
'url': 'https://bitbucket.org/someorg/anotherrepo', 'private': False,
'full_name': 'someorg/anotherrepo', 'has_admin_permissions': False,
'description': 'some anotherrepo repo'
}]),
])
def test_list_build_sources_for_namespace(namespace, expected, githost_trigger):
assert githost_trigger.list_build_sources_for_namespace(namespace) == expected
def test_activate_and_deactivate(githost_trigger):
_, private_key = githost_trigger.activate('http://some/url')
assert 'private_key' in private_key
githost_trigger.deactivate()

View file

@ -1,117 +0,0 @@
import json
import pytest
from buildtrigger.test.githubmock import get_github_trigger
from buildtrigger.triggerutil import (SkipRequestException, ValidationRequestException,
InvalidPayloadException)
from endpoints.building import PreparedBuild
from util.morecollections import AttrDict
@pytest.fixture
def github_trigger():
return get_github_trigger()
@pytest.mark.parametrize('payload, expected_error, expected_message', [
('{"zen": true}', SkipRequestException, ""),
('{}', InvalidPayloadException, "Missing 'repository' on request"),
('{"repository": "foo"}', InvalidPayloadException, "Missing 'owner' on repository"),
# Valid payload:
('''{
"repository": {
"owner": {
"name": "someguy"
},
"name": "somerepo",
"ssh_url": "someurl"
},
"ref": "refs/tags/foo",
"head_commit": {
"id": "11d6fbc",
"url": "http://some/url",
"message": "some message",
"timestamp": "NOW"
}
}''', None, None),
# Skip message:
('''{
"repository": {
"owner": {
"name": "someguy"
},
"name": "somerepo",
"ssh_url": "someurl"
},
"ref": "refs/tags/foo",
"head_commit": {
"id": "11d6fbc",
"url": "http://some/url",
"message": "[skip build]",
"timestamp": "NOW"
}
}''', SkipRequestException, ''),
])
def test_handle_trigger_request(github_trigger, payload, expected_error, expected_message):
def get_payload():
return json.loads(payload)
request = AttrDict(dict(get_json=get_payload))
if expected_error is not None:
with pytest.raises(expected_error) as ipe:
github_trigger.handle_trigger_request(request)
assert str(ipe.value) == expected_message
else:
assert isinstance(github_trigger.handle_trigger_request(request), PreparedBuild)
@pytest.mark.parametrize('dockerfile_path, contents', [
('/Dockerfile', 'hello world'),
('somesubdir/Dockerfile', 'hi universe'),
('unknownpath', None),
])
def test_load_dockerfile_contents(dockerfile_path, contents):
trigger = get_github_trigger(dockerfile_path)
assert trigger.load_dockerfile_contents() == contents
@pytest.mark.parametrize('username, expected_response', [
('unknownuser', None),
('knownuser', {'html_url': 'https://bitbucket.org/knownuser', 'avatar_url': 'avatarurl'}),
])
def test_lookup_user(username, expected_response, github_trigger):
assert github_trigger.lookup_user(username) == expected_response
def test_list_build_subdirs(github_trigger):
assert github_trigger.list_build_subdirs() == ['Dockerfile', 'somesubdir/Dockerfile']
def test_list_build_source_namespaces(github_trigger):
namespaces_expected = [
{
'personal': True,
'score': 1,
'avatar_url': 'avatarurl',
'id': 'knownuser',
'title': 'knownuser',
'url': 'https://bitbucket.org/knownuser',
},
{
'score': 0,
'title': 'someorg',
'personal': False,
'url': '',
'avatar_url': 'avatarurl',
'id': 'someorg'
}
]
found = github_trigger.list_build_source_namespaces()
found.sort()
namespaces_expected.sort()
assert found == namespaces_expected

View file

@ -1,231 +0,0 @@
import json
import pytest
from mock import Mock
from buildtrigger.test.gitlabmock import get_gitlab_trigger
from buildtrigger.triggerutil import (SkipRequestException, ValidationRequestException,
InvalidPayloadException, TriggerStartException)
from endpoints.building import PreparedBuild
from util.morecollections import AttrDict
@pytest.fixture()
def gitlab_trigger():
with get_gitlab_trigger() as t:
yield t
def test_list_build_subdirs(gitlab_trigger):
assert gitlab_trigger.list_build_subdirs() == ['Dockerfile']
@pytest.mark.parametrize('dockerfile_path, contents', [
('/Dockerfile', 'hello world'),
('somesubdir/Dockerfile', 'hi universe'),
('unknownpath', None),
])
def test_load_dockerfile_contents(dockerfile_path, contents):
with get_gitlab_trigger(dockerfile_path=dockerfile_path) as trigger:
assert trigger.load_dockerfile_contents() == contents
@pytest.mark.parametrize('email, expected_response', [
('unknown@email.com', None),
('knownuser', {'username': 'knownuser', 'html_url': 'https://bitbucket.org/knownuser',
'avatar_url': 'avatarurl'}),
])
def test_lookup_user(email, expected_response, gitlab_trigger):
assert gitlab_trigger.lookup_user(email) == expected_response
def test_null_permissions():
with get_gitlab_trigger(add_permissions=False) as trigger:
sources = trigger.list_build_sources_for_namespace('someorg')
source = sources[0]
assert source['has_admin_permissions']
def test_list_build_sources():
with get_gitlab_trigger() as trigger:
sources = trigger.list_build_sources_for_namespace('someorg')
assert sources == [
{
'last_updated': 1380548762,
'name': u'someproject',
'url': u'http://example.com/someorg/someproject',
'private': True,
'full_name': u'someorg/someproject',
'has_admin_permissions': False,
'description': ''
},
{
'last_updated': 1380548762,
'name': u'anotherproject',
'url': u'http://example.com/someorg/anotherproject',
'private': False,
'full_name': u'someorg/anotherproject',
'has_admin_permissions': True,
'description': '',
}]
def test_null_avatar():
with get_gitlab_trigger(missing_avatar_url=True) as trigger:
namespace_data = trigger.list_build_source_namespaces()
expected = {
'avatar_url': None,
'personal': False,
'title': u'someorg',
'url': u'http://gitlab.com/groups/someorg',
'score': 1,
'id': '2',
}
assert namespace_data == [expected]
@pytest.mark.parametrize('payload, expected_error, expected_message', [
('{}', InvalidPayloadException, ''),
# Valid payload:
('''{
"object_kind": "push",
"ref": "refs/heads/master",
"checkout_sha": "aaaaaaa",
"repository": {
"git_ssh_url": "foobar"
},
"commits": [
{
"id": "aaaaaaa",
"url": "someurl",
"message": "hello there!",
"timestamp": "now"
}
]
}''', None, None),
# Skip message:
('''{
"object_kind": "push",
"ref": "refs/heads/master",
"checkout_sha": "aaaaaaa",
"repository": {
"git_ssh_url": "foobar"
},
"commits": [
{
"id": "aaaaaaa",
"url": "someurl",
"message": "[skip build] hello there!",
"timestamp": "now"
}
]
}''', SkipRequestException, ''),
])
def test_handle_trigger_request(gitlab_trigger, payload, expected_error, expected_message):
def get_payload():
return json.loads(payload)
request = AttrDict(dict(get_json=get_payload))
if expected_error is not None:
with pytest.raises(expected_error) as ipe:
gitlab_trigger.handle_trigger_request(request)
assert str(ipe.value) == expected_message
else:
assert isinstance(gitlab_trigger.handle_trigger_request(request), PreparedBuild)
@pytest.mark.parametrize('run_parameters, expected_error, expected_message', [
# No branch or tag specified: use the commit of the default branch.
({}, None, None),
# Invalid branch.
({'refs': {'kind': 'branch', 'name': 'invalid'}}, TriggerStartException,
'Could not find branch in repository'),
# Invalid tag.
({'refs': {'kind': 'tag', 'name': 'invalid'}}, TriggerStartException,
'Could not find tag in repository'),
# Valid branch.
({'refs': {'kind': 'branch', 'name': 'master'}}, None, None),
# Valid tag.
({'refs': {'kind': 'tag', 'name': 'sometag'}}, None, None),
])
def test_manual_start(run_parameters, expected_error, expected_message, gitlab_trigger):
if expected_error is not None:
with pytest.raises(expected_error) as ipe:
gitlab_trigger.manual_start(run_parameters)
assert str(ipe.value) == expected_message
else:
assert isinstance(gitlab_trigger.manual_start(run_parameters), PreparedBuild)
def test_activate_and_deactivate(gitlab_trigger):
_, private_key = gitlab_trigger.activate('http://some/url')
assert 'private_key' in private_key
gitlab_trigger.deactivate()
@pytest.mark.parametrize('name, expected', [
('refs', [
{'kind': 'branch', 'name': 'master'},
{'kind': 'branch', 'name': 'otherbranch'},
{'kind': 'tag', 'name': 'sometag'},
{'kind': 'tag', 'name': 'someothertag'},
]),
('tag_name', set(['sometag', 'someothertag'])),
('branch_name', set(['master', 'otherbranch'])),
('invalid', None)
])
def test_list_field_values(name, expected, gitlab_trigger):
if expected is None:
assert gitlab_trigger.list_field_values(name) is None
elif isinstance(expected, set):
assert set(gitlab_trigger.list_field_values(name)) == set(expected)
else:
assert gitlab_trigger.list_field_values(name) == expected
@pytest.mark.parametrize('namespace, expected', [
('', []),
('unknown', []),
('knownuser', [
{
'last_updated': 1380548762,
'name': u'anotherproject',
'url': u'http://example.com/knownuser/anotherproject',
'private': False,
'full_name': u'knownuser/anotherproject',
'has_admin_permissions': True,
'description': ''
},
]),
('someorg', [
{
'last_updated': 1380548762,
'name': u'someproject',
'url': u'http://example.com/someorg/someproject',
'private': True,
'full_name': u'someorg/someproject',
'has_admin_permissions': False,
'description': ''
},
{
'last_updated': 1380548762,
'name': u'anotherproject',
'url': u'http://example.com/someorg/anotherproject',
'private': False,
'full_name': u'someorg/anotherproject',
'has_admin_permissions': True,
'description': '',
}]),
])
def test_list_build_sources_for_namespace(namespace, expected, gitlab_trigger):
assert gitlab_trigger.list_build_sources_for_namespace(namespace) == expected

View file

@ -1,572 +0,0 @@
import json
import pytest
from jsonschema import validate
from buildtrigger.customhandler import custom_trigger_payload
from buildtrigger.basehandler import METADATA_SCHEMA
from buildtrigger.bitbuckethandler import get_transformed_webhook_payload as bb_webhook
from buildtrigger.bitbuckethandler import get_transformed_commit_info as bb_commit
from buildtrigger.githubhandler import get_transformed_webhook_payload as gh_webhook
from buildtrigger.gitlabhandler import get_transformed_webhook_payload as gl_webhook
from buildtrigger.triggerutil import SkipRequestException
def assertSkipped(filename, processor, *args, **kwargs):
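  # Load the named fixture from buildtrigger/test/triggerjson and assert that
  # running the processor over it raises SkipRequestException.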
with open('buildtrigger/test/triggerjson/%s.json' % filename) as f:
payload = json.loads(f.read())
nargs = [payload]
nargs.extend(args)
with pytest.raises(SkipRequestException):
processor(*nargs, **kwargs)
def assertSchema(filename, expected, processor, *args, **kwargs):
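  # Load the named fixture, run the processor over it, check the result against
  # `expected`, and validate it against METADATA_SCHEMA.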
with open('buildtrigger/test/triggerjson/%s.json' % filename) as f:
payload = json.loads(f.read())
nargs = [payload]
nargs.extend(args)
created = processor(*nargs, **kwargs)
assert created == expected
validate(created, METADATA_SCHEMA)
def test_custom_custom():
expected = {
u'commit':u'1c002dd',
u'commit_info': {
u'url': u'gitsoftware.com/repository/commits/1234567',
u'date': u'timestamp',
u'message': u'initial commit',
u'committer': {
u'username': u'user',
u'url': u'gitsoftware.com/users/user',
u'avatar_url': u'gravatar.com/user.png'
},
u'author': {
u'username': u'user',
u'url': u'gitsoftware.com/users/user',
u'avatar_url': u'gravatar.com/user.png'
}
},
u'ref': u'refs/heads/master',
u'default_branch': u'master',
u'git_url': u'foobar',
}
assertSchema('custom_webhook', expected, custom_trigger_payload, git_url='foobar')
def test_custom_gitlab():
expected = {
'commit': u'fb88379ee45de28a0a4590fddcbd8eff8b36026e',
'ref': u'refs/heads/master',
'git_url': u'git@gitlab.com:jsmith/somerepo.git',
'commit_info': {
'url': u'https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e',
'date': u'2015-08-13T19:33:18+00:00',
'message': u'Fix link\n',
},
}
assertSchema('gitlab_webhook', expected, custom_trigger_payload, git_url='git@gitlab.com:jsmith/somerepo.git')
def test_custom_github():
expected = {
'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'ref': u'refs/heads/master',
'default_branch': u'master',
'git_url': u'git@github.com:jsmith/anothertest.git',
'commit_info': {
'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'date': u'2015-09-11T14:26:16-04:00',
'message': u'Update Dockerfile',
'committer': {
'username': u'jsmith',
},
'author': {
'username': u'jsmith',
},
},
}
assertSchema('github_webhook', expected, custom_trigger_payload,
git_url='git@github.com:jsmith/anothertest.git')
def test_custom_bitbucket():
expected = {
"commit": u"af64ae7188685f8424040b4735ad12941b980d75",
"ref": u"refs/heads/master",
"git_url": u"git@bitbucket.org:jsmith/another-repo.git",
"commit_info": {
"url": u"https://bitbucket.org/jsmith/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75",
"date": u"2015-09-10T20:40:54+00:00",
"message": u"Dockerfile edited online with Bitbucket",
"author": {
"username": u"John Smith",
"avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/",
},
"committer": {
"username": u"John Smith",
"avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/",
},
},
}
assertSchema('bitbucket_webhook', expected, custom_trigger_payload, git_url='git@bitbucket.org:jsmith/another-repo.git')
def test_bitbucket_customer_payload_noauthor():
expected = {
"commit": "a0ec139843b2bb281ab21a433266ddc498e605dc",
"ref": "refs/heads/master",
"git_url": "git@bitbucket.org:somecoollabs/svc-identity.git",
"commit_info": {
"url": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc",
"date": "2015-09-25T00:55:08+00:00",
"message": "Update version.py to 0.1.2 [skip ci]\n\n(by utilitybelt/scripts/autotag_version.py)\n",
"committer": {
"username": "CodeShip Tagging",
"avatar_url": "https://bitbucket.org/account/SomeCoolLabs_CodeShip/avatar/32/",
},
},
}
assertSchema('bitbucket_customer_example_noauthor', expected, bb_webhook)
def test_bitbucket_customer_payload_tag():
expected = {
"commit": "a0ec139843b2bb281ab21a433266ddc498e605dc",
"ref": "refs/tags/0.1.2",
"git_url": "git@bitbucket.org:somecoollabs/svc-identity.git",
"commit_info": {
"url": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc",
"date": "2015-09-25T00:55:08+00:00",
"message": "Update version.py to 0.1.2 [skip ci]\n\n(by utilitybelt/scripts/autotag_version.py)\n",
"committer": {
"username": "CodeShip Tagging",
"avatar_url": "https://bitbucket.org/account/SomeCoolLabs_CodeShip/avatar/32/",
},
},
}
assertSchema('bitbucket_customer_example_tag', expected, bb_webhook)
def test_bitbucket_commit():
ref = 'refs/heads/somebranch'
default_branch = 'somebranch'
repository_name = 'foo/bar'
def lookup_author(_):
return {
'user': {
'display_name': 'cooluser',
'avatar': 'http://some/avatar/url'
}
}
expected = {
"commit": u"abdeaf1b2b4a6b9ddf742c1e1754236380435a62",
"ref": u"refs/heads/somebranch",
"git_url": u"git@bitbucket.org:foo/bar.git",
"default_branch": u"somebranch",
"commit_info": {
"url": u"https://bitbucket.org/foo/bar/commits/abdeaf1b2b4a6b9ddf742c1e1754236380435a62",
"date": u"2012-07-24 00:26:36",
"message": u"making some changes\n",
"author": {
"avatar_url": u"http://some/avatar/url",
"username": u"cooluser",
}
}
}
assertSchema('bitbucket_commit', expected, bb_commit, ref, default_branch,
repository_name, lookup_author)
def test_bitbucket_webhook_payload():
expected = {
"commit": u"af64ae7188685f8424040b4735ad12941b980d75",
"ref": u"refs/heads/master",
"git_url": u"git@bitbucket.org:jsmith/another-repo.git",
"commit_info": {
"url": u"https://bitbucket.org/jsmith/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75",
"date": u"2015-09-10T20:40:54+00:00",
"message": u"Dockerfile edited online with Bitbucket",
"author": {
"username": u"John Smith",
"avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/",
},
"committer": {
"username": u"John Smith",
"avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/",
},
},
}
assertSchema('bitbucket_webhook', expected, bb_webhook)
def test_github_webhook_payload_slash_branch():
expected = {
'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'ref': u'refs/heads/slash/branch',
'default_branch': u'master',
'git_url': u'git@github.com:jsmith/anothertest.git',
'commit_info': {
'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'date': u'2015-09-11T14:26:16-04:00',
'message': u'Update Dockerfile',
'committer': {
'username': u'jsmith',
},
'author': {
'username': u'jsmith',
},
},
}
assertSchema('github_webhook_slash_branch', expected, gh_webhook)
def test_github_webhook_payload():
expected = {
'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'ref': u'refs/heads/master',
'default_branch': u'master',
'git_url': u'git@github.com:jsmith/anothertest.git',
'commit_info': {
'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'date': u'2015-09-11T14:26:16-04:00',
'message': u'Update Dockerfile',
'committer': {
'username': u'jsmith',
},
'author': {
'username': u'jsmith',
},
},
}
assertSchema('github_webhook', expected, gh_webhook)
def test_github_webhook_payload_with_lookup():
expected = {
'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'ref': u'refs/heads/master',
'default_branch': u'master',
'git_url': u'git@github.com:jsmith/anothertest.git',
'commit_info': {
'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'date': u'2015-09-11T14:26:16-04:00',
'message': u'Update Dockerfile',
'committer': {
'username': u'jsmith',
'url': u'http://github.com/jsmith',
'avatar_url': u'http://some/avatar/url',
},
'author': {
'username': u'jsmith',
'url': u'http://github.com/jsmith',
'avatar_url': u'http://some/avatar/url',
},
},
}
def lookup_user(_):
return {
'html_url': 'http://github.com/jsmith',
'avatar_url': 'http://some/avatar/url'
}
assertSchema('github_webhook', expected, gh_webhook, lookup_user=lookup_user)
def test_github_webhook_payload_missing_fields_with_lookup():
expected = {
'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'ref': u'refs/heads/master',
'default_branch': u'master',
'git_url': u'git@github.com:jsmith/anothertest.git',
'commit_info': {
'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'date': u'2015-09-11T14:26:16-04:00',
'message': u'Update Dockerfile'
},
}
def lookup_user(username):
if not username:
raise Exception('Fail!')
return {
'html_url': 'http://github.com/jsmith',
'avatar_url': 'http://some/avatar/url'
}
assertSchema('github_webhook_missing', expected, gh_webhook, lookup_user=lookup_user)
def test_gitlab_webhook_payload():
expected = {
'commit': u'fb88379ee45de28a0a4590fddcbd8eff8b36026e',
'ref': u'refs/heads/master',
'git_url': u'git@gitlab.com:jsmith/somerepo.git',
'commit_info': {
'url': u'https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e',
'date': u'2015-08-13T19:33:18+00:00',
'message': u'Fix link\n',
},
}
assertSchema('gitlab_webhook', expected, gl_webhook)
def test_github_webhook_payload_known_issue():
expected = {
"commit": "118b07121695d9f2e40a5ff264fdcc2917680870",
"ref": "refs/heads/master",
"default_branch": "master",
"git_url": "git@github.com:jsmith/docker-test.git",
"commit_info": {
"url": "https://github.com/jsmith/docker-test/commit/118b07121695d9f2e40a5ff264fdcc2917680870",
"date": "2015-09-25T14:55:11-04:00",
"message": "Fail",
},
}
assertSchema('github_webhook_noname', expected, gh_webhook)
def test_github_webhook_payload_missing_fields():
expected = {
'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'ref': u'refs/heads/master',
'default_branch': u'master',
'git_url': u'git@github.com:jsmith/anothertest.git',
'commit_info': {
'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c',
'date': u'2015-09-11T14:26:16-04:00',
'message': u'Update Dockerfile'
},
}
assertSchema('github_webhook_missing', expected, gh_webhook)
def test_gitlab_webhook_nocommit_payload():
assertSkipped('gitlab_webhook_nocommit', gl_webhook)
def test_gitlab_webhook_multiple_commits():
expected = {
'commit': u'9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53',
'ref': u'refs/heads/master',
'git_url': u'git@gitlab.com:jsmith/some-test-project.git',
'commit_info': {
'url': u'https://gitlab.com/jsmith/some-test-project/commit/9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53',
'date': u'2016-09-29T15:02:41+00:00',
'message': u"Merge branch 'foobar' into 'master'\r\n\r\nAdd changelog\r\n\r\nSome merge thing\r\n\r\nSee merge request !1",
'author': {
'username': 'jsmith',
'url': 'http://gitlab.com/jsmith',
'avatar_url': 'http://some/avatar/url'
},
},
}
def lookup_user(_):
return {
'username': 'jsmith',
'html_url': 'http://gitlab.com/jsmith',
'avatar_url': 'http://some/avatar/url',
}
assertSchema('gitlab_webhook_multicommit', expected, gl_webhook, lookup_user=lookup_user)
def test_gitlab_webhook_for_tag():
expected = {
'commit': u'82b3d5ae55f7080f1e6022629cdb57bfae7cccc7',
'commit_info': {
'author': {
'avatar_url': 'http://some/avatar/url',
'url': 'http://gitlab.com/jsmith',
'username': 'jsmith'
},
'date': '2015-08-13T19:33:18+00:00',
'message': 'Fix link\n',
'url': 'https://some/url',
},
'git_url': u'git@example.com:jsmith/example.git',
'ref': u'refs/tags/v1.0.0',
}
def lookup_user(_):
return {
'username': 'jsmith',
'html_url': 'http://gitlab.com/jsmith',
'avatar_url': 'http://some/avatar/url',
}
def lookup_commit(repo_id, commit_sha):
if commit_sha == '82b3d5ae55f7080f1e6022629cdb57bfae7cccc7':
return {
"id": "82b3d5ae55f7080f1e6022629cdb57bfae7cccc7",
"message": "Fix link\n",
"timestamp": "2015-08-13T19:33:18+00:00",
"url": "https://some/url",
"author_name": "Foo Guy",
"author_email": "foo@bar.com",
}
return None
assertSchema('gitlab_webhook_tag', expected, gl_webhook, lookup_user=lookup_user,
lookup_commit=lookup_commit)
def test_gitlab_webhook_for_tag_nocommit():
assertSkipped('gitlab_webhook_tag', gl_webhook)
def test_gitlab_webhook_for_tag_commit_sha_null():
assertSkipped('gitlab_webhook_tag_commit_sha_null', gl_webhook)
def test_gitlab_webhook_for_tag_known_issue():
expected = {
'commit': u'770830e7ca132856991e6db4f7fc0f4dbe20bd5f',
'ref': u'refs/tags/thirdtag',
'git_url': u'git@gitlab.com:someuser/some-test-project.git',
'commit_info': {
'url': u'https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f',
'date': u'2019-10-17T18:07:48Z',
'message': u'Update Dockerfile',
'author': {
'username': 'someuser',
'url': 'http://gitlab.com/someuser',
'avatar_url': 'http://some/avatar/url',
},
},
}
def lookup_user(_):
return {
'username': 'someuser',
'html_url': 'http://gitlab.com/someuser',
'avatar_url': 'http://some/avatar/url',
}
assertSchema('gitlab_webhook_tag_commit_issue', expected, gl_webhook, lookup_user=lookup_user)
def test_gitlab_webhook_payload_known_issue():
expected = {
'commit': u'770830e7ca132856991e6db4f7fc0f4dbe20bd5f',
'ref': u'refs/tags/fourthtag',
'git_url': u'git@gitlab.com:someuser/some-test-project.git',
'commit_info': {
'url': u'https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f',
'date': u'2019-10-17T18:07:48Z',
'message': u'Update Dockerfile',
},
}
def lookup_commit(repo_id, commit_sha):
if commit_sha == '770830e7ca132856991e6db4f7fc0f4dbe20bd5f':
return {
"added": [],
"author": {
"name": "Some User",
"email": "someuser@somedomain.com"
},
"url": "https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f",
"message": "Update Dockerfile",
"removed": [],
"modified": [
"Dockerfile"
],
"id": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f"
}
return None
assertSchema('gitlab_webhook_known_issue', expected, gl_webhook, lookup_commit=lookup_commit)
def test_gitlab_webhook_for_other():
assertSkipped('gitlab_webhook_other', gl_webhook)
def test_gitlab_webhook_payload_with_lookup():
expected = {
'commit': u'fb88379ee45de28a0a4590fddcbd8eff8b36026e',
'ref': u'refs/heads/master',
'git_url': u'git@gitlab.com:jsmith/somerepo.git',
'commit_info': {
'url': u'https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e',
'date': u'2015-08-13T19:33:18+00:00',
'message': u'Fix link\n',
'author': {
'username': 'jsmith',
'url': 'http://gitlab.com/jsmith',
'avatar_url': 'http://some/avatar/url',
},
},
}
def lookup_user(_):
return {
'username': 'jsmith',
'html_url': 'http://gitlab.com/jsmith',
'avatar_url': 'http://some/avatar/url',
}
assertSchema('gitlab_webhook', expected, gl_webhook, lookup_user=lookup_user)
def test_github_webhook_payload_deleted_commit():
expected = {
'commit': u'456806b662cb903a0febbaed8344f3ed42f27bab',
'commit_info': {
'author': {
'username': u'jsmith'
},
'committer': {
'username': u'jsmith'
},
'date': u'2015-12-08T18:07:03-05:00',
'message': (u'Merge pull request #1044 from jsmith/errerror\n\n' +
'Assign the exception to a variable to log it'),
'url': u'https://github.com/jsmith/somerepo/commit/456806b662cb903a0febbaed8344f3ed42f27bab'
},
'git_url': u'git@github.com:jsmith/somerepo.git',
'ref': u'refs/heads/master',
'default_branch': u'master',
}
def lookup_user(_):
return None
assertSchema('github_webhook_deletedcommit', expected, gh_webhook, lookup_user=lookup_user)
def test_github_webhook_known_issue():
def lookup_user(_):
return None
assertSkipped('github_webhook_knownissue', gh_webhook, lookup_user=lookup_user)
def test_bitbucket_webhook_known_issue():
assertSkipped('bitbucket_knownissue', bb_webhook)


@ -1,25 +0,0 @@
import re
import pytest
from buildtrigger.triggerutil import matches_ref
@pytest.mark.parametrize('ref, filt, matches', [
('ref/heads/master', '.+', True),
('ref/heads/master', 'heads/.+', True),
('ref/heads/master', 'heads/master', True),
('ref/heads/slash/branch', 'heads/slash/branch', True),
('ref/heads/slash/branch', 'heads/.+', True),
('ref/heads/foobar', 'heads/master', False),
('ref/heads/master', 'tags/master', False),
('ref/heads/master', '(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)', True),
('ref/heads/alpha', '(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)', True),
('ref/heads/beta', '(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)', True),
('ref/heads/gamma', '(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)', True),
('ref/heads/delta', '(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)', False),
])
def test_matches_ref(ref, filt, matches):
assert matches_ref(ref, re.compile(filt)) == matches
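
The cases above pin the matching behaviour down fairly tightly: the leading path component of the ref is ignored and the filter has to cover the remainder in full. A minimal sketch consistent with those cases (an illustration, not necessarily the shipped implementation; the usage lines below are hypothetical) could look like this:

import re

def matches_ref(ref, regex):
  # Drop the leading component ("ref/" in these tests, "refs/" in real payloads)
  # and require the compiled filter to match the remainder in its entirety.
  match_string = ref.split('/', 1)[1]
  match = regex.match(match_string)
  if not match:
    return False
  return len(match.group(0)) == len(match_string)

assert matches_ref('ref/heads/master', re.compile('heads/.+'))
assert not matches_ref('ref/heads/master', re.compile('tags/master'))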


@ -1,68 +0,0 @@
{
"push": {
"changes": [
]
},
"actor": {
"account_id": "jsmith",
"display_name": "John Smith",
"type": "user",
"links": {
"self": {
"href": "https:\/\/api.bitbucket.org\/2.0\/users\/jsmith"
},
"avatar": {
"href": "https:\/\/bitbucket.org\/account\/jsmith\/avatar\/32\/"
}
}
},
"repository": {
"website": "",
"scm": "git",
"name": "slip-api",
"links": {
"self": {
"href": "https:\/\/api.bitbucket.org\/2.0\/repositories\/goldcuff\/slip-api"
},
"html": {
"href": "https:\/\/bitbucket.org\/goldcuff\/slip-api"
},
"avatar": {
"href": "https:\/\/bitbucket.org\/goldcuff\/slip-api\/avatar\/32\/"
}
},
"project": {
"links": {
"self": {
"href": "https:\/\/api.bitbucket.org\/2.0\/teams\/goldcuff\/projects\/SLIP"
},
"html": {
"href": "https:\/\/bitbucket.org\/account\/user\/goldcuff\/projects\/SLIP"
},
"avatar": {
"href": "https:\/\/bitbucket.org\/account\/user\/goldcuff\/projects\/SLIP\/avatar\/32"
}
},
"type": "project",
"name": "SLIP",
"key": "SLIP"
},
"full_name": "goldcuff\/slip-api",
"owner": {
"account_id": "goldcuff",
"display_name": "Goldcuff",
"type": "team",
"links": {
"self": {
"href": "https:\/\/api.bitbucket.org\/2.0\/teams\/goldcuff"
},
"avatar": {
"href": "https:\/\/bitbucket.org\/account\/goldcuff\/avatar\/32\/"
}
}
},
"type": "repository",
"is_private": true
}
}


@ -1,153 +0,0 @@
{
"ref": "refs/heads/master",
"before": "9ea43cab474709d4a61afb7e3340de1ffc405b41",
"after": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"created": false,
"deleted": false,
"forced": false,
"base_ref": null,
"compare": "https://github.com/jsmith/anothertest/compare/9ea43cab4747...410f4cdf8ff0",
"commits": [
{
"id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"distinct": true,
"message": "Update Dockerfile",
"timestamp": "2015-09-11T14:26:16-04:00",
"url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"author": {
"name": "jsmith",
"email": "jsmith@users.noreply.github.com",
"username": "jsmith"
},
"committer": {
"name": "jsmith",
"email": "jsmith@users.noreply.github.com",
"username": "jsmith"
},
"added": [],
"removed": [],
"modified": [
"Dockerfile"
]
}
],
"head_commit": {
"id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"distinct": true,
"message": "Update Dockerfile",
"timestamp": "2015-09-11T14:26:16-04:00",
"url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"author": {
"name": "jsmith",
"email": "jsmith@users.noreply.github.com",
"username": "jsmith"
},
"committer": {
"name": "jsmith",
"email": "jsmith@users.noreply.github.com",
"username": "jsmith"
},
"added": [],
"removed": [],
"modified": [
"Dockerfile"
]
},
"repository": {
"id": 1234567,
"name": "anothertest",
"full_name": "jsmith/anothertest",
"owner": {
"name": "jsmith",
"email": "jsmith@users.noreply.github.com"
},
"private": false,
"html_url": "https://github.com/jsmith/anothertest",
"description": "",
"fork": false,
"url": "https://github.com/jsmith/anothertest",
"forks_url": "https://api.github.com/repos/jsmith/anothertest/forks",
"keys_url": "https://api.github.com/repos/jsmith/anothertest/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/jsmith/anothertest/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/jsmith/anothertest/teams",
"hooks_url": "https://api.github.com/repos/jsmith/anothertest/hooks",
"issue_events_url": "https://api.github.com/repos/jsmith/anothertest/issues/events{/number}",
"events_url": "https://api.github.com/repos/jsmith/anothertest/events",
"assignees_url": "https://api.github.com/repos/jsmith/anothertest/assignees{/user}",
"branches_url": "https://api.github.com/repos/jsmith/anothertest/branches{/branch}",
"tags_url": "https://api.github.com/repos/jsmith/anothertest/tags",
"blobs_url": "https://api.github.com/repos/jsmith/anothertest/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/jsmith/anothertest/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/jsmith/anothertest/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/jsmith/anothertest/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/jsmith/anothertest/statuses/{sha}",
"languages_url": "https://api.github.com/repos/jsmith/anothertest/languages",
"stargazers_url": "https://api.github.com/repos/jsmith/anothertest/stargazers",
"contributors_url": "https://api.github.com/repos/jsmith/anothertest/contributors",
"subscribers_url": "https://api.github.com/repos/jsmith/anothertest/subscribers",
"subscription_url": "https://api.github.com/repos/jsmith/anothertest/subscription",
"commits_url": "https://api.github.com/repos/jsmith/anothertest/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/jsmith/anothertest/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/jsmith/anothertest/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/jsmith/anothertest/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/jsmith/anothertest/contents/{+path}",
"compare_url": "https://api.github.com/repos/jsmith/anothertest/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/jsmith/anothertest/merges",
"archive_url": "https://api.github.com/repos/jsmith/anothertest/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/jsmith/anothertest/downloads",
"issues_url": "https://api.github.com/repos/jsmith/anothertest/issues{/number}",
"pulls_url": "https://api.github.com/repos/jsmith/anothertest/pulls{/number}",
"milestones_url": "https://api.github.com/repos/jsmith/anothertest/milestones{/number}",
"notifications_url": "https://api.github.com/repos/jsmith/anothertest/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/jsmith/anothertest/labels{/name}",
"releases_url": "https://api.github.com/repos/jsmith/anothertest/releases{/id}",
"created_at": 1430426945,
"updated_at": "2015-04-30T20:49:05Z",
"pushed_at": 1441995976,
"git_url": "git://github.com/jsmith/anothertest.git",
"ssh_url": "git@github.com:jsmith/anothertest.git",
"clone_url": "https://github.com/jsmith/anothertest.git",
"svn_url": "https://github.com/jsmith/anothertest",
"homepage": null,
"size": 144,
"stargazers_count": 0,
"watchers_count": 0,
"language": null,
"has_issues": true,
"has_downloads": true,
"has_wiki": true,
"has_pages": false,
"forks_count": 0,
"mirror_url": null,
"open_issues_count": 0,
"forks": 0,
"open_issues": 0,
"watchers": 0,
"default_branch": "master",
"stargazers": 0,
"master_branch": "master"
},
"pusher": {
"name": "jsmith",
"email": "jsmith@users.noreply.github.com"
},
"sender": {
"login": "jsmith",
"id": 1234567,
"avatar_url": "https://avatars.githubusercontent.com/u/1234567?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/jsmith",
"html_url": "https://github.com/jsmith",
"followers_url": "https://api.github.com/users/jsmith/followers",
"following_url": "https://api.github.com/users/jsmith/following{/other_user}",
"gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jsmith/subscriptions",
"organizations_url": "https://api.github.com/users/jsmith/orgs",
"repos_url": "https://api.github.com/users/jsmith/repos",
"events_url": "https://api.github.com/users/jsmith/events{/privacy}",
"received_events_url": "https://api.github.com/users/jsmith/received_events",
"type": "User",
"site_admin": false
}
}


@ -1,199 +0,0 @@
{
"ref": "refs/heads/master",
"before": "c7fa613b99d509c0d4fcbf946f0415b5f024150b",
"after": "456806b662cb903a0febbaed8344f3ed42f27bab",
"created": false,
"deleted": false,
"forced": false,
"base_ref": null,
"compare": "https://github.com/jsmith/somerepo/compare/c7fa613b99d5...456806b662cb",
"commits": [
{
"id": "e00365b225ad7f454982e9198756cc1ab5dc4428",
"distinct": true,
"message": "Assign the exception to a variable to log it",
"timestamp": "2015-12-08T18:03:48-05:00",
"url": "https://github.com/jsmith/somerepo/commit/e00365b225ad7f454982e9198756cc1ab5dc4428",
"author": {
"name": "John Smith",
"email": "j@smith.com",
"username": "jsmith"
},
"committer": {
"name": "John Smith",
"email": "j@smith.com",
"username": "jsmith"
},
"added": [
],
"removed": [
],
"modified": [
"storage/basestorage.py"
]
},
{
"id": "456806b662cb903a0febbaed8344f3ed42f27bab",
"distinct": true,
"message": "Merge pull request #1044 from jsmith/errerror\n\nAssign the exception to a variable to log it",
"timestamp": "2015-12-08T18:07:03-05:00",
"url": "https://github.com/jsmith/somerepo/commit/456806b662cb903a0febbaed8344f3ed42f27bab",
"author": {
"name": "John Smith",
"email": "j@smith.com",
"username": "jsmith"
},
"committer": {
"name": "John Smith",
"email": "j@smith.com",
"username": "jsmith"
},
"added": [
],
"removed": [
],
"modified": [
"storage/basestorage.py"
]
}
],
"head_commit": {
"id": "456806b662cb903a0febbaed8344f3ed42f27bab",
"distinct": true,
"message": "Merge pull request #1044 from jsmith/errerror\n\nAssign the exception to a variable to log it",
"timestamp": "2015-12-08T18:07:03-05:00",
"url": "https://github.com/jsmith/somerepo/commit/456806b662cb903a0febbaed8344f3ed42f27bab",
"author": {
"name": "John Smith",
"email": "j@smith.com",
"username": "jsmith"
},
"committer": {
"name": "John Smith",
"email": "j@smith.com",
"username": "jsmith"
},
"added": [
],
"removed": [
],
"modified": [
"storage/basestorage.py"
]
},
"repository": {
"id": 12345678,
"name": "somerepo",
"full_name": "jsmith/somerepo",
"owner": {
"name": "jsmith",
"email": null
},
"private": true,
"html_url": "https://github.com/jsmith/somerepo",
"description": "Some Cool Repo",
"fork": false,
"url": "https://github.com/jsmith/somerepo",
"forks_url": "https://api.github.com/repos/jsmith/somerepo/forks",
"keys_url": "https://api.github.com/repos/jsmith/somerepo/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/jsmith/somerepo/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/jsmith/somerepo/teams",
"hooks_url": "https://api.github.com/repos/jsmith/somerepo/hooks",
"issue_events_url": "https://api.github.com/repos/jsmith/somerepo/issues/events{/number}",
"events_url": "https://api.github.com/repos/jsmith/somerepo/events",
"assignees_url": "https://api.github.com/repos/jsmith/somerepo/assignees{/user}",
"branches_url": "https://api.github.com/repos/jsmith/somerepo/branches{/branch}",
"tags_url": "https://api.github.com/repos/jsmith/somerepo/tags",
"blobs_url": "https://api.github.com/repos/jsmith/somerepo/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/jsmith/somerepo/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/jsmith/somerepo/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/jsmith/somerepo/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/jsmith/somerepo/statuses/{sha}",
"languages_url": "https://api.github.com/repos/jsmith/somerepo/languages",
"stargazers_url": "https://api.github.com/repos/jsmith/somerepo/stargazers",
"contributors_url": "https://api.github.com/repos/jsmith/somerepo/contributors",
"subscribers_url": "https://api.github.com/repos/jsmith/somerepo/subscribers",
"subscription_url": "https://api.github.com/repos/jsmith/somerepo/subscription",
"commits_url": "https://api.github.com/repos/jsmith/somerepo/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/jsmith/somerepo/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/jsmith/somerepo/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/jsmith/somerepo/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/jsmith/somerepo/contents/{+path}",
"compare_url": "https://api.github.com/repos/jsmith/somerepo/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/jsmith/somerepo/merges",
"archive_url": "https://api.github.com/repos/jsmith/somerepo/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/jsmith/somerepo/downloads",
"issues_url": "https://api.github.com/repos/jsmith/somerepo/issues{/number}",
"pulls_url": "https://api.github.com/repos/jsmith/somerepo/pulls{/number}",
"milestones_url": "https://api.github.com/repos/jsmith/somerepo/milestones{/number}",
"notifications_url": "https://api.github.com/repos/jsmith/somerepo/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/jsmith/somerepo/labels{/name}",
"releases_url": "https://api.github.com/repos/jsmith/somerepo/releases{/id}",
"created_at": 1415056063,
"updated_at": "2015-11-12T05:16:51Z",
"pushed_at": 1449616023,
"git_url": "git://github.com/jsmith/somerepo.git",
"ssh_url": "git@github.com:jsmith/somerepo.git",
"clone_url": "https://github.com/jsmith/somerepo.git",
"svn_url": "https://github.com/jsmith/somerepo",
"homepage": "",
"size": 183677,
"stargazers_count": 3,
"watchers_count": 3,
"language": "Python",
"has_issues": true,
"has_downloads": true,
"has_wiki": false,
"has_pages": false,
"forks_count": 8,
"mirror_url": null,
"open_issues_count": 188,
"forks": 8,
"open_issues": 188,
"watchers": 3,
"default_branch": "master",
"stargazers": 3,
"master_branch": "master",
"organization": "jsmith"
},
"pusher": {
"name": "jsmith",
"email": "j@smith.com"
},
"organization": {
"login": "jsmith",
"id": 9876543,
"url": "https://api.github.com/orgs/jsmith",
"repos_url": "https://api.github.com/orgs/jsmith/repos",
"events_url": "https://api.github.com/orgs/jsmith/events",
"members_url": "https://api.github.com/orgs/jsmith/members{/member}",
"public_members_url": "https://api.github.com/orgs/jsmith/public_members{/member}",
"avatar_url": "https://avatars.githubusercontent.com/u/5504624?v=3",
"description": null
},
"sender": {
"login": "jsmith",
"id": 1234567,
"avatar_url": "https://avatars.githubusercontent.com/u/000000?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/jsmith",
"html_url": "https://github.com/jsmith",
"followers_url": "https://api.github.com/users/jsmith/followers",
"following_url": "https://api.github.com/users/jsmith/following{/other_user}",
"gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jsmith/subscriptions",
"organizations_url": "https://api.github.com/users/jsmith/orgs",
"repos_url": "https://api.github.com/users/jsmith/repos",
"events_url": "https://api.github.com/users/jsmith/events{/privacy}",
"received_events_url": "https://api.github.com/users/jsmith/received_events",
"type": "User",
"site_admin": false
}
}


@ -1,126 +0,0 @@
{
"ref": "refs/heads/1.2.6",
"before": "76a309ed96c72986eddffc02d2f4dda3fe689f10",
"after": "0000000000000000000000000000000000000000",
"created": false,
"deleted": true,
"forced": false,
"base_ref": null,
"compare": "https://github.com/jsmith/somerepo/compare/76a309ed96c7...000000000000",
"commits": [
],
"head_commit": null,
"repository": {
"id": 12345678,
"name": "somerepo",
"full_name": "jsmith/somerepo",
"owner": {
"name": "jsmith",
"email": "j@smith.com"
},
"private": true,
"html_url": "https://github.com/jsmith/somerepo",
"description": "Dockerfile for some repo",
"fork": false,
"url": "https://github.com/jsmith/somerepo",
"forks_url": "https://api.github.com/repos/jsmith/somerepo/forks",
"keys_url": "https://api.github.com/repos/jsmith/somerepo/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/jsmith/somerepo/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/jsmith/somerepo/teams",
"hooks_url": "https://api.github.com/repos/jsmith/somerepo/hooks",
"issue_events_url": "https://api.github.com/repos/jsmith/somerepo/issues/events{/number}",
"events_url": "https://api.github.com/repos/jsmith/somerepo/events",
"assignees_url": "https://api.github.com/repos/jsmith/somerepo/assignees{/user}",
"branches_url": "https://api.github.com/repos/jsmith/somerepo/branches{/branch}",
"tags_url": "https://api.github.com/repos/jsmith/somerepo/tags",
"blobs_url": "https://api.github.com/repos/jsmith/somerepo/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/jsmith/somerepo/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/jsmith/somerepo/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/jsmith/somerepo/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/jsmith/somerepo/statuses/{sha}",
"languages_url": "https://api.github.com/repos/jsmith/somerepo/languages",
"stargazers_url": "https://api.github.com/repos/jsmith/somerepo/stargazers",
"contributors_url": "https://api.github.com/repos/jsmith/somerepo/contributors",
"subscribers_url": "https://api.github.com/repos/jsmith/somerepo/subscribers",
"subscription_url": "https://api.github.com/repos/jsmith/somerepo/subscription",
"commits_url": "https://api.github.com/repos/jsmith/somerepo/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/jsmith/somerepo/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/jsmith/somerepo/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/jsmith/somerepo/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/jsmith/somerepo/contents/{+path}",
"compare_url": "https://api.github.com/repos/jsmith/somerepo/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/jsmith/somerepo/merges",
"archive_url": "https://api.github.com/repos/jsmith/somerepo/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/jsmith/somerepo/downloads",
"issues_url": "https://api.github.com/repos/jsmith/somerepo/issues{/number}",
"pulls_url": "https://api.github.com/repos/jsmith/somerepo/pulls{/number}",
"milestones_url": "https://api.github.com/repos/jsmith/somerepo/milestones{/number}",
"notifications_url": "https://api.github.com/repos/jsmith/somerepo/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/jsmith/somerepo/labels{/name}",
"releases_url": "https://api.github.com/repos/jsmith/somerepo/releases{/id}",
"deployments_url": "https://api.github.com/repos/jsmith/somerepo/deployments",
"created_at": 1461165926,
"updated_at": "2016-11-03T18:20:01Z",
"pushed_at": 1479313569,
"git_url": "git://github.com/jsmith/somerepo.git",
"ssh_url": "git@github.com:jsmith/somerepo.git",
"clone_url": "https://github.com/jsmith/somerepo.git",
"svn_url": "https://github.com/jsmith/somerepo",
"homepage": "",
"size": 3114,
"stargazers_count": 0,
"watchers_count": 0,
"language": "Shell",
"has_issues": true,
"has_downloads": true,
"has_wiki": true,
"has_pages": false,
"forks_count": 0,
"mirror_url": null,
"open_issues_count": 0,
"forks": 0,
"open_issues": 0,
"watchers": 0,
"default_branch": "master",
"stargazers": 0,
"master_branch": "master",
"organization": "jsmith"
},
"pusher": {
"name": "jsmith",
"email": "j@smith.com"
},
"organization": {
"login": "jsmith",
"id": 9876543,
"url": "https://api.github.com/orgs/jsmith",
"repos_url": "https://api.github.com/orgs/jsmith/repos",
"events_url": "https://api.github.com/orgs/jsmith/events",
"hooks_url": "https://api.github.com/orgs/jsmith/hooks",
"issues_url": "https://api.github.com/orgs/jsmith/issues",
"members_url": "https://api.github.com/orgs/jsmith/members{/member}",
"public_members_url": "https://api.github.com/orgs/jsmith/public_members{/member}",
"avatar_url": "https://avatars.githubusercontent.com/u/1234567?v=3",
"description": "Open Source Projects for Linux Containers"
},
"sender": {
"login": "jsmith",
"id": 12345678,
"avatar_url": "https://avatars.githubusercontent.com/u/1234567?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/jsmith",
"html_url": "https://github.com/jsmith",
"followers_url": "https://api.github.com/users/jsmith/followers",
"following_url": "https://api.github.com/users/jsmith/following{/other_user}",
"gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jsmith/subscriptions",
"organizations_url": "https://api.github.com/users/jsmith/orgs",
"repos_url": "https://api.github.com/users/jsmith/repos",
"events_url": "https://api.github.com/users/jsmith/events{/privacy}",
"received_events_url": "https://api.github.com/users/jsmith/received_events",
"type": "User",
"site_admin": false
}
}


@ -1,133 +0,0 @@
{
"ref": "refs/heads/master",
"before": "9ea43cab474709d4a61afb7e3340de1ffc405b41",
"after": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"created": false,
"deleted": false,
"forced": false,
"base_ref": null,
"compare": "https://github.com/jsmith/anothertest/compare/9ea43cab4747...410f4cdf8ff0",
"commits": [
{
"id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"distinct": true,
"message": "Update Dockerfile",
"timestamp": "2015-09-11T14:26:16-04:00",
"url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"added": [],
"removed": [],
"modified": [
"Dockerfile"
]
}
],
"head_commit": {
"id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"distinct": true,
"message": "Update Dockerfile",
"timestamp": "2015-09-11T14:26:16-04:00",
"url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"added": [],
"removed": [],
"modified": [
"Dockerfile"
]
},
"repository": {
"id": 12345678,
"name": "anothertest",
"full_name": "jsmith/anothertest",
"owner": {
"name": "jsmith",
"email": "jsmith@users.noreply.github.com"
},
"private": false,
"html_url": "https://github.com/jsmith/anothertest",
"description": "",
"fork": false,
"url": "https://github.com/jsmith/anothertest",
"forks_url": "https://api.github.com/repos/jsmith/anothertest/forks",
"keys_url": "https://api.github.com/repos/jsmith/anothertest/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/jsmith/anothertest/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/jsmith/anothertest/teams",
"hooks_url": "https://api.github.com/repos/jsmith/anothertest/hooks",
"issue_events_url": "https://api.github.com/repos/jsmith/anothertest/issues/events{/number}",
"events_url": "https://api.github.com/repos/jsmith/anothertest/events",
"assignees_url": "https://api.github.com/repos/jsmith/anothertest/assignees{/user}",
"branches_url": "https://api.github.com/repos/jsmith/anothertest/branches{/branch}",
"tags_url": "https://api.github.com/repos/jsmith/anothertest/tags",
"blobs_url": "https://api.github.com/repos/jsmith/anothertest/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/jsmith/anothertest/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/jsmith/anothertest/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/jsmith/anothertest/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/jsmith/anothertest/statuses/{sha}",
"languages_url": "https://api.github.com/repos/jsmith/anothertest/languages",
"stargazers_url": "https://api.github.com/repos/jsmith/anothertest/stargazers",
"contributors_url": "https://api.github.com/repos/jsmith/anothertest/contributors",
"subscribers_url": "https://api.github.com/repos/jsmith/anothertest/subscribers",
"subscription_url": "https://api.github.com/repos/jsmith/anothertest/subscription",
"commits_url": "https://api.github.com/repos/jsmith/anothertest/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/jsmith/anothertest/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/jsmith/anothertest/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/jsmith/anothertest/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/jsmith/anothertest/contents/{+path}",
"compare_url": "https://api.github.com/repos/jsmith/anothertest/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/jsmith/anothertest/merges",
"archive_url": "https://api.github.com/repos/jsmith/anothertest/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/jsmith/anothertest/downloads",
"issues_url": "https://api.github.com/repos/jsmith/anothertest/issues{/number}",
"pulls_url": "https://api.github.com/repos/jsmith/anothertest/pulls{/number}",
"milestones_url": "https://api.github.com/repos/jsmith/anothertest/milestones{/number}",
"notifications_url": "https://api.github.com/repos/jsmith/anothertest/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/jsmith/anothertest/labels{/name}",
"releases_url": "https://api.github.com/repos/jsmith/anothertest/releases{/id}",
"created_at": 1430426945,
"updated_at": "2015-04-30T20:49:05Z",
"pushed_at": 1441995976,
"git_url": "git://github.com/jsmith/anothertest.git",
"ssh_url": "git@github.com:jsmith/anothertest.git",
"clone_url": "https://github.com/jsmith/anothertest.git",
"svn_url": "https://github.com/jsmith/anothertest",
"homepage": null,
"size": 144,
"stargazers_count": 0,
"watchers_count": 0,
"language": null,
"has_issues": true,
"has_downloads": true,
"has_wiki": true,
"has_pages": false,
"forks_count": 0,
"mirror_url": null,
"open_issues_count": 0,
"forks": 0,
"open_issues": 0,
"watchers": 0,
"default_branch": "master",
"stargazers": 0,
"master_branch": "master"
},
"pusher": {
"name": "jsmith",
"email": "jsmith@users.noreply.github.com"
},
"sender": {
"login": "jsmith",
"id": 1234567,
"avatar_url": "https://avatars.githubusercontent.com/u/4073002?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/jsmith",
"html_url": "https://github.com/jsmith",
"followers_url": "https://api.github.com/users/jsmith/followers",
"following_url": "https://api.github.com/users/jsmith/following{/other_user}",
"gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jsmith/subscriptions",
"organizations_url": "https://api.github.com/users/jsmith/orgs",
"repos_url": "https://api.github.com/users/jsmith/repos",
"events_url": "https://api.github.com/users/jsmith/events{/privacy}",
"received_events_url": "https://api.github.com/users/jsmith/received_events",
"type": "User",
"site_admin": false
}
}


@ -1,149 +0,0 @@
{
"ref": "refs/heads/master",
"before": "9716b516939221dc754a056e0f9ddf599e71d4b8",
"after": "118b07121695d9f2e40a5ff264fdcc2917680870",
"created": false,
"deleted": false,
"forced": false,
"base_ref": null,
"compare": "https://github.com/jsmith/docker-test/compare/9716b5169392...118b07121695",
"commits": [
{
"id": "118b07121695d9f2e40a5ff264fdcc2917680870",
"distinct": true,
"message": "Fail",
"timestamp": "2015-09-25T14:55:11-04:00",
"url": "https://github.com/jsmith/docker-test/commit/118b07121695d9f2e40a5ff264fdcc2917680870",
"author": {
"name": "John Smith",
"email": "j@smith.com"
},
"committer": {
"name": "John Smith",
"email": "j@smith.com"
},
"added": [],
"removed": [],
"modified": [
"README.md"
]
}
],
"head_commit": {
"id": "118b07121695d9f2e40a5ff264fdcc2917680870",
"distinct": true,
"message": "Fail",
"timestamp": "2015-09-25T14:55:11-04:00",
"url": "https://github.com/jsmith/docker-test/commit/118b07121695d9f2e40a5ff264fdcc2917680870",
"author": {
"name": "John Smith",
"email": "j@smith.com"
},
"committer": {
"name": "John Smith",
"email": "j@smith.com"
},
"added": [],
"removed": [],
"modified": [
"README.md"
]
},
"repository": {
"id": 1234567,
"name": "docker-test",
"full_name": "jsmith/docker-test",
"owner": {
"name": "jsmith",
"email": "j@smith.com"
},
"private": false,
"html_url": "https://github.com/jsmith/docker-test",
"description": "",
"fork": false,
"url": "https://github.com/jsmith/docker-test",
"forks_url": "https://api.github.com/repos/jsmith/docker-test/forks",
"keys_url": "https://api.github.com/repos/jsmith/docker-test/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/jsmith/docker-test/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/jsmith/docker-test/teams",
"hooks_url": "https://api.github.com/repos/jsmith/docker-test/hooks",
"issue_events_url": "https://api.github.com/repos/jsmith/docker-test/issues/events{/number}",
"events_url": "https://api.github.com/repos/jsmith/docker-test/events",
"assignees_url": "https://api.github.com/repos/jsmith/docker-test/assignees{/user}",
"branches_url": "https://api.github.com/repos/jsmith/docker-test/branches{/branch}",
"tags_url": "https://api.github.com/repos/jsmith/docker-test/tags",
"blobs_url": "https://api.github.com/repos/jsmith/docker-test/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/jsmith/docker-test/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/jsmith/docker-test/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/jsmith/docker-test/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/jsmith/docker-test/statuses/{sha}",
"languages_url": "https://api.github.com/repos/jsmith/docker-test/languages",
"stargazers_url": "https://api.github.com/repos/jsmith/docker-test/stargazers",
"contributors_url": "https://api.github.com/repos/jsmith/docker-test/contributors",
"subscribers_url": "https://api.github.com/repos/jsmith/docker-test/subscribers",
"subscription_url": "https://api.github.com/repos/jsmith/docker-test/subscription",
"commits_url": "https://api.github.com/repos/jsmith/docker-test/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/jsmith/docker-test/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/jsmith/docker-test/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/jsmith/docker-test/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/jsmith/docker-test/contents/{+path}",
"compare_url": "https://api.github.com/repos/jsmith/docker-test/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/jsmith/docker-test/merges",
"archive_url": "https://api.github.com/repos/jsmith/docker-test/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/jsmith/docker-test/downloads",
"issues_url": "https://api.github.com/repos/jsmith/docker-test/issues{/number}",
"pulls_url": "https://api.github.com/repos/jsmith/docker-test/pulls{/number}",
"milestones_url": "https://api.github.com/repos/jsmith/docker-test/milestones{/number}",
"notifications_url": "https://api.github.com/repos/jsmith/docker-test/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/jsmith/docker-test/labels{/name}",
"releases_url": "https://api.github.com/repos/jsmith/docker-test/releases{/id}",
"created_at": 1442254053,
"updated_at": "2015-09-14T18:07:33Z",
"pushed_at": 1443207315,
"git_url": "git://github.com/jsmith/docker-test.git",
"ssh_url": "git@github.com:jsmith/docker-test.git",
"clone_url": "https://github.com/jsmith/docker-test.git",
"svn_url": "https://github.com/jsmith/docker-test",
"homepage": null,
"size": 108,
"stargazers_count": 0,
"watchers_count": 0,
"language": null,
"has_issues": true,
"has_downloads": true,
"has_wiki": true,
"has_pages": false,
"forks_count": 0,
"mirror_url": null,
"open_issues_count": 0,
"forks": 0,
"open_issues": 0,
"watchers": 0,
"default_branch": "master",
"stargazers": 0,
"master_branch": "master"
},
"pusher": {
"name": "jsmith",
"email": "j@smith.com"
},
"sender": {
"login": "jsmith",
"id": 1234567,
"avatar_url": "https://avatars.githubusercontent.com/u/1234567?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/jsmith",
"html_url": "https://github.com/jsmith",
"followers_url": "https://api.github.com/users/jsmith/followers",
"following_url": "https://api.github.com/users/jsmith/following{/other_user}",
"gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jsmith/subscriptions",
"organizations_url": "https://api.github.com/users/jsmith/orgs",
"repos_url": "https://api.github.com/users/jsmith/repos",
"events_url": "https://api.github.com/users/jsmith/events{/privacy}",
"received_events_url": "https://api.github.com/users/jsmith/received_events",
"type": "User",
"site_admin": false
}
}


@ -1,153 +0,0 @@
{
"ref": "refs/heads/slash/branch",
"before": "9ea43cab474709d4a61afb7e3340de1ffc405b41",
"after": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"created": false,
"deleted": false,
"forced": false,
"base_ref": null,
"compare": "https://github.com/jsmith/anothertest/compare/9ea43cab4747...410f4cdf8ff0",
"commits": [
{
"id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"distinct": true,
"message": "Update Dockerfile",
"timestamp": "2015-09-11T14:26:16-04:00",
"url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"author": {
"name": "jsmith",
"email": "jsmith@users.noreply.github.com",
"username": "jsmith"
},
"committer": {
"name": "jsmith",
"email": "jsmith@users.noreply.github.com",
"username": "jsmith"
},
"added": [],
"removed": [],
"modified": [
"Dockerfile"
]
}
],
"head_commit": {
"id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"distinct": true,
"message": "Update Dockerfile",
"timestamp": "2015-09-11T14:26:16-04:00",
"url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c",
"author": {
"name": "jsmith",
"email": "jsmith@users.noreply.github.com",
"username": "jsmith"
},
"committer": {
"name": "jsmith",
"email": "jsmith@users.noreply.github.com",
"username": "jsmith"
},
"added": [],
"removed": [],
"modified": [
"Dockerfile"
]
},
"repository": {
"id": 1234567,
"name": "anothertest",
"full_name": "jsmith/anothertest",
"owner": {
"name": "jsmith",
"email": "jsmith@users.noreply.github.com"
},
"private": false,
"html_url": "https://github.com/jsmith/anothertest",
"description": "",
"fork": false,
"url": "https://github.com/jsmith/anothertest",
"forks_url": "https://api.github.com/repos/jsmith/anothertest/forks",
"keys_url": "https://api.github.com/repos/jsmith/anothertest/keys{/key_id}",
"collaborators_url": "https://api.github.com/repos/jsmith/anothertest/collaborators{/collaborator}",
"teams_url": "https://api.github.com/repos/jsmith/anothertest/teams",
"hooks_url": "https://api.github.com/repos/jsmith/anothertest/hooks",
"issue_events_url": "https://api.github.com/repos/jsmith/anothertest/issues/events{/number}",
"events_url": "https://api.github.com/repos/jsmith/anothertest/events",
"assignees_url": "https://api.github.com/repos/jsmith/anothertest/assignees{/user}",
"branches_url": "https://api.github.com/repos/jsmith/anothertest/branches{/branch}",
"tags_url": "https://api.github.com/repos/jsmith/anothertest/tags",
"blobs_url": "https://api.github.com/repos/jsmith/anothertest/git/blobs{/sha}",
"git_tags_url": "https://api.github.com/repos/jsmith/anothertest/git/tags{/sha}",
"git_refs_url": "https://api.github.com/repos/jsmith/anothertest/git/refs{/sha}",
"trees_url": "https://api.github.com/repos/jsmith/anothertest/git/trees{/sha}",
"statuses_url": "https://api.github.com/repos/jsmith/anothertest/statuses/{sha}",
"languages_url": "https://api.github.com/repos/jsmith/anothertest/languages",
"stargazers_url": "https://api.github.com/repos/jsmith/anothertest/stargazers",
"contributors_url": "https://api.github.com/repos/jsmith/anothertest/contributors",
"subscribers_url": "https://api.github.com/repos/jsmith/anothertest/subscribers",
"subscription_url": "https://api.github.com/repos/jsmith/anothertest/subscription",
"commits_url": "https://api.github.com/repos/jsmith/anothertest/commits{/sha}",
"git_commits_url": "https://api.github.com/repos/jsmith/anothertest/git/commits{/sha}",
"comments_url": "https://api.github.com/repos/jsmith/anothertest/comments{/number}",
"issue_comment_url": "https://api.github.com/repos/jsmith/anothertest/issues/comments{/number}",
"contents_url": "https://api.github.com/repos/jsmith/anothertest/contents/{+path}",
"compare_url": "https://api.github.com/repos/jsmith/anothertest/compare/{base}...{head}",
"merges_url": "https://api.github.com/repos/jsmith/anothertest/merges",
"archive_url": "https://api.github.com/repos/jsmith/anothertest/{archive_format}{/ref}",
"downloads_url": "https://api.github.com/repos/jsmith/anothertest/downloads",
"issues_url": "https://api.github.com/repos/jsmith/anothertest/issues{/number}",
"pulls_url": "https://api.github.com/repos/jsmith/anothertest/pulls{/number}",
"milestones_url": "https://api.github.com/repos/jsmith/anothertest/milestones{/number}",
"notifications_url": "https://api.github.com/repos/jsmith/anothertest/notifications{?since,all,participating}",
"labels_url": "https://api.github.com/repos/jsmith/anothertest/labels{/name}",
"releases_url": "https://api.github.com/repos/jsmith/anothertest/releases{/id}",
"created_at": 1430426945,
"updated_at": "2015-04-30T20:49:05Z",
"pushed_at": 1441995976,
"git_url": "git://github.com/jsmith/anothertest.git",
"ssh_url": "git@github.com:jsmith/anothertest.git",
"clone_url": "https://github.com/jsmith/anothertest.git",
"svn_url": "https://github.com/jsmith/anothertest",
"homepage": null,
"size": 144,
"stargazers_count": 0,
"watchers_count": 0,
"language": null,
"has_issues": true,
"has_downloads": true,
"has_wiki": true,
"has_pages": false,
"forks_count": 0,
"mirror_url": null,
"open_issues_count": 0,
"forks": 0,
"open_issues": 0,
"watchers": 0,
"default_branch": "master",
"stargazers": 0,
"master_branch": "master"
},
"pusher": {
"name": "jsmith",
"email": "jsmith@users.noreply.github.com"
},
"sender": {
"login": "jsmith",
"id": 1234567,
"avatar_url": "https://avatars.githubusercontent.com/u/1234567?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/jsmith",
"html_url": "https://github.com/jsmith",
"followers_url": "https://api.github.com/users/jsmith/followers",
"following_url": "https://api.github.com/users/jsmith/following{/other_user}",
"gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jsmith/subscriptions",
"organizations_url": "https://api.github.com/users/jsmith/orgs",
"repos_url": "https://api.github.com/users/jsmith/repos",
"events_url": "https://api.github.com/users/jsmith/events{/privacy}",
"received_events_url": "https://api.github.com/users/jsmith/received_events",
"type": "User",
"site_admin": false
}
}


@ -1,54 +0,0 @@
{
"object_kind": "push",
"before": "11fcaca195e8b17ca7e3dc47d9608d5b6b892f45",
"after": "fb88379ee45de28a0a4590fddcbd8eff8b36026e",
"ref": "refs/heads/master",
"checkout_sha": "fb88379ee45de28a0a4590fddcbd8eff8b36026e",
"message": null,
"user_id": 98765,
"user_name": "John Smith",
"user_email": "j@smith.com",
"project_id": 12344567,
"repository": {
"name": "somerepo",
"url": "git@gitlab.com:jsmith/somerepo.git",
"description": "",
"homepage": "https://gitlab.com/jsmith/somerepo",
"git_http_url": "https://gitlab.com/jsmith/somerepo.git",
"git_ssh_url": "git@gitlab.com:jsmith/somerepo.git",
"visibility_level": 20
},
"commits": [
{
"id": "fb88379ee45de28a0a4590fddcbd8eff8b36026e",
"message": "Fix link\n",
"timestamp": "2015-08-13T19:33:18+00:00",
"url": "https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e",
"author": {
"name": "Jane Smith",
"email": "jane@smith.com"
}
},
{
"id": "4ca166bc0b511f21fa331873f260f1a7cb38d723",
"message": "Do Some Cool Thing",
"timestamp": "2015-08-13T15:52:15+00:00",
"url": "https://gitlab.com/jsmith/somerepo/commit/4ca166bc0b511f21fa331873f260f1a7cb38d723",
"author": {
"name": "Jane Smith",
"email": "jane@smith.com"
}
},
{
"id": "11fcaca195e8b17ca7e3dc47d9608d5b6b892f45",
"message": "Merge another cool thing",
"timestamp": "2015-08-13T09:31:47+00:00",
"url": "https://gitlab.com/jsmith/somerepo/commit/11fcaca195e8b17ca7e3dc47d9608d5b6b892f45",
"author": {
"name": "Kate Smith",
"email": "kate@smith.com"
}
}
],
"total_commits_count": 3
}

Some files were not shown because too many files have changed in this diff.