diff --git a/.dockerignore b/.dockerignore index b4d35e3b9..79f748dae 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,4 +1,8 @@ +./ci/ conf/stack +conf/stack/** +conf/stack/config.yaml +config.yaml screenshots tools test/data/registry @@ -6,10 +10,26 @@ venv .git !.git/HEAD .gitignore +.github Bobfile README.md ROADMAP.md requirements-nover.txt run-local.sh .DS_Store -*.pyc +**/*.pyc +.tox +htmlcov +.coverage +coverage +.cache +.npm-debug.log +test/__pycache__ +__pycache__ +**/__pycache__ +static/build/** +.gitlab-ci/* +.gitlab-ci.* +docker-compose.yaml +test/dockerclients/** +node_modules diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..d79c4a917 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,29 @@ + +### Description of Changes + +* details about the implementation of the changes +* motivation for the change (broken code, new feature, etc) +* contrast with previous behavior + + +#### Changes: + +* .. +* .. + +#### Issue: + + +**TESTING** -> + +**BREAKING CHANGE** -> + + +--- + +## Reviewer Checklist + +- [ ] It works! +- [ ] Comments provide sufficient explanations for the next contributor +- [ ] Tests cover changes and corner cases +- [ ] Follows Quay syntax patterns and format diff --git a/.gitignore b/.gitignore index ecd08a107..8b958631a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,14 +1,30 @@ *.pyc venv -static/snapshots/ screenshots/screenshots/ -stack -grunt/node_modules +conf/stack +*/node_modules dist dest node_modules static/ldn static/fonts +static/build stack_local test/data/registry/ GIT_HEAD +.idea +.python-version +.pylintrc +.coverage +coverage +htmlcov +.tox +.cache +.npm-debug.log +Dockerfile-e +.vscode +*.iml +.DS_Store +.pytest_cache/* +test/dockerclients/Vagrantfile +test/dockerclients/.* diff --git a/.style.yapf b/.style.yapf new file mode 100644 index 000000000..c29ca6bdb --- /dev/null +++ b/.style.yapf @@ -0,0 +1,37 @@ +[style] +based_on_style = chromium +COLUMN_LIMIT=99 +INDENT_WIDTH=2 +BLANK_LINE_BEFORE_CLASS_DOCSTRING=False +#True +ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT=True +# False +ALLOW_MULTILINE_DICTIONARY_KEYS=True +# False +BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF=False +# False +COALESCE_BRACKETS=True +DEDENT_CLOSING_BRACKETS=False +CONTINUATION_INDENT_WIDTH=2 +# False +INDENT_DICTIONARY_VALUE=True +JOIN_MULTIPLE_LINES=False +# True +SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET=False +# True +SPLIT_BEFORE_NAMED_ASSIGNS=False +SPLIT_PENALTY_AFTER_OPENING_BRACKET=30 +SPLIT_PENALTY_AFTER_UNARY_OPERATOR=10000 +SPLIT_PENALTY_BEFORE_IF_EXPR=0 +SPLIT_PENALTY_BITWISE_OPERATOR=300 +SPLIT_PENALTY_EXCESS_CHARACTER=10000 +SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT=30 +SPLIT_PENALTY_IMPORT_NAMES=450 +SPLIT_PENALTY_LOGICAL_OPERATOR=300 +USE_TABS=False +SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED=False +# Align closing bracket with visual indentation. +align_closing_bracket_with_visual_indent=True + +# Allow lambdas to be formatted on more than one line. +allow_multiline_lambdas=True diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..da04e7fa7 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,103 @@ +--- +language: python +python: 2.7 + +sudo: required + +services: + - docker + +install: true + +branches: + only: + - master + +# Stop default database instances here to avoid port conflicts. +before_script: + - sudo service mysql stop + - sudo service postgresql stop + +# Clean the cache if any step fails. 
+before_cache: + - scripts/ci fail-clean + +cache: + timeout: 1000 + directories: + - $HOME/docker + +stages: + - build + - test + - clean + +# We should label the steps if Travis ever supports it: +# https://github.com/travis-ci/travis-ci/issues/5898 +jobs: + include: + - stage: build + name: Build + script: scripts/ci build + + # To further shard, change the script to shard_X_of_XS and add new steps + - stage: test + name: Unit tests (shard 1) + script: scripts/ci unit shard_1_of_2 + - stage: test + name: Unit tests (shard 2) + script: scripts/ci unit shard_2_of_2 + + - stage: test + name: Registry tests (shard 1) + script: scripts/ci registry shard_1_of_5 + - stage: test + name: Registry tests (shard 2) + script: scripts/ci registry shard_2_of_5 + - stage: test + name: Registry tests (shard 3) + script: scripts/ci registry shard_3_of_5 + - stage: test + name: Registry tests (shard 4) + script: scripts/ci registry shard_4_of_5 + - stage: test + name: Registry tests (shard 5) + script: scripts/ci registry shard_5_of_5 + + - stage: test + name: Legacy registry tests + script: scripts/ci registry_old + + - stage: test + name: Custom TLS certs test + script: scripts/ci certs_test + + - stage: test + name: Gunicorn worker test + script: scripts/ci gunicorn_test + + - stage: test + name: MySQL unit tests (shard 1) + script: scripts/ci mysql shard_1_of_2 + - stage: test + name: MySQL unit tests (shard 2) + script: scripts/ci mysql shard_2_of_2 + + - stage: test + name: Postgres unit tests (shard 1) + script: scripts/ci postgres shard_1_of_2 + - stage: test + name: Postgres unit tests (shard 2) + script: scripts/ci postgres shard_2_of_2 + + - stage: clean + name: Cleanup + script: scripts/ci clean + +notifications: + slack: + rooms: + - secure: "fBR3YMXaOkoX2Iz7oSJVAw9zrcDoqwadiMEWTWhx7Ic0zoM8IieD2EWIcDHAoGpqf3ixHkc1v/iLBpbWHgvK7TkrSrGEbFyEmu/uomuHU8oGTiazWCbMWg9T2mhWYFyVaKtt8bzMbFo8k72kYK/NWV8bR4W/Qe/opkH2GGzfhZA=" + on_success: change + on_failure: always + on_pull_requests: false diff --git a/Bobfile b/Bobfile deleted file mode 100644 index 80f4c39c7..000000000 --- a/Bobfile +++ /dev/null @@ -1,16 +0,0 @@ -version = 1 - -[docker] - build_opts = ["--rm"] - -[container_globals] - registry = "quay.io/quay" - skip_push = true - -[[container]] - name = "quay" - Dockerfile = "Dockerfile" - project = "quay" - tags = ["git:short"] - -# vim:ft=toml diff --git a/CHANGELOG.md b/CHANGELOG.md index 9d37db82f..34a9b1cac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,605 @@ +### v3.1.2 + +- Fixed: Repository mirroring properly updates status +- Fixed: Application repositories in public namespaces shown in UI +- Fixed: Description of log operations in UI +- Fixed: Quay V3 upgrade fails with "id field missing from v1Compatibility JSON" +- Fixed: Security token for storage proxy properly URL encoded + +### v3.1.1 + +- Fixed: Quoting of username/password for repository mirror +- Fixed: Changing next sync date in repository mirror UI +- Fixed: Enable cancel button in repository mirror UI + +### v3.1.0 + +- Added: New Repository Mirror functionality to continously synchronize repositories from external source registries into Quay +- Added: New Repository Mode setting (Normal, Mirrored, Read-Only) to indicate how a repository is updated +- Added: New Quay Setup Operator (Dev Preview) to automate configuring Quay on OpenShift +- Added: Support for using Red Hat OpenShift Container Storage 3 as a Quay storage backend +- Added: Support for using the Crunchy Data Operator to deploy Postgresql as Quay database 
+- Added: Ability to use build ARGS as the first line in Dockerfiles in Quay builds
+- Added: New Red Hat color scheme in Quay web UI
+- Fixed: Display of repo_verb logs in logs panel
+- Fixed: Ensure robot accounts being granted access actually belong to the same namespace
+- Fixed: Numerous documentation improvements
+
+### v3.0.5
+
+- Fixed: LDAP config error when user search results exceed 1000 objects [#1736](https://jira.coreos.com/browse/QUAY-1736)
+- Fixed: Remove obsolete 01_copy_syslog_config.sh [#1768](https://jira.coreos.com/browse/QUAY-1768)
+- Fixed: Config tool fails to set up database when password string contains "$" [#1510](https://jira.coreos.com/browse/QUAY-1510)
+- Added: Config flag to disable TLSv1.0 support [#1726](https://jira.coreos.com/browse/QUAY-1726)
+
+### v3.0.4
+
+- Fixed: Package vulnerability notifications now shown in UI
+- Fixed: Error deleting manifest after pushing new tag
+- Fixed: Manifest now shown in UI for all types
+- Fixed: CSRF rotation corrected
+- Fixed: nginx access and error logs now sent to stdout
+
+### v3.0.3
+
+- Fixed: Security scan notifications endpoint not working (part #2) (#3472)
+- Fixed: Exception raised during parallel pushes of the same manifest on Postgres (#3478)
+- Fixed: Connection pooling was ignoring environment variable (#3480)
+- Fixed: Exception raised in OAuth approval flow (#3491)
+
+### v3.0.2
+
+- Fixed: Configuration tool now operates in disconnected environments (#3468)
+- Fixed: Security scan notifications endpoint not working (#3472)
+
+### v3.0.1
+
+- Fixed: Instance health endpoint (`/health/instance`) (#3467)
+
+### v3.0.0
+
+**IMPORTANT NOTE:** This release is a **major** release and has special upgrade instructions. Please see the upgrade instructions documentation.
+
+- Added: Full support for Docker Manifest Version 2, Schema 2, including support for manifest lists and Windows images
+- Added: New, distinct configuration tool for Quay that can be run outside of Quay itself and perform in-place configuration changes
+- Added: Disabling of V1 push support by default and support for whitelist-enabling specific namespaces for this legacy protocol (#3398)
+- Added: Full support for blob mounting via the Docker protocol (#3057)
+- Added: Have all registry operations be disabled if a namespace is disabled (#3091)
+- Added: Allow syncing of team members from LDAP/Keystone groups, even if user creation is disabled (#3089)
+- Added: Add a feature flag to allow username confirmation to be disabled (#3099)
+- Added: New indexes which should result in significant database performance improvements when accessing lists of tags
+- Added: Add support for POST on OIDC endpoints, to support those providers that POST back (#3246)
+- Added: Add support for configuration of the claims required for OIDC authentication (#3246)
+- Added: Have the instance health check verify the disk space available to ensure it doesn’t run out and cause problems for nginx (#3241)
+- Added: Support for basic auth on security scanner API endpoints (#3255)
+- Added: Support for geo-blocking pulls in a namespace from a country (#3300)
+
+- Fixed: Ensure that starred public repositories appear in the starred repositories list (#3098)
+- Fixed: Add rate limiting to the catalog endpoint (#3106)
+- Fixed: Have the catalog endpoint return empty for a namespace if it is disabled (#3106)
+- Fixed: Have user logs start writing to a new LogEntry3 table, which has a BigInteger ID column, to ensure no overflow
+- Fixed: Improve loading of action logs to be less jumpy (#3299)
+- Fixed: Ensure that all upload segments are deleted in the Swift storage engine once no longer necessary (#3260)
+- Fixed: Handling of unicode in manifests (#3325)
+- Fixed: Unauthorized request handling under podman for public repositories when anonymous access is disabled (#3365)
+
+### v2.9.2
+
+**IMPORTANT NOTE:** This release fixes a bug in which the deletion of namespaces did not result in the deletion of robot accounts under that namespace. While this is not a security issue (no permissions or credentials are leaked), it can appear unusual to users, so an upgrade is highly recommended. This change also includes a migration that cleans up the aforementioned robot accounts, so the migration step can take **several minutes**. Please plan accordingly.
+
+- Added: Support for custom query parameters on OIDC endpoints (#3050)
+- Added: Configurable options for search page length and maximum number of pages (#3060)
+- Added: Better messaging for when the maximum search page is reached (#3060)
+- Added: Support for browser notifications (#3068)
+
+- Fixed: Robot accounts were not being immediately deleted under namespaces (#3071)
+- Fixed: Setup under latest versions of Kubernetes (#3051)
+- Fixed: Viewing of logs in repositories with many, many logs (#3082)
+- Fixed: Filtering of deleting users and organizations in superuser panel (#3080)
+- Fixed: Incorrect information displayed for builds triggered by deleted build triggers (#3078)
+- Fixed: Robots could not be created with empty descriptions (#3073)
+- Fixed: Inability to find Dockerfile in certain archives (#3072)
+- Fixed: Display of empty tab in credentials dialog under certain circumstances (#3061)
+- Fixed: Overflow of robot names when extremely long (#3062)
+- Fixed: Respect CPU affinity when determining number of workers to run (#3064)
+- Fixed: Breakage in RECAPTCHA support (#3065)
+
+### v2.9.1
+
+**IMPORTANT NOTE:** This release fixes the 2.9.0 migration. If you experienced an error during the 2.9.0 migration, manually roll back and then upgrade your Quay instance to 2.9.1.
+ +- Fixed: Specify default server value for new integer fields added (#3052) +- Fixed: Overflow of repository grid UI (#3049) + +### v2.9.0 + +- Added: Automatic cleanup of expired external application tokens (#3002) +- Added: Make deletions of namespaces occur in the background (#3014) +- Added: Ability to disable build triggers (#2892) +- Added: Have repeatedly failing build triggers be automatically disabled (#2892) +- Added: Automatic caching of registry Blob data for faster pull operations (#3022) +- Added: Creation date/time, last usage date/time and other metadata for robot accounts (#3024) +- Added: Collaborators view under organizations, for viewing non-members (#3025) + +- Fixed: Make superusers APIs for users and organizations visible in the API browser (#3017) +- Fixed: Better messaging when attempting to create a team that already exists (#3006) +- Fixed: Prevent possible reflected text attacks by limiting API access (#2987) +- Fixed: Have checkable menus in UI respect filters (#3013) +- Fixed: Users being invited to a new organization must always be invited (#3029) +- Fixed: Removed all license requirements in Quay (#3031) +- Fixed: Squashed images with hard links pointing to deleted files no longer fail (#3032) +- Fixed: 500 error when trying to pull certain images via torrent (#3036) + +### v2.8.0 + +- Added: Support for Azure Blob Storage (#2902) +- Added: Ability to filter out disabled users in users list API (#2954) +- Added: Image ID in expanded tags view (#2965) +- Added: Processes auto-scale based on CPU count (#2971, 2978) +- Added: Health checks for all workers (#2977) +- Added: Health checks and auto-rotation for service keys (#2909) +- Added: Ability to back GitHub or Google login with LDAP/Keystone (#2983) +- Added: Configurable page size for Docker Registry V2 API pagination (#2993) + +- Fixed: Anonymous calls to API discovery endpoint (#2953) +- Fixed: Optimized creation of repositories +- Fixed: Optimized manifest pushing +- Fixed: LDAP password input is now password field (#2970) +- Fixed: 500 raised when sending an invalid release name for app repos (#2979) +- Fixed: Deletion of expired external app tokens (#2981) +- Fixed: Sizing of OIDC login buttons (#2990) +- Fixed: Hide build-related UI when builds are not enabled (#2991) +- Fixed: Incorrect caching of external application token expiration (#2996) +- Fixed: Warning bar should not be displayed for already expired application tokens (#3003) + +### v2.7.0 + +**NOTE:** This release *removes* support for the OIDC token internal authentication mechanism and replaces it with support for a new app-specific token system. All customers using the old OIDC token auth mechanism must change their configuration after updating manually in `config.yaml`. 
+ +- Added: Support for external application tokens to be used on the Docker CLI (#2942) +- Added: Explore tab for browsing visible repositories (#2921) +- Added: Ability to view and copy full manifest SHAs in tags view (#2898) +- Added: Support for robot tokens in App Registry pushes and pulls (#2899) + +- Fixed: Failure when attempting to use Skopeo tool to access the registry (#2950) +- Fixed: Ordering of segments in Swift to match spec (#2920) +- Fixed: Squashed image downloading when using Postgres DB (#2930) +- Fixed: Hide "Start Build" button if the action is not allowed (#2916) +- Fixed: Exception when pushing certain labels with JSON-like contents (#2912) +- Fixed: Don't add password required notification for non-database auth (#2910) +- Fixed: Tags UI spacing on small displays (#2904) +- Fixed: Push updated notification now shows correct tags (#2897) +- Fixed: "Restart Container" button in superuser config panel (#2928) +- Fixed: Various small JavaScript security fixes + +### v2.6.2 + +- Fixed: Failure to register uploaded TLS certificates (#2946) + +### v2.6.1 + +- Added: Optimized overhead for direct downloads from Swift storage (#2889) +- Fixed: Immediately expire image builds that fail to start (#2887) +- Fixed: Failure to list all GitHub Enterprise namespaces (#2894) +- Fixed: Incorrect links to builds in notifications (#2895) +- Fixed: Failure to delete certain app repositories (#2893) +- Fixed: Inability to display Tag Signing status (#2890) +- Fixed: Broken health check for OIDC authentication (#2888) + +### v2.6.0 + +- Added: Ability to use OIDC token for CLI login (#2695) +- Added: Documentation for OIDC callback URLs in setup tool +- Added: Ability for users to change their family and given name and company info (#2870) +- Added: Support for invite-only user sign up (#2867) +- Added: Option to disable partial autocompletion of users (#2864) +- Added: Georeplication support in Swift storage (#2874) +- Fixed: Namespace links ending in slashes (#2871) +- Fixed: Contact info setup in setup tool (#2866) +- Fixed: Lazy loading of teams and robots (#2883) +- Fixed: OIDC auth headers (#2695) + +### v2.5.0 + +- Added: Better TLS caching (#2860) +- Added: Feature flag to allow read-only users to see build logs (#2850) +- Added: Feature flag to enable team sync setup when not a superuser (#2813) +- Added: Preferred public organizations list (#2850) +- Added: OIDC support for OIDC implementations without user info endpoint (#2817) +- Added: Support for tag expiration, in UI and view a special `quay.expires-after` label (#2718) +- Added: Health checks report failure reasons (#2636) +- Added: Enable database connection pooling (#2834) + +- Fixed: setting of team resync option +- Fixed: Purge repository on very large repositories + +### v2.4.0 + +- Added: Kubernetes Applications Support +- Added: Full-page search UI (#2529) +- Added: Always generate V2 manifests for tag operations in UI (#2608) +- Added: Option to enable public repositories in v2 catalog API (#2654) +- Added: Disable repository notifications after 3 failures (#2652) +- Added: Remove requirement for flash for copy button in UI (#2667) + +- Fixed: Upgrade support for Markdown (#2624) +- Fixed: Kubernetes secret generation with secrets with CAPITAL names (#2640) +- Fixed: Content-Length reporting on HEAD requests (#2616) +- Fixed: Use configured email address as the sender in email notifications (#2635) +- Fixed: Better peformance on permissions lookup (#2628) +- Fixed: Disable federated login for new users if user 
creation is disabled (#2623) +- Fixed: Show build logs timestamps by default (#2647) +- Fixed: Custom TLS certificates tooling in superuser panel under Kubernetes (#2646, #2663) +- Fixed: Disable debug logs in superuser panel when under multiple instances (#2663) +- Fixed: External Notification Modal UI bug (#2650) +- Fixed: Security worker thrashing when security scanner not available +- Fixed: Torrent validation in superuser config panel (#2694) +- Fixed: Expensive database call in build badges (#2688) + +### v2.3.4 + +- Added: Always show tag expiration options in superuser panel + +### v2.3.3 + +- Added: Prometheus metric for queued builds (#2596) + +- Fixed: Allow selection of Gitlab repository when Gitlab sends no permissions (#2601) +- Fixed: Failure when viewing Gitlab repository with unexpected schema (#2599) +- Fixed: LDAP stability fixes (#2598, #2584, #2595) +- Fixed: Viewing of repositories with trust enabled caused a 500 (#2594, #2593) +- Fixed: Failure in setup tool when time machine config is not set (#2589) + +### v2.3.2 + +- Added: Configuration of time machine in UI (#2516) + +- Fixed: Auth header in OIDC login UserInfo call (#2585) +- Fixed: Flash of red error box on loading (#2562) +- Fixed: Search under postgres (#2568) +- Fixed: Gitlab namespaces with null avatars (#2570) +- Fixed: Build log archiver race condition which results in missing logs (#2575) +- Fixed: Team synchronization when encountering a user with a shared email address (#2580) +- Fixed: Create New tooltip hiding dropdown menu (#2579) +- Fixed: Ensure build logs archive lookup URL checks build permissions (#2578) + +### v2.3.1 + +**IMPORTANT NOTE:** This release fixes the 2.3.0 migration. If you experienced an error during the 2.3.0 migration, manually rollback and then upgrade your quay instance to 2.3.1. +- Fixed: Specify default server value for new bool field added to the repository table + +### v2.3.0 + +- Added: LDAP Team Sync support (#2387, #2527) +- Added: Improved search performance through pre-computed scores (#2441, #2531, #2533, #2539) +- Added: Ability to allow pulls even if audit logging fails (#2306) +- Added: Full error information for build errors in Superuser panel (#2505) +- Added: Better error messages passed to the Docker client (#2499) +- Added: Custom git triggers can specify separate build context directory (#2517, #2509) +- Added: Improved performance on repository list API (#2542, #2544, #2546) + +- Fixed: Handle undefined case in build message (#2501) +- Fixed: OIDC configuration in Superuser panel (#2520) +- Fixed: Ability to invite team members by email address (#2522) +- Fixed: Avatars for non-owner namespaces in GitLab (#2507, #2532) +- Fixed: Update dependencies and remove warnings (#2518, #2511, #2535, #2545, #2553) +- Fixed: Remove link to blog (#2523) +- Fixed: Better handling for unavailable frontend dependencies (#2503) +- Fixed: Top level redirect logic for missing repositories (#2540) +- Fixed: Remove extra slash from missing base image permissions error in build logs (#2548) +- Fixed: Backfill replication script when adjusting replication destinations (#2555) +- Fixed: Errors when deleting repositories without security scanning enabled (#2554) + +### v2.2.0 + +**IMPORTANT NOTE:** This release contains a migration which adds a new feature to the build system. This requires shutting down the entire cluster _including builders_ and running one instance to migrate the database forward. You _must_ use a v2.2.0 builder with a v2.2.0 Quay cluster. 
+ +- Added: Separate build contexts from Dockerfile locations (#2398, #2410, #2438, #2449, #2480, #2481) +- Added: Configuration and enforcement of maximum layer size (#2388) +- Added: OIDC configuration in the Super User Panel (#2393) +- Added: Batching of Security Scanner notifications (#2397) +- Added: Auth Failures now display messages on the docker client (#2428, #2474) +- Added: Redesigned Tags page to include Labels, Image ID Type, and more informative Security Scanner information (#2416) + +- Fixed: Parsing new docker client version format (#2378) +- Fixed: Improved repository search performance (#2392, #2440) +- Fixed: Miscellaneous Build Trigger page issues (#2405, #2406, #2407, #2408, #2409, #2414, #2418, #2445) +- Fixed: Remove all actionable CVEs from the docker image (#2422, #2468) +- Fixed: Minor bugs in Repository views (#2423, #2430, #2431) +- Fixed: Improve performance by deleting keys in redis rather than expiring (#2439) +- Fixed: Better error messages when configuring cloud storage (#2444) +- Fixed: Validation and installation of custom TLS certificates (#2473) +- Fixed: Garbage Collection corner case (#2404) + +### v2.1.0 + +**IMPORTANT NOTE FOR POSTGRES USERS:** This release contains a migration which adds full-text searching capabilities to Red Hat Quay. In order to support this feature, the migration will attempt to create the `pg_trgm` extension in the database. This operation requires **superuser access** to run and requires the extension to be installed. See https://coreos.com/quay-enterprise/docs/latest/postgres-additional-modules.html for more information on installing the extension. + +If the user given to Red Hat Quay is not a superuser, please temporarily grant superuser access to the Red Hat Quay user in the database (or change the user in config) **before** upgrading. 
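+For a Postgres-backed deployment this typically comes down to two statements run by an actual PostgreSQL superuser. A minimal sketch, assuming the database and the database user are both named `quay` and a local superuser role named `postgres` (adjust names, hosts, and authentication to your environment):
+
+```sh
+# Either create the extension up front as a PostgreSQL superuser
+# (pg_trgm ships with the postgresql-contrib packages)...
+psql -U postgres -d quay -c "CREATE EXTENSION IF NOT EXISTS pg_trgm;"
+
+# ...or temporarily promote the Quay database user so the migration can create
+# the extension itself, then revoke the privilege once the upgrade completes.
+psql -U postgres -d quay -c "ALTER USER quay WITH SUPERUSER;"
+# run the Red Hat Quay upgrade/migration here
+psql -U postgres -d quay -c "ALTER USER quay WITH NOSUPERUSER;"
+```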
+ +- Added: Full text search support (#2272) +- Added: OIDC support (#2300, #2348) +- Added: API for lookup of security status of a manifest (#2334) +- Added: More descriptive logs (#2358) + +- Fixed: Datetime bug in logs view (#2318) +- Fixed: Display bug in logs view (#2345) +- Fixed: Display of expiration date for licenses with multiple entries (#2354) +- Fixed: V1 search compatibility (#2344) + +### v2.0.5 + +- Added: Build logs viewer in superuser panel + +- Fixed: Support for wildcard certs in the superuser config panel + +### v2.0.4 + +- Added: Expand allowed length of namespaces to be between 2 and 255 characters (#2291) +- Added: Better messaging for namespaces (#2283) +- Added: More customization of Message Of The Day (MOTD) (#2282) +- Added: Configurable and default timeout for LDAP (#2247) +- Added: Custom SSL certificate panel in superuser panel (#2271, #2274) +- Added: User and Organization list pagination on superuser panel (#2250) +- Added: Performance improvements for georeplication queuing (#2254) +- Added: Automatic garbage collection in security scanner (#2257) +- Added: RECAPTCHA support during create account flow (#2245) +- Added: Always display full git error in build logs (#2277) +- Added: Superuser config clarification warnings (#2279) +- Added: Performance improvements around queues (#2276, #2286, #2287) +- Added: Automatic retry for security scanning (#2242) +- Added: Better error messaging on security scanner lookup failure (#2235) +- Added: Ensure robot accounts show at top of entity autocomplete (#2243) + +- Fixed: Exception when autocompleting users in teams (#2255) +- Fixed: Port mapping in ACI conversion (#2251, #2273) +- Fixed: Error messaging for attempting to join a team with invalid email (#2240) +- Fixed: Prometheus metrics for scale (#2237) +- Fixed: Security scanner notification pagination (#2233, #2249) + +- Regressed: Support for wildcard certs in the superuser config panel + +### v2.0.3 + +- Added: Allow extra_ca_certs to be a folder or a file (#2180) + +- Fixed: Cancelling builds (#2203) +- Fixed: Allow license to be set in setup tool (#2200) +- Fixed: Improve queue performance (#2207, #2211) +- Fixed: Improve security scan performance (#2209) +- Fixed: Fix user lookup for external auth engines (#2206) + +### v2.0.2 + +- Added: Ability to cancel builds that are already building. 
(#2041, #2127, #2186, #2189, #2190) +- Added: Notifications when a build is canceled (#2173, #2184) +- Added: Remove deprecated email flag from generated `docker login` commands (#2146) +- Added: Upgrade nginx to v1.11.5 (#2140) +- Added: Improve performance of robots management UI (#2145) +- Added: Add data about specific manifest or tag pulled in audit logs (#2152) +- Added: Debug nginx logs from non-proxy protocol connection (#2167) +- Added: Accept multiple team invitations simultaneously (#2169) +- Added: Password recovery defaults to resetting password (#2170) +- Added: Gzip javascript and svg assets (#2171) +- Added: Add support for custom ports in RADOS and S3 storage engines (#2185) +- Added: Prometheus metric for number of unscanned images (#2183) + +- Fixed: Fix entity search under Postgres (regression in v2.0.0) (#2172) +- Fixed: Error displayed for OAuth if an existing token already matches scopes (#2139) +- Fixed: Reduce timeouts of the build manager when under heavy load (#2143, #2157) +- Fixed: Fix guage metrics on prometheus endpoint (#2153) +- Fixed: Disable CoreOS update-engine on ephemeral Kubernetes builders (#2159) +- Fixed: Fix notifications generated by the build manager (#2163) +- Fixed: JSON encoding for chunk cleanup in Swift storage engine (#2162) +- Fixed: Fix configuration validator when setting up storage engine (#2176) +- Fixed: Multiline message of the day to not cover the search box (#2181) + +- Regressed: User lookup for external auth engines broken + +### v2.0.1 + +- Added: A defined timeout on all HTTP calls in notification methods +- Added: Customized Build start timeouts and better debug logs +- Added: A warning bar when the license will become invalid in a week +- Added: Collection of user metadata: name and company +- Added: New Prometheus metrics +- Added: Support for temp usernames and an interstitial to confirm username +- Added: Missing parameter on RADOS storage +- Added: Stagger worker startup +- Added: Make email addresses optional in external auth if email feature is turned off +- Added: External auth emails to entity search +- Added: Banner bar message when license has expired or is invalid + +- Fixed: Make sure to check for user before redirecting in update user +- Fixed: 500 on get label endpoint and add a test +- Fixed: KeyError in Github trigger setup +- Fixed: Change LDAP errors into debug statements to reduce log clutter +- Fixed: Bugs due to conflicting operation names in the API +- Fixed: Cannot-use-robot for private base image bug in build dialog +- Fixed: Swift exception reporting on deletion and add async chunk cleanup +- Fixed: Logs view for dates that start in zero +- Fixed: Small JS error fixes +- Fixed: A bug with accessing the su config panel without a license +- Fixed: Buildcomponent: raise heartbeat timeout to 60s +- Fixed: KeyError in config when not present in BitBucket trigger +- Fixed: Namespace lookup in V1 registry search +- Fixed: Build notification ref filtering setup in UI +- Fixed: Entity search API to not IndexError +- Fixed: Remove setup and superuser routes when SUPER_USERS is not enabled +- Fixed: TypeError in Gitlab trigger when user not found + +- Regressed: Superuser config panel cannot save + +### v2.0.0 + +This release is a **required release** and must be run before attempting an upgrade to v2.0.0+. 
+ +In order to upgrade to this version, your cluster must contain a valid license, which can be found and downloaded at: [tectonic.com](https://account.tectonic.com) + +- Added: Require valid license to enable registry actions (#2009, #2018) +- Added: The ability to delete users and organizations (#1698) +- Added: Add option to properly handle TLS terminated outside of the container (#1986) +- Added: Updated run trigger/build dialog (#1895) +- Added: Update dependencies to latest versions (#2012) +- Added: Ability to use dots and dashes in namespaces intended for use with newer Docker clients (#1852) +- Added: Changed dead queue item cleanup from 7 days to 1 day (#2019) +- Added: Add a default database timeout to prevent failed DB connections from hanging registry and API operations (#1764) + +- Fixed: Fix error if a vulnerability notification doesn't have a level filter (#1995) +- Fixed: Registry WWW-Authenticate and Link headers are now Registry API compliant (#2004) +- Fixed: Small fixes for Message of the Day feature (#2005, #2006) +- Fixed: Disallow underscores at the beginning of namespaces (#1852) +- Fixed: Installation tool liveness checks during container restarts (#2023) + +- Regressed: Entity search broken under Postgres + +### v1.18.0 + +- Added: Add message of the day (#1953) +- Added: Add repository list pagination (#1858) +- Added: Better 404 (and 403) pages (#1857) + +- Fixed: Always use absolute URLs in Location headers to fix blob uploads on nonstandard ports (#1957) +- Fixed: Improved reliability of several JS functions (#1959) (#1980) (#1981) +- Fixed: Handle unicode in entity search (#1939) +- Fixed: Fix tags API pagination (#1926) +- Fixed: Add configurable timeout and debug flags to Keystone users (#1867) +- Fixed: Build notifications were failing to fire (#1859) +- Fixed: Add feature flag to turn off requirement for team invitations (#1845) +- Fixed: Don't exception log for expected 404s in Swift storage (#1851) + +### v1.17.1 + +- Added: Repository admins can now invoke build triggers manually (#1822) +- Added: Improved notifications UI and features (#1839) +- Added: Improved UX for managing teams (#1509) + +- Fixed: Timeline's delete-then-tag display bug (#1824) +- Fixed: Add .well-known endpoint for Quay (#1790) +- Fixed: .tar.gz does not work when building from archive via web UI (#1832) +- Fixed: Delete empty Swift chunks (#1844) +- Fixed: Handling of custom LDAP cert (#1846) + +### v1.17.0 + +- Added: Added Labels API (#1631) +- Added: Kubernetes namespace existence check (#1771) +- Added: New UI and permissions handling for robots and teams (#1754, #1815) +- Added: Retry attempts to the S3-like storages (#1748, #1801, #1802) +- Added: Improved messaging when changing email addresses (#1735) +- Added: Emails now include logos (#1691) +- Added: Improved messaging around expired builds (#1681) + +- Fixed: Logs inside the container failing to rotate (#1812) +- Fixed: Filtering of repositories only visible to organization admins (#1795) +- Fixed: Invalid HTTP response when creating a duplicate tag (#1780) +- Fixed: Asynchronous Worker robustness (#1778, #1781) +- Fixed: Manual build failure when using Bitbucket triggers (#1767) +- Fixed: Missing "Sign Out" link on mobile UI (#1765) +- Fixed: Miscellaneous changes to title usage (#1763) +- Fixed: Repository star appearing when not logged in (#1758) +- Fixed: Invalid AppC manifests generated when missing an ENV (#1753) +- Fixed: Timezones now incorporated into audit logs (#1747) +- Fixed: Fixed redirection to 
specific tags using short URLs (#1743) +- Fixed: Broken pagination over only public repositories (#1724, #1726, #1730) +- Fixed: Invisible glyph icons on date selectors (#1717) +- Fixed: Possibility storage of duplicate images (#1706) +- Fixed: Broken "Your Account" links in emails (#1694) +- Fixed: Non-admin users no longer default to organization-wide read (#1685) +- Fixed: Database performance (#1680, #1688, #1690, #1722, #1744, #1772) + +### v1.16.6 + +- Added: Ability to override secure cookie setting when using HTTPS protocol (#1712) + +### v1.16.5 + +- Added: Better logging for delete issues in Swift (#1676) +- Added: Storage validation on /status endpoint (#1660) +- Added: Better logging for upload issues (#1639, #1670) +- Added: Support for Swift retries (#1638) +- Added: Support for Swift timeouts (#1634) +- Fixed: Pagination off-by-one issue in repository tags API (#1672) +- Fixed: Missing requires_cors on archived build logs URL (#1673) +- Fixed: Tutorial disconnect UI (#1657) +- Fixed: Enter key in password dialogs in Firefox (#1655) +- Fixed: Custom trigger links in UI (#1652) +- Fixed: GC database query optimizations (#1645, 1662) +- Fixed: Multipart refs on builds (#1651) +- Fixed: Invalid tags on builds (#1648) +- Fixed: Fresh login check failure (#1646) +- Fixed: Support for empty RDN in LDAP configuration (#1644) +- Fixed: Error raised on duplicate placements when replicating (#1633) + +### v1.16.4 + +- Added: Configuration of multiple RDNs for LDAP login (#1601) +- Added: Key Server health check (#1598) +- Added: Promtheus endpoint (#1596) +- Added: Upgrade to latest upstream PyGitHub (#1592) +- Fixed: Race condition around starting builds (#1621) +- Fixed: Geo-replication for CAS objects (#1608) +- Fixed: Popularity metrics on list repositories API endpoint (#1599) +- Fixed: Removed redundant namespaces from repository listings (#1595) +- Fixed: Internal error when paginating a PostgreSQL-backed Quay (#1593, #1622) +- Fixed: GitHub API URLs are properly stripped of trailing slashes (#1590) +- Fixed: Tutorial fails gracefully without Redis (#1587) + +### v1.16.3 + +- Added: Repository Activity Heatmap (#1569, #1571) +- Added: Restyled Robots View (#1568) +- Added: LDAP certificates specified by name (#1549) +- Added: Multiselect toggles for permissions (#1562) +- Added: Dynamically generated sitemap.txt (#1552) +- Fixed: Fixed URLs missing ports in setup process (#1583) +- Fixed: OAuth key not found error when setting up Dex (#1583) +- Fixed: Timestamps in syslog now display the proper time (#1579) +- Fixed: Added offset for clock skew in JWT expiration (#1578) +- Fixed: Replacement of illegal characters in usernames (#1565) +- Fixed: Differentiate between different tags on generated ACIs (#1523) +- Fixed: Decreased lifetime of various redis keys (#1561) +- Fixed: Build pages now robust to redis outage (#1560) +- Fixed: Validation of build arguments before contacting a build worker (#1557) +- Fixed: Removed hosted Quay.io status from Enterprise 500 page (#1548) +- Fixed: Performance of database queries (#1512) + +### v1.16.2 + +- Added: Ability for admins to "Take Ownership" of a namespace (#1526) +- Fixed: Encrypted Password Dialog can use External Auth Usernames (#1541) +- Fixed: Logging race condition in container startup (#1537) +- Fixed: Improved database performance on various pages (#1511, #1514) +- Fixed: The 'Return' key now works in password dialogs (#1533) +- Fixed: Repository descriptions breaking log page styles (#1532) +- Fixed: Styles on Privacy and Terms 
of Service pages (#1531) + +### v1.16.1 + +- Added: Registry JWT now uses Quay's Service Keys (#1498, #1527) +- Added: Upgrade to Ubuntu 16.04 LTS base image (#1496) +- Added: Storage Replication for Registry v2 images (#1502) +- Added: Better error messaging for build logs (#1500) +- Added: Granting of OAuth tokens for users via xAuth (#1457) +- Added: Random generation of key configuration values (#1485) +- Added: Upgrade to AngularJS v1.5 (#1473) +- Added: Swift API v3 storage support (#1472) +- Added: Clarification on various tool tip dialogs (#1468) +- Added: Various backend performance increases (#1459, #1493, #1510, #950) +- Added: New Credentials, Team, Robot Dialogs (#1421, #1455) +- Fixed: Pagination keys must be url-safe base64 encoded (#1485) +- Fixed: Sign In to work with more password managers (#1508) +- Fixed: Role deletion UI (#1491) +- Fixed: UI expansion when large HTML "pre" tags are used in markdown (#1489) +- Fixed: Usernames not properly linking with external auth providers (#1483) +- Fixed: Display of dates in action logs UI (#1486) +- Fixed: Selection bug with checkboxes in the setup process (#1458) +- Fixed: Display error with Sign In (#1466) +- Fixed: Race condition in ACI generation (#1463, #1490) +- Fixed: Incorrect calculation of the actions log archiver +- Fixed: Displaying many image tracks on the Repository tags page (#1451) +- Fixed: Handling of admin OAuth Scope (#1447) + ### v1.16.0 - Added: Unified dashboard for viewing vulnerabilities and packages (#268) @@ -121,7 +723,7 @@ ### v1.13.0 -- Added new Quay Enterprise rebranding (#723, #738, #735, #745, #746, #748, #747, #751) +- Added new Red Hat Quay rebranding (#723, #738, #735, #745, #746, #748, #747, #751) - Added a styled 404 page (#683) - Hid the run button from users that haven't created a trigger (#727) - Added timeouts to calls to GitLab, Bitbucket, GitHub APIs (#636, #633, #631, #722) diff --git a/Dockerfile b/Dockerfile index cb20b2746..36324c569 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,96 +1,128 @@ -# vim:ft=dockerfile +FROM centos:7 +LABEL maintainer "thomasmckay@redhat.com" -FROM phusion/baseimage:0.9.18 +ENV PYTHON_VERSION=2.7 \ + PATH=$HOME/.local/bin/:$PATH \ + PYTHONUNBUFFERED=1 \ + PYTHONIOENCODING=UTF-8 \ + LC_ALL=en_US.UTF-8 \ + LANG=en_US.UTF-8 \ + PIP_NO_CACHE_DIR=off -ENV DEBIAN_FRONTEND noninteractive -ENV HOME /root +ENV QUAYDIR /quay-registry +ENV QUAYCONF /quay-registry/conf +ENV QUAYPATH "." -# Install the dependencies. 
-RUN apt-get update # 07APR2016 +RUN mkdir $QUAYDIR +WORKDIR $QUAYDIR -# New ubuntu packages should be added as their own apt-get install lines below the existing install commands -RUN apt-get install -y git python-virtualenv python-dev libjpeg8 libjpeg62 libjpeg62-dev libevent-2.0.5 libevent-dev gdebi-core g++ libmagic1 phantomjs nodejs npm libldap-2.4-2 libldap2-dev libsasl2-modules libsasl2-dev libpq5 libpq-dev libfreetype6-dev libffi-dev libgpgme11 libgpgme11-dev +RUN INSTALL_PKGS="\ + python27 \ + python27-python-pip \ + rh-nginx112 rh-nginx112-nginx \ + openldap \ + scl-utils \ + gcc-c++ git \ + openldap-devel \ + gpgme-devel \ + dnsmasq \ + memcached \ + openssl \ + skopeo \ + " && \ + yum install -y yum-utils && \ + yum install -y epel-release centos-release-scl && \ + yum -y --setopt=tsflags=nodocs --setopt=skip_missing_names_on_install=False install $INSTALL_PKGS && \ + yum -y update && \ + yum -y clean all -# Build the python dependencies -ADD requirements.txt requirements.txt -RUN virtualenv --distribute venv -RUN venv/bin/pip install -r requirements.txt # 01MAR2016 -RUN venv/bin/pip freeze +COPY . . -# Install the binary dependencies -ADD binary_dependencies binary_dependencies -RUN gdebi --n binary_dependencies/*.deb +RUN scl enable python27 "\ + pip install --upgrade setuptools pip && \ + pip install -r requirements.txt --no-cache && \ + pip install -r requirements-tests.txt --no-cache && \ + pip freeze && \ + mkdir -p $QUAYDIR/static/webfonts && \ + mkdir -p $QUAYDIR/static/fonts && \ + mkdir -p $QUAYDIR/static/ldn && \ + PYTHONPATH=$QUAYPATH python -m external_libraries \ + " -# Install cfssl -RUN mkdir /gocode -ENV GOPATH /gocode -RUN curl -O https://storage.googleapis.com/golang/go1.6.linux-amd64.tar.gz && \ - tar -xvf go1.6.linux-amd64.tar.gz && \ - sudo mv go /usr/local && \ - rm -rf go1.6.linux-amd64.tar.gz && \ - /usr/local/go/bin/go get -u github.com/cloudflare/cfssl/cmd/cfssl && \ - /usr/local/go/bin/go get -u github.com/cloudflare/cfssl/cmd/cfssljson && \ - sudo cp /gocode/bin/cfssljson /bin/cfssljson && \ - sudo cp /gocode/bin/cfssl /bin/cfssl && \ - sudo rm -rf /gocode && sudo rm -rf /usr/local/go +RUN cp -r $QUAYDIR/static/ldn $QUAYDIR/config_app/static/ldn && \ + cp -r $QUAYDIR/static/fonts $QUAYDIR/config_app/static/fonts && \ + cp -r $QUAYDIR/static/webfonts $QUAYDIR/config_app/static/webfonts -# Install jwtproxy -RUN curl -L -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.1/jwtproxy-linux-x64 -RUN chmod +x /usr/local/bin/jwtproxy +# Check python dependencies for GPL +# Due to the following bug, pip results must be piped to a file before grepping: +# https://github.com/pypa/pip/pull/3304 +# 'docutils' is a setup dependency of botocore required by s3transfer. It's under +# GPLv3, and so is manually removed. 
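+# The check below freezes the installed Python packages, runs `pip show` on them, and
+# fails the build if any reported license field mentions GPL without also mentioning LGPL.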
+RUN rm -Rf /opt/rh/python27/root/usr/lib/python2.7/site-packages/docutils && \ + scl enable python27 "pip freeze" | grep -v '^-e' | awk -F == '{print $1}' | grep -v docutils > piplist.txt && \ + scl enable python27 "xargs -a piplist.txt pip --disable-pip-version-check show" > pipinfo.txt && \ + test -z "$(cat pipinfo.txt | grep GPL | grep -v LGPL)" && \ + rm -f piplist.txt pipinfo.txt -# Install Grunt -RUN ln -s /usr/bin/nodejs /usr/bin/node -RUN npm install -g grunt-cli +# # Front-end +RUN curl --silent --location https://rpm.nodesource.com/setup_8.x | bash - && \ + yum install -y nodejs && \ + curl --silent --location https://dl.yarnpkg.com/rpm/yarn.repo | tee /etc/yum.repos.d/yarn.repo && \ + rpm --import https://dl.yarnpkg.com/rpm/pubkey.gpg && \ + yum install -y yarn && \ + yarn install --ignore-engines && \ + yarn build && \ + yarn build-config-app -# Install Grunt depenencies -ADD grunt grunt -RUN cd grunt && npm install +# TODO: Build jwtproxy in dist-git +# https://jira.coreos.com/browse/QUAY-1315 +RUN curl -fsSL -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.3/jwtproxy-linux-x64 && \ + chmod +x /usr/local/bin/jwtproxy -# Run grunt -ADD static static -RUN cd grunt && grunt +# TODO: Build prometheus-aggregator in dist-git +# https://jira.coreos.com/browse/QUAY-1324 +RUN curl -fsSL -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator &&\ + chmod +x /usr/local/bin/prometheus-aggregator -RUN apt-get remove -y --auto-remove python-dev g++ libjpeg62-dev libevent-dev libldap2-dev libsasl2-dev libpq-dev libffi-dev libgpgme11-dev nodejs npm -RUN apt-get autoremove -y -RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* -RUN rm -rf grunt +# Update local copy of AWS IP Ranges. +RUN curl -fsSL https://ip-ranges.amazonaws.com/ip-ranges.json -o util/ipresolver/aws-ip-ranges.json -ADD conf/init/copy_config_files.sh /etc/my_init.d/ -ADD conf/init/doupdatelimits.sh /etc/my_init.d/ -ADD conf/init/copy_syslog_config.sh /etc/my_init.d/ -ADD conf/init/create_certs.sh /etc/my_init.d/ -ADD conf/init/runmigration.sh /etc/my_init.d/ -ADD conf/init/syslog-ng.conf /etc/syslog-ng/ -ADD conf/init/zz_boot.sh /etc/my_init.d/ +RUN ln -s $QUAYCONF /conf && \ + mkdir /var/log/nginx && \ + ln -sf /dev/stdout /var/log/nginx/access.log && \ + ln -sf /dev/stdout /var/log/nginx/error.log && \ + chmod -R a+rwx /var/log/nginx -ADD conf/init/service/ /etc/service/ +# Cleanup +RUN UNINSTALL_PKGS="\ + gcc-c++ \ + openldap-devel \ + gpgme-devel \ + optipng \ + kernel-headers \ + " && \ + yum remove -y $UNINSTALL_PKGS && \ + yum clean all && \ + rm -rf /var/cache/yum /tmp/* /var/tmp/* /root/.cache -RUN rm -rf /etc/service/syslog-forwarder +EXPOSE 8080 8443 7443 -# Download any external libs. -RUN mkdir static/fonts static/ldn -ADD external_libraries.py external_libraries.py -RUN venv/bin/python -m external_libraries -RUN mkdir /usr/local/nginx/logs/ +RUN chgrp -R 0 $QUAYDIR && \ + chmod -R g=u $QUAYDIR -# TODO(ssewell): only works on a detached head, make work with ref -ADD .git/HEAD GIT_HEAD +RUN mkdir /datastorage && chgrp 0 /datastorage && chmod g=u /datastorage && \ + mkdir -p /var/log/nginx && chgrp 0 /var/log/nginx && chmod g=u /var/log/nginx && \ + mkdir -p /conf/stack && chgrp 0 /conf/stack && chmod g=u /conf/stack && \ + mkdir -p /tmp && chgrp 0 /tmp && chmod g=u /tmp && \ + chmod g=u /etc/passwd -# Add all of the files! -ADD . . 
+RUN chgrp 0 /var/opt/rh/rh-nginx112/log/nginx && chmod g=u /var/opt/rh/rh-nginx112/log/nginx -# Run the tests -ARG RUN_TESTS=true -ENV RUN_TESTS ${RUN_TESTS} +VOLUME ["/var/log", "/datastorage", "/tmp", "/conf/stack"] -RUN if [ "$RUN_TESTS" = true ]; then \ - TEST=true venv/bin/python -m unittest discover -f; \ - fi -RUN if [ "$RUN_TESTS" = true ]; then \ - TEST=true venv/bin/python -m test.registry_tests -f; \ - fi -RUN PYTHONPATH=. venv/bin/alembic heads | grep -E '^[0-9a-f]+ \(head\)$' > ALEMBIC_HEAD +ENTRYPOINT ["/quay-registry/quay-entrypoint.sh"] +CMD ["registry"] -VOLUME ["/conf/stack", "/var/log", "/datastorage", "/tmp", "/conf/etcd"] - -EXPOSE 443 8443 80 +# root required to create and install certs +# https://jira.coreos.com/browse/QUAY-1468 +# USER 1001 diff --git a/Dockerfile.cirun b/Dockerfile.cirun new file mode 100644 index 000000000..fb906dce8 --- /dev/null +++ b/Dockerfile.cirun @@ -0,0 +1,8 @@ +FROM quay-ci-base +RUN mkdir -p conf/stack +RUN rm -rf test/data/test.db +ENV ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE remove-old-fields +ADD cirun.config.yaml conf/stack/config.yaml +RUN /usr/bin/scl enable python27 rh-nginx112 "LOGGING_LEVEL=INFO python initdb.py" +ENTRYPOINT ["/quay-registry/quay-entrypoint.sh"] +CMD ["registry"] diff --git a/Dockerfile.dev b/Dockerfile.dev new file mode 100644 index 000000000..03c97ce1a --- /dev/null +++ b/Dockerfile.dev @@ -0,0 +1,19 @@ +# -*- mode: dockerfile -*- +# vi: set ft=dockerfile : + +FROM quay.io/quay/quay-base:latest + +WORKDIR $QUAYDIR + +COPY requirements.txt requirements-tests.txt ./ + +# Put the virtualenv outside the source directory. This lets us mount +# the Quay source as a volume for local development. +RUN virtualenv --distribute /venv \ + && /venv/bin/pip install -r requirements.txt \ + && /venv/bin/pip install -r requirements-tests.txt \ + && /venv/bin/pip freeze + +ENV PATH /venv/bin:${PATH} + +RUN ln -s $QUAYCONF /conf diff --git a/Dockerfile.osbs b/Dockerfile.osbs new file mode 100644 index 000000000..1450312df --- /dev/null +++ b/Dockerfile.osbs @@ -0,0 +1,142 @@ +FROM registry.redhat.io/rhel7:7.7 +LABEL maintainer "thomasmckay@redhat.com" + +ENV PYTHON_VERSION=2.7 \ + PATH=$HOME/.local/bin/:$PATH \ + PYTHONUNBUFFERED=1 \ + PYTHONIOENCODING=UTF-8 \ + LC_ALL=en_US.UTF-8 \ + LANG=en_US.UTF-8 \ + PIP_NO_CACHE_DIR=off + +ENV QUAYDIR /quay-registry +ENV QUAYCONF /quay-registry/conf +ENV QUAYPATH "." + +RUN mkdir $QUAYDIR +WORKDIR $QUAYDIR + +RUN INSTALL_PKGS="\ + python27 \ + python27-python-pip \ + rh-nginx112 rh-nginx112-nginx \ + openldap \ + scl-utils \ + gcc-c++ git \ + openldap-devel \ + gpgme-devel \ + dnsmasq \ + memcached \ + openssl \ + skopeo \ + " && \ + yum install -y yum-utils && \ + yum-config-manager --quiet --disable "*" >/dev/null && \ + yum-config-manager --quiet --enable \ + rhel-7-server-rpms \ + rhel-server-rhscl-7-rpms \ + rhel-7-server-optional-rpms \ + rhel-7-server-extras-rpms \ + --save >/dev/null && \ + yum -y --setopt=tsflags=nodocs --setopt=skip_missing_names_on_install=False install $INSTALL_PKGS && \ + yum -y update && \ + yum -y clean all + +COPY . . 
+ +RUN scl enable python27 "\ + pip install --upgrade setuptools pip && \ + pip install -r requirements.txt --no-cache && \ + pip freeze && \ + mkdir -p $QUAYDIR/static/webfonts && \ + mkdir -p $QUAYDIR/static/fonts && \ + mkdir -p $QUAYDIR/static/ldn && \ + PYTHONPATH=$QUAYPATH python -m external_libraries \ + " + +RUN cp -r $QUAYDIR/static/ldn $QUAYDIR/config_app/static/ldn && \ + cp -r $QUAYDIR/static/fonts $QUAYDIR/config_app/static/fonts && \ + cp -r $QUAYDIR/static/webfonts $QUAYDIR/config_app/static/webfonts + +# Check python dependencies for GPL +# Due to the following bug, pip results must be piped to a file before grepping: +# https://github.com/pypa/pip/pull/3304 +# 'docutils' is a setup dependency of botocore required by s3transfer. It's under +# GPLv3, and so is manually removed. +RUN rm -Rf /opt/rh/python27/root/usr/lib/python2.7/site-packages/docutils && \ + scl enable python27 "pip freeze" | grep -v '^-e' | awk -F == '{print $1}' | grep -v docutils > piplist.txt && \ + scl enable python27 "xargs -a piplist.txt pip --disable-pip-version-check show" > pipinfo.txt && \ + test -z "$(cat pipinfo.txt | grep GPL | grep -v LGPL)" && \ + rm -f piplist.txt pipinfo.txt + +# Front-end +RUN curl --silent --location https://rpm.nodesource.com/setup_8.x | bash - && \ + yum install -y nodejs && \ + curl --silent --location https://dl.yarnpkg.com/rpm/yarn.repo | tee /etc/yum.repos.d/yarn.repo && \ + rpm --import https://dl.yarnpkg.com/rpm/pubkey.gpg && \ + yum install -y yarn && \ + yarn install --ignore-engines && \ + yarn build && \ + yarn build-config-app + +# TODO: Build jwtproxy in dist-git +# https://jira.coreos.com/browse/QUAY-1315 +RUN curl -fsSL -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.3/jwtproxy-linux-x64 && \ + chmod +x /usr/local/bin/jwtproxy + +# TODO: Build prometheus-aggregator in dist-git +# https://jira.coreos.com/browse/QUAY-1324 +RUN curl -fsSL -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator &&\ + chmod +x /usr/local/bin/prometheus-aggregator + +# Update local copy of AWS IP Ranges. 
+RUN curl -fsSL https://ip-ranges.amazonaws.com/ip-ranges.json -o util/ipresolver/aws-ip-ranges.json + +RUN ln -s $QUAYCONF /conf && \ + mkdir /var/log/nginx && \ + ln -sf /dev/stdout /var/log/nginx/access.log && \ + ln -sf /dev/stdout /var/log/nginx/error.log && \ + chmod -R a+rwx /var/log/nginx + +# Cleanup +RUN UNINSTALL_PKGS="\ + gcc-c++ git \ + openldap-devel \ + gpgme-devel \ + optipng \ + kernel-headers \ + " && \ + yum remove -y $UNINSTALL_PKGS && \ + yum clean all && \ + rm -rf /var/cache/yum /tmp/* /var/tmp/* /root/.cache + +EXPOSE 8080 8443 7443 + +RUN chgrp -R 0 $QUAYDIR && \ + chmod -R g=u $QUAYDIR + +RUN mkdir /datastorage && chgrp 0 /datastorage && chmod g=u /datastorage && \ + mkdir -p /var/log/nginx && chgrp 0 /var/log/nginx && chmod g=u /var/log/nginx && \ + mkdir -p /conf/stack && chgrp 0 /conf/stack && chmod g=u /conf/stack && \ + mkdir -p /tmp && chgrp 0 /tmp && chmod g=u /tmp && \ + chmod g=u /etc/passwd + +RUN chgrp 0 /var/opt/rh/rh-nginx112/log/nginx && chmod g=u /var/opt/rh/rh-nginx112/log/nginx + +# Allow TLS certs to be created and installed as non-root user +RUN chgrp -R 0 /etc/pki/ca-trust/extracted && \ + chmod -R g=u /etc/pki/ca-trust/extracted && \ + chgrp -R 0 /etc/pki/ca-trust/source/anchors && \ + chmod -R g=u /etc/pki/ca-trust/source/anchors && \ + chgrp -R 0 /opt/rh/python27/root/usr/lib/python2.7/site-packages/requests && \ + chmod -R g=u /opt/rh/python27/root/usr/lib/python2.7/site-packages/requests && \ + chgrp -R 0 /opt/rh/python27/root/usr/lib/python2.7/site-packages/certifi && \ + chmod -R g=u /opt/rh/python27/root/usr/lib/python2.7/site-packages/certifi + +VOLUME ["/var/log", "/datastorage", "/tmp", "/conf/stack"] + +USER 1001 + +ENTRYPOINT ["/quay-registry/quay-entrypoint.sh"] +CMD ["registry"] + diff --git a/Dockerfile.rhel7 b/Dockerfile.rhel7 new file mode 100644 index 000000000..c52822462 --- /dev/null +++ b/Dockerfile.rhel7 @@ -0,0 +1,133 @@ +FROM registry.redhat.io/rhel7:7.7 +LABEL maintainer "thomasmckay@redhat.com" + +ENV PYTHON_VERSION=2.7 \ + PATH=$HOME/.local/bin/:$PATH \ + PYTHONUNBUFFERED=1 \ + PYTHONIOENCODING=UTF-8 \ + LC_ALL=en_US.UTF-8 \ + LANG=en_US.UTF-8 \ + PIP_NO_CACHE_DIR=off + +ENV QUAYDIR /quay-registry +ENV QUAYCONF /quay-registry/conf +ENV QUAYPATH "." + +RUN mkdir $QUAYDIR +WORKDIR $QUAYDIR + +RUN INSTALL_PKGS="\ + python27 \ + python27-python-pip \ + rh-nginx112 rh-nginx112-nginx \ + openldap \ + scl-utils \ + gcc-c++ git \ + openldap-devel \ + gpgme-devel \ + dnsmasq \ + memcached \ + openssl \ + skopeo \ + " && \ + yum install -y yum-utils && \ + yum-config-manager --quiet --disable "*" >/dev/null && \ + yum-config-manager --quiet --enable \ + rhel-7-server-rpms \ + rhel-server-rhscl-7-rpms \ + rhel-7-server-optional-rpms \ + rhel-7-server-extras-rpms \ + --save >/dev/null && \ + yum -y --setopt=tsflags=nodocs --setopt=skip_missing_names_on_install=False install $INSTALL_PKGS && \ + yum -y update && \ + yum -y clean all + +COPY . . 
+ +RUN scl enable python27 "\ + pip install --upgrade setuptools pip && \ + pip install -r requirements.txt --no-cache && \ + pip freeze && \ + mkdir -p $QUAYDIR/static/webfonts && \ + mkdir -p $QUAYDIR/static/fonts && \ + mkdir -p $QUAYDIR/static/ldn && \ + PYTHONPATH=$QUAYPATH python -m external_libraries \ + " + +RUN cp -r $QUAYDIR/static/ldn $QUAYDIR/config_app/static/ldn && \ + cp -r $QUAYDIR/static/fonts $QUAYDIR/config_app/static/fonts && \ + cp -r $QUAYDIR/static/webfonts $QUAYDIR/config_app/static/webfonts + +# Check python dependencies for GPL +# Due to the following bug, pip results must be piped to a file before grepping: +# https://github.com/pypa/pip/pull/3304 +# 'docutils' is a setup dependency of botocore required by s3transfer. It's under +# GPLv3, and so is manually removed. +RUN rm -Rf /opt/rh/python27/root/usr/lib/python2.7/site-packages/docutils && \ + scl enable python27 "pip freeze" | grep -v '^-e' | awk -F == '{print $1}' | grep -v docutils > piplist.txt && \ + scl enable python27 "xargs -a piplist.txt pip --disable-pip-version-check show" > pipinfo.txt && \ + test -z "$(cat pipinfo.txt | grep GPL | grep -v LGPL)" && \ + rm -f piplist.txt pipinfo.txt + +# Front-end +RUN curl --silent --location https://rpm.nodesource.com/setup_8.x | bash - && \ + yum install -y nodejs && \ + curl --silent --location https://dl.yarnpkg.com/rpm/yarn.repo | tee /etc/yum.repos.d/yarn.repo && \ + rpm --import https://dl.yarnpkg.com/rpm/pubkey.gpg && \ + yum install -y yarn && \ + yarn install --ignore-engines && \ + yarn build && \ + yarn build-config-app + +# TODO: Build jwtproxy in dist-git +# https://jira.coreos.com/browse/QUAY-1315 +RUN curl -fsSL -o /usr/local/bin/jwtproxy https://github.com/coreos/jwtproxy/releases/download/v0.0.3/jwtproxy-linux-x64 && \ + chmod +x /usr/local/bin/jwtproxy + +# TODO: Build prometheus-aggregator in dist-git +# https://jira.coreos.com/browse/QUAY-1324 +RUN curl -fsSL -o /usr/local/bin/prometheus-aggregator https://github.com/coreos/prometheus-aggregator/releases/download/v0.0.1-alpha/prometheus-aggregator &&\ + chmod +x /usr/local/bin/prometheus-aggregator + +# Update local copy of AWS IP Ranges. 
+RUN curl -fsSL https://ip-ranges.amazonaws.com/ip-ranges.json -o util/ipresolver/aws-ip-ranges.json + +RUN ln -s $QUAYCONF /conf && \ + mkdir /var/log/nginx && \ + ln -sf /dev/stdout /var/log/nginx/access.log && \ + ln -sf /dev/stdout /var/log/nginx/error.log && \ + chmod -R a+rwx /var/log/nginx + +# Cleanup +RUN UNINSTALL_PKGS="\ + gcc-c++ git \ + openldap-devel \ + gpgme-devel \ + optipng \ + kernel-headers \ + " && \ + yum remove -y $UNINSTALL_PKGS && \ + yum clean all && \ + rm -rf /var/cache/yum /tmp/* /var/tmp/* /root/.cache + +EXPOSE 8080 8443 7443 + +RUN chgrp -R 0 $QUAYDIR && \ + chmod -R g=u $QUAYDIR + +RUN mkdir /datastorage && chgrp 0 /datastorage && chmod g=u /datastorage && \ + mkdir -p /var/log/nginx && chgrp 0 /var/log/nginx && chmod g=u /var/log/nginx && \ + mkdir -p /conf/stack && chgrp 0 /conf/stack && chmod g=u /conf/stack && \ + mkdir -p /tmp && chgrp 0 /tmp && chmod g=u /tmp && \ + chmod g=u /etc/passwd + +RUN chgrp 0 /var/opt/rh/rh-nginx112/log/nginx && chmod g=u /var/opt/rh/rh-nginx112/log/nginx + +VOLUME ["/var/log", "/datastorage", "/tmp", "/conf/stack"] + +ENTRYPOINT ["/quay-registry/quay-entrypoint.sh"] +CMD ["registry"] + +# root required to create and install certs +# https://jira.coreos.com/browse/QUAY-1468 +# USER 1001 diff --git a/GOVERNANCE.md b/GOVERNANCE.md new file mode 100644 index 000000000..b4368c97e --- /dev/null +++ b/GOVERNANCE.md @@ -0,0 +1,66 @@ +# Project Quay Governance + +Project Quay is run according to the guidelines specified below. This is a living document and is expected to evolve along with Project Quay itself. + +## Principles + +Project Quay strives to follow these principles at all times: +* Openness - Quay evolves and improves out in the open, with transparent work and decision making that is clear and well understood. +* Respectfulness - Quay is a project for a diverse community where different points of view are welcomed. Healthy and respectful discussions help us meet our goals and deliver a better end product. +* Meritocracy - In the Quay community all ideas are heard but only the best ideas help drive the project forward. As an open, respectful community we will judge all ideas on their technical merit and alignment with Quay's design principles. +* Accountability - The Quay community is accountable + * to our users to deliver the best software possible + * to the project to ensure each Contributor and Maintainer carries out their duties to the best of their abilities + * to itself to ensure the Quay remains a project where indviduals can be passionate about contributing their time and energy + +## Maintainers + +Maintainers play a special role to ensure that contributions align with the expected quality, consistency and long term vision for Project Quay. Each Maintainer is vital to the success of Project Quay and has decided to make the commitment to that cause. Being a Maintainer is difficult work and not for everyone. Therefore Project Quay will have a small group of Maintainers- as many as deemed necessary to handle the pipeline of contributions being made to the project. + +### Becoming a Maintainer + +Each Maintainer must also be a Contributor. Candidates for the Maintainer role are individuals who have made recent, substantial and recurring contributions to the project. The existing Maintainers will periodically identify Contributors and make recommendations to the community that those individuals become Maintainers. 
The Maintainers will then vote on the candidate and, if agreed, the candidate will be invited to raise a PR to add their name to the MAINTAINERS.md file. Approval of that PR signals the Contributor is now a Maintainer.
+
+### Responsibilities of a Maintainer
+
+Project Quay's success depends on how well Maintainers perform their duties. Maintainers are responsible for monitoring Slack and e-mail lists, helping triage issues on the Project Quay JIRA board, reviewing PRs and ensuring responses are provided to Contributors, and assisting with regular Project Quay releases. If Contributors are the lifeblood of an open source community, the Maintainers act as the heart, hands, eyes and ears, helping to keep the project moving and viable.
+
+### Stepping Down as a Maintainer
+
+A Maintainer may decide they are no longer interested in or able to carry out the role. In such a situation the Maintainer should notify the other Maintainers of their intention to step down and help identify a replacement from existing Contributors. Ideally the outgoing Maintainer will ensure that any outstanding work has been transitioned to another Maintainer. To carry out the actual removal, the outgoing Maintainer raises a PR against the MAINTAINERS.md file to remove their name.
+
+## Contributors
+
+Anyone can be a Contributor to Project Quay. No special approval is required: simply go through our Getting Started guide, fork one of our repositories and submit a PR. All types of contributions are welcome, whether bug reports via JIRA, code, or documentation.
+
+## Sub-Projects
+
+Project Quay is primarily focused on the delivery of Quay itself but also contains various sub-projects such as Clair and Quay-Builders. Each sub-project must have its own dedicated repository containing a MAINTAINERS.md file. Each sub-project will abide by this Governance model.
+
+Requests for new sub-projects under Project Quay should be raised to the Maintainers.
+
+## Code of Conduct
+
+Project Quay abides by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
+
+## How Decisions Are Made
+
+Most of the decision making for Project Quay happens through the regular PR approval process. We stand by the notion that what exists in the Project Quay repositories is the end result of countless community-driven decisions.
+
+When a more complex decision is required, for example a technical issue related to a PR, it is expected that the involved parties will resolve the dispute in a respectful and efficient manner. If the dispute cannot be resolved between the involved parties then the Maintainers will review the dispute and come to an agreement via majority vote amongst themselves. All decision making should be tracked via a JIRA issue and performed transparently via the Project Quay communication channels.
+
+## Project Quay Releases
+
+On a regular basis, Project Quay will issue a release. The release cadence is not strictly defined, but releases should happen approximately every 3 months. Maintainers take part in a rotating "Release Nanny" role whereby each Maintainer shares the responsibility of creating a Quay release.
+ +Release duties include: +* Creating the Release Notes +* Verifying the automated tests have passed +* Building the necessary Quay, Clair-JWT, and Quay-Builder container images +* Publishing the container images to quay.io +* Updating the github release pages +* Notifying the community of the new release + +## DCO and Licenses + +Project Quay uses the [Apache 2.0](https://opensource.org/licenses/Apache-2.0) license. \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..5c304d1a4 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/MAINTAINERS b/MAINTAINERS deleted file mode 100644 index acb8578ca..000000000 --- a/MAINTAINERS +++ /dev/null @@ -1,3 +0,0 @@ -Joseph Schorr (@josephschorr) -Jimmy Zelinskie (@jzelinskie) -Jake Moshenko (@jakedt) diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..e4e4e7f95 --- /dev/null +++ b/Makefile @@ -0,0 +1,180 @@ +SHELL := /bin/bash + +export PATH := ./venv/bin:$(PATH) + +SHA := $(shell git rev-parse --short HEAD ) +REPO := quay.io/quay/quay +TAG := $(REPO):$(SHA) + +MODIFIED_FILES_COUNT = $(shell git diff --name-only origin/master | grep -E .+\.py$ | wc -l) +GIT_MERGE_BASED = $(shell git merge-base origin/master HEAD) +MODIFIED_FILES = $(shell git diff --name-only $(GIT_MERGE_BASED) | grep -E .+\.py$ | paste -sd ' ') + +show-modified: + echo $(MODIFIED_FILES) + +.PHONY: all unit-test registry-test registry-test-old buildman-test test pkgs build run clean + +all: clean pkgs test build + +pkgs: requirements.txt requirements-dev.txt requirements-tests.txt + pip install -r $< + +requirements.txt: requirements-nover.txt + # Create a new virtualenv and activate it + pyenv virtualenv 2.7.12 quay-deps + pyenv activate quay-deps + + # Install unversioned dependencies with your changes + pip install -r requirements-nover.txt + + # Run the unit test suite + $(MAKE) unit + + # Freeze the versions of all of the dependencies + pip freeze > requirements.txt + + # Delete the virtualenv + pyenv uninstall quay-deps + +QUAY_CONFIG ?= ../quay-config +conf/stack/license: $(QUAY_CONFIG)/local/license + mkdir -p conf/stack + ln -s $(QUAY_CONFIG)/local/license conf/stack/license + +unit-test: + ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields TEST=true PYTHONPATH="." py.test \ + --cov="." --cov-report=html --cov-report=term-missing \ + --timeout=3600 --verbose -x \ + ./ + +registry-test: + TEST=true ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields PYTHONPATH="." py.test \ + --cov="." --cov-report=html --cov-report=term-missing \ + --timeout=3600 --verbose --show-count -x \ + test/registry/registry_tests.py + +registry-test-old: + TEST=true PYTHONPATH="." ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields py.test \ + --cov="." --cov-report=html --cov-report=term-missing \ + --timeout=3600 --verbose --show-count -x \ + ./test/registry_tests.py + +buildman-test: + TEST=true PYTHONPATH="." 
ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields py.test \ + --cov="." --cov-report=html --cov-report=term-missing \ + --timeout=3600 --verbose --show-count -x \ + ./buildman/ + +certs-test: + ./test/test_certs_install.sh + +full-db-test: ensure-test-db + TEST=true PYTHONPATH=. QUAY_OVERRIDE_CONFIG='{"DATABASE_SECRET_KEY": "anothercrazykey!"}' \ + ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields alembic upgrade head + TEST=true PYTHONPATH=. ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields \ + SKIP_DB_SCHEMA=true py.test --timeout=7200 \ + --verbose --show-count -x --ignore=endpoints/appr/test/ \ + ./ + +clients-test: + cd test/clients; python clients_test.py + +test: unit-test registry-test registry-test-old certs-test + +ensure-test-db: + @if [ -z $(TEST_DATABASE_URI) ]; then \ + echo "TEST_DATABASE_URI is undefined"; \ + exit 1; \ + fi + +PG_PASSWORD := quay +PG_USER := quay +PG_HOST := postgresql://$(PG_USER):$(PG_PASSWORD)@localhost/quay + +test_postgres : TEST_ENV := SKIP_DB_SCHEMA=true TEST=true \ + TEST_DATABASE_URI=$(PG_HOST) PYTHONPATH=. + +test_postgres: + docker rm -f postgres-testrunner-postgres || true + docker run --name postgres-testrunner-postgres \ + -e POSTGRES_PASSWORD=$(PG_PASSWORD) -e POSTGRES_USER=${PG_USER} \ + -p 5432:5432 -d postgres:9.2 + until pg_isready -d $(PG_HOST); do sleep 1; echo "Waiting for postgres"; done + $(TEST_ENV) alembic upgrade head + $(TEST_ENV) py.test --timeout=7200 --verbose --show-count ./ --color=no \ + --ignore=endpoints/appr/test/ -x + docker rm -f postgres-testrunner-postgres || true + +WEBPACK := node_modules/.bin/webpack +$(WEBPACK): package.json + npm install webpack + npm install + +BUNDLE := static/js/build/bundle.js +$(BUNDLE): $(WEBPACK) tsconfig.json webpack.config.js typings.json + $(WEBPACK) + +GRUNT := grunt/node_modules/.bin/grunt +$(GRUNT): grunt/package.json + cd grunt && npm install + +JS := quay-frontend.js quay-frontend.min.js template-cache.js +CSS := quay-frontend.css +DIST := $(addprefix static/dist/, $(JS) $(CSS) cachebusters.json) +$(DIST): $(GRUNT) + cd grunt && ../$(GRUNT) + +build: $(WEBPACK) $(GRUNT) + +docker-build: pkgs build + ifneq (0,$(shell git status --porcelain | awk 'BEGIN {print $N}')) + echo 'dirty build not supported - run `FORCE=true make clean` to remove' + exit 1 + endif + # get named head (ex: branch, tag, etc..) + NAME = $(shell git rev-parse --abbrev-ref HEAD) + # checkout commit so .git/HEAD points to full sha (used in Dockerfile) + git checkout $(SHA) + docker build -t $(TAG) . + git checkout $(NAME) + echo $(TAG) + +app-sre-docker-build: + # get named head (ex: branch, tag, etc..) + export NAME=$(shell git rev-parse --abbrev-ref HEAD) + # checkout commit so .git/HEAD points to full sha (used in Dockerfile) + echo "$(SHA)" + git checkout $(SHA) + $(BUILD_CMD) -t ${IMG} . + git checkout $(NAME) + +run: license + goreman start + + +clean: + find . -name "*.pyc" -exec rm -rf {} \; + rm -rf node_modules 2> /dev/null + rm -rf grunt/node_modules 2> /dev/null + rm -rf dest 2> /dev/null + rm -rf dist 2> /dev/null + rm -rf .cache 2> /dev/null + rm -rf static/js/build + rm -rf static/build + rm -rf static/dist + rm -rf build + rm -rf conf/stack + rm -rf screenshots + + +yapf-all: + yapf -r . 
-p -i + + +yapf-diff: + if [ $(MODIFIED_FILES_COUNT) -ne 0 ]; then yapf -d -p $(MODIFIED_FILES) ; fi + + +yapf-test: + if [ `yapf -d -p $(MODIFIED_FILES) | wc -l` -gt 0 ] ; then false ; else true ;fi diff --git a/Makefile.ci b/Makefile.ci new file mode 100644 index 000000000..ec0b4e963 --- /dev/null +++ b/Makefile.ci @@ -0,0 +1,69 @@ +SHELL := /bin/bash +PYTEST_MARK ?= shard_1_of_1 + +export PATH := ./venv/bin:$(PATH) + +.PHONY: all unit-test registry-test registry-test-old test + +all: test + +unit-test: + TEST=true PYTHONPATH="." ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields py.test \ + -m $(PYTEST_MARK) \ + --cov="." --cov-report=html --cov-report=term-missing \ + --timeout=3600 --verbose -x \ + ./ + +registry-test: + TEST=true PYTHONPATH="." ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields py.test \ + -m $(PYTEST_MARK) \ + --cov="." --cov-report=html --cov-report=term-missing \ + --timeout=3600 --verbose --show-count -x \ + test/registry/registry_tests.py + +registry-test-old: + TEST=true PYTHONPATH="." ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields py.test \ + --cov="." --cov-report=html --cov-report=term-missing \ + --timeout=3600 --verbose --show-count -x \ + ./test/registry_tests.py + +certs-test: + ./test/test_certs_install.sh + +gunicorn-tests: + ./test/test_gunicorn_running.sh + +full-db-test: ensure-test-db + TEST=true PYTHONPATH=. QUAY_OVERRIDE_CONFIG='{"DATABASE_SECRET_KEY": "anothercrazykey!"}' \ + ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields alembic upgrade head + TEST=true PYTHONPATH=. ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE=remove-old-fields \ + SKIP_DB_SCHEMA=true py.test --timeout=7200 \ + -m $(PYTEST_MARK) \ + --verbose --show-count -x --ignore=endpoints/appr/test/ \ + ./ + +test: unit-test registry-test + +ensure-test-db: + @if [ -z $(TEST_DATABASE_URI) ]; then \ + echo "TEST_DATABASE_URI is undefined"; \ + exit 1; \ + fi + +PG_PASSWORD := quay +PG_USER := quay +PG_HOST := postgresql://$(PG_USER):$(PG_PASSWORD)@localhost/quay + +test_postgres : TEST_ENV := SKIP_DB_SCHEMA=true TEST=true \ + TEST_DATABASE_URI=$(PG_HOST) PYTHONPATH=. + +test_postgres: + docker rm -f postgres-testrunner-postgres || true + docker run --name postgres-testrunner-postgres \ + -e POSTGRES_PASSWORD=$(PG_PASSWORD) -e POSTGRES_USER=${PG_USER} \ + -p 5432:5432 -d postgres:9.2 + until pg_isready -d $(PG_HOST); do sleep 1; echo "Waiting for postgres"; done + $(TEST_ENV) alembic upgrade head + $(TEST_ENV) py.test --timeout=7200 --verbose --show-count ./ --color=no \ + --ignore=endpoints/appr/test/ -x + docker rm -f postgres-testrunner-postgres || true diff --git a/Procfile b/Procfile new file mode 100644 index 000000000..b659c6260 --- /dev/null +++ b/Procfile @@ -0,0 +1,4 @@ +app: gunicorn -c conf/gunicorn_local.py application:application +webpack: npm run watch +builder: python -m buildman.builder + diff --git a/README.md b/README.md index fe895c18e..776fde590 100644 --- a/README.md +++ b/README.md @@ -1,95 +1,74 @@ -# Quay - container image registry +# Project Quay -`master` branch build status: ![Docker Repository on Quay](https://quay.io/repository/quay/quay/status?token=7bffbc13-8bb0-4fb4-8a70-684a0cf485d3 "Docker Repository on Quay") +[![Build Status](https://travis-ci.com/quay/quay.svg?token=pWvEz2TeyDsVn69Hkiwq&branch=master)](https://travis-ci.com/quay/quay) -Quay is a container image registry with managements APIs, a Docker registry API, a container build system. 
-The application is implemented as a set of API endpoints written in python and an Angular.js frontend. +:warning: The `master` branch may be in an *unstable or even broken state* during development. +Please use [releases] instead of the `master` branch in order to get stable software. -## Setup Development Environment +[releases]: https://github.com/quay/quay/releases -If you are doing local development on your workstation against the code base follow these instructions. +![Project Quay Logo](project_quay_logo.png) -### Docker +Project Quay builds, stores, and distributes your container images. -Quay and its parts can run inside of docker containers. -This method requires no installation of any python packages on your host machine. -The `local-docker.sh` script is provided to prepare and run parts of quay. -First, start redis: +High-level features include: +- Docker Registry Protocol [v2] +- Docker Manifest Schema [v2.1], [v2.2] +- [AppC Image Discovery] via on-demand transcoding +- Image Squashing via on-demand transcoding +- Authentication provided by [LDAP], [Keystone], [OIDC], [Google], and [GitHub] +- ACLs, team management, and auditability logs +- Geo-replicated storage provided by local filesystems, [S3], [GCS], [Swift], and [Ceph] +- Continuous Integration integrated with [GitHub], [Bitbucket], [GitLab], and [git] +- Security Vulnerability Analysis via [Clair] +- [Swagger]-compliant HTTP API -``` -docker run -d -p 6379:6379 redis -``` +[v2]: https://docs.docker.com/registry/spec/api/ +[v2.1]: https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md +[v2.2]: https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md +[AppC Image Discovery]: https://github.com/appc/spec/blob/master/spec/discovery.md +[LDAP]: https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol +[Keystone]: http://docs.openstack.org/developer/keystone +[OIDC]: https://en.wikipedia.org/wiki/OpenID_Connect +[Google]: https://developers.google.com/identity/sign-in/web/sign-in +[GitHub]: https://developer.github.com/v3/oauth +[S3]: https://aws.amazon.com/s3 +[GCS]: https://cloud.google.com/storage +[Swift]: http://swift.openstack.org +[Ceph]: http://docs.ceph.com/docs/master/radosgw/config +[GitHub]: https://github.com +[Bitbucket]: https://bitbucket.com +[GitLab]: https://gitlab.com +[git]: https://git-scm.com +[Clair]: https://github.com/quay/clair +[Swagger]: http://swagger.io -And clone the configuration repo: +## Getting Started -``` -git clone git@github.com:coreos-inc/quay-config.git ../quay-config -ln -s ../../quay-config/local conf/stack -``` +* Explore a live instance of Project Quay hosted at [Quay.io] +* Watch [talks] given about Project Quay +* Review the [documentation] for Red Hat Quay +* Get up and running with a containerized [development environment] -To build and run a docker container, pass one argument to local-docker.sh: +[Quay.io]: https://quay.io +[talks]: /docs/talks.md +[documentation]: https://access.redhat.com/documentation/en-us/red_hat_quay +[development environment]: /docs/development-container.md -- `dev`: run quay on port 5000 -- `buildman`: run the buildmanager -- `notifications`: run the notification worker -- `test`: run the unit tests -- `initdb`: clear and initialize the test database +## Community -For example: +* Mailing List: [quay-dev@googlegroups.com] +* IRC: #quay on [freenode.net] +* Bug tracking: [JBoss JIRA] +* Security Issues: [security@redhat.com] -``` -./local-docker.sh dev -```` +[quay-dev@googlegroups.com]: 
https://groups.google.com/forum/#!forum/quay-dev +[freenode.net]: https://webchat.freenode.net +[JBoss JIRA]: https://issues.jboss.org/projects/PROJQUAY +[security@redhat.com]: mailto:security@redhat.com -will start quay in a docker container. -Now quay will be running on: http://127.0.0.1:5000 -The username is `devtable` and the password is `password`. +## License -### OS X - -``` -git clone git@github.com:coreos-inc/quay.git -cd quay -./contrib/osx/local-setup.sh -``` - -Now run the server; it will use sqlite as the SQL server. - -``` -./local-run.sh -``` - -Now quay will be running on: http://127.0.0.1:5000 -The username is `devtable` and the password is `password`. - -Some packages may fail to build with clang (which now defaults to C11). -They can be installed with C99 with: - -``` -CFLAGS='-std=c99' pip install --no-cache --no-binary :all: gevent -CFLAGS='-std=c99' pip install --no-cache --no-binary :all: cffi -CFLAGS='-std=c99' pip install --no-cache --no-binary :all: cryptography -``` - -## Update Requirements - -1. Create a new virtualenv -2. Activate new virtualenv -3. Install unversioned dependencies -4. Run tests -5. Freeze requirements -6. Deactivate virtualenv -7. Remove virtualenv - -Example (commands may differ dependending on virtualenv tools): - -``` -pyenv virtualenv 2.7.11 quay-deps -pyenv activate quay-deps -pip install -r requirements-nover.txt -./local-test.sh -pip freeze > requirements.txt -pyenv deactivate quay-deps -pyenv uninstall quay-deps -``` +Project Quay is under the Apache 2.0 license. +See the LICENSE file for details. diff --git a/ROADMAP.md b/ROADMAP.md deleted file mode 100644 index 17bf5fd96..000000000 --- a/ROADMAP.md +++ /dev/null @@ -1,101 +0,0 @@ -# Quay Roadmap - - -| Abbrebiation | Feature | -|---|---| -| **(H)** | Hosted Quay.io | -| **(ER)**| Enterprise Registry Only | -| **(B)** | Builders | - -### Sprint 3/2 - 3/16 -- **(H)** Launch Clair 1.0 - - Tasks - - Backfill DB - - Provide timely logo feedback - - Quay blog post - - Clair blog post - - Screencast -- **(H)** Test and launch torrent GA - - Have a use case which shows improved performance - - Tasks - - Docs detailing reference use cases - - Publish quayctl - - Quayctl man page README - - Notify marketing when the above is done -- **(ER)** Figure out how to handle client cert generation - - Auto approval rules - - Auto generation - - UI for approving - - Tasks - - See if there is anything from Ed's tool that we can re-use - - Test assumptions around nginx client cert auth - - Figure out if we can verify certs in python if nginx approves - - Have a hangout with gtank w.r.t. 
client certs vs hmac vs jwt
-
-- **(ER)** Clair in ER
-  - Tasks
-    - Integrate Clair with cert generation tool
-    - Blog post for Clair in ER
-    - Add Clair config to the setup tool
-- Bugs
-  - Fix Quay permission loading performance for Clair
-    - OR: Make the Clair API on Quay batch
-  - Fix Clair readme
-  - Address Huawei PR for new Clair endpoint
-
-### Unallocated
-- **(ER)** Torrent support in ER
-  - Setup tool support
-  - Docs on how to get Chihaya running
-- **(ER)** Online upgrade tool
-  - Migrations while site is live
-  - Nag people to upgrade
-- **(B)** Dockerfile flag support
-  - Requires quay.yaml
-- **(B)** Move build traffic to Packet
-  - Preliminary tests reduce build start latency from 2 minutes to 20 seconds
-- **(B)** Multi-step builds
-  - build artifact
-  - bundle artifact
-  - test bundle
-- **(H)** Docker Notary
-  - Support signed images with a known key
-- **(H/ER)** Labels
-  - Support for Midas Package Manager-like distribution
-  - Integrated with Docker labels
-  - Mutable and immutable
-  - Searchable and fleshed out API
-- **(H)** Integrate with tectonic.com sales pipeline
-  - Mirror Quay customers in tectonic (SVOC)?
-  - Callbacks to inform tectonic about quay events
-  - Accept and apply QE licenses to the stack
-- **(ER)** Tectonic care and feeding
-  - Build tools to give us a concrete/declarative cluster deploy story
-  - Build a tool to migrate an app between tectonic clusters
-  - Assess the feasibility of upgrading a running cluster
-- **(H)** Geo distribution through tectonic
-  - Spin up a tectonic cluster in another region
-  - Modify registry to run standalone on a tectonic cluster
-- **(H)** Read available Quay.io
-  - Ability to choose uptime of data-plane auditability
-- **(H)** Launch our API GA
-  - Versioned and backward compatible
-  - Adequate documentation
-- **(B)** Builds as top level concept
-  - Multiple Quay.io repos from a single git push
-- **(H)** Become the Tectonic app store
-  - Pods/apps as top level concept
-- **(H)** Distribution tool
-  - Help people to get their apps from quay to Tectonic
-  - Requires App manifest or adequate flexibility
-- **(H)** AppC support
-  - rkt push
-  - discovery
-- **(H/ER)** Mirroring from another registry (pull)
-
-### Speculative
-- **(H)** Immediately consistent multi-region data availability
-  - Cockroach?
-- **(H)** 2 factor auth
-  - How to integrate with Docker CLI?
-- **(H)** Mirroring to a dependent registry (push)
diff --git a/TESTING.md b/TESTING.md
new file mode 100644
index 000000000..a5adf43d7
--- /dev/null
+++ b/TESTING.md
@@ -0,0 +1,49 @@
+# Testing Quay
+
+## Unit tests (run in CI automatically)
+
+Basic unit tests for testing all the functionality of Quay:
+
+```sh
+make unit-test
+```
+
+## Registry tests (run in CI automatically)
+
+Quay has two sets of registry tests (current and legacy), which simulate Docker clients by executing
+REST operations against a spawned Quay.
+
+```sh
+make registry-test
+make registry-test-old
+```
+
+## Certs tests (run in CI automatically)
+
+Ensures that custom TLS certificates are correctly loaded into the Quay container on startup.
+
+```sh
+make certs-test
+```
+
+## Full database tests (run in CI automatically)
+
+The full database test runs the entire suite of Quay unit tests against a real running database
+instance.
+
+NOTE: The database *must be running* on the local machine before this test can be run.
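+
+A throwaway database can be started with Docker, for example (a minimal sketch mirroring the
+`test_postgres` target in the Makefile; the `quay` user/password and the `postgres:9.2` image are
+illustrative, not required):
+
+```sh
+docker run --name quay-test-postgres \
+  -e POSTGRES_USER=quay -e POSTGRES_PASSWORD=quay \
+  -p 5432:5432 -d postgres:9.2
+until pg_isready -d postgresql://quay:quay@localhost/quay; do sleep 1; done
+```
+
+With the database up, point the test suite at it: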
+ +```sh +TEST_DATABASE_URI=database-connection-string make full-db-test +``` + +## Clients tests (must be manually run) + +The clients test spawns CoreOS virtual machines via Vagrant and VirtualBox and runs real Docker/podman +commands against a *running Quay*. + +NOTE: A Quay *must be running* on the local machine before this test can be run. + +```sh +make clients-test 10.0.2.2:5000 # IP+Port of the Quay on the host machine. +``` diff --git a/_init.py b/_init.py new file mode 100644 index 000000000..84a574b0f --- /dev/null +++ b/_init.py @@ -0,0 +1,47 @@ +import os +import re +import subprocess + +from util.config.provider import get_config_provider + + +ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) +CONF_DIR = os.getenv("QUAYCONF", os.path.join(ROOT_DIR, "conf/")) +STATIC_DIR = os.path.join(ROOT_DIR, 'static/') +STATIC_LDN_DIR = os.path.join(STATIC_DIR, 'ldn/') +STATIC_FONTS_DIR = os.path.join(STATIC_DIR, 'fonts/') +STATIC_WEBFONTS_DIR = os.path.join(STATIC_DIR, 'webfonts/') +TEMPLATE_DIR = os.path.join(ROOT_DIR, 'templates/') + +IS_TESTING = 'TEST' in os.environ +IS_BUILDING = 'BUILDING' in os.environ +IS_KUBERNETES = 'KUBERNETES_SERVICE_HOST' in os.environ +OVERRIDE_CONFIG_DIRECTORY = os.path.join(CONF_DIR, 'stack/') + + +config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py', + testing=IS_TESTING, kubernetes=IS_KUBERNETES) + + +def _get_version_number_changelog(): + try: + with open(os.path.join(ROOT_DIR, 'CHANGELOG.md')) as f: + return re.search(r'(v[0-9]+\.[0-9]+\.[0-9]+)', f.readline()).group(0) + except IOError: + return '' + + +def _get_git_sha(): + if os.path.exists("GIT_HEAD"): + with open(os.path.join(ROOT_DIR, "GIT_HEAD")) as f: + return f.read() + else: + try: + return subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()[0:8] + except (OSError, subprocess.CalledProcessError, Exception): + pass + return "unknown" + + +__version__ = _get_version_number_changelog() +__gitrev__ = _get_git_sha() diff --git a/active_migration.py b/active_migration.py new file mode 100644 index 000000000..693bcaac6 --- /dev/null +++ b/active_migration.py @@ -0,0 +1,22 @@ +from enum import Enum, unique +from data.migrationutil import DefinedDataMigration, MigrationPhase + +@unique +class ERTMigrationFlags(Enum): + """ Flags for the encrypted robot token migration. 
""" + READ_OLD_FIELDS = 'read-old' + WRITE_OLD_FIELDS = 'write-old' + + +ActiveDataMigration = DefinedDataMigration( + 'encrypted_robot_tokens', + 'ENCRYPTED_ROBOT_TOKEN_MIGRATION_PHASE', + [ + MigrationPhase('add-new-fields', 'c13c8052f7a6', [ERTMigrationFlags.READ_OLD_FIELDS, + ERTMigrationFlags.WRITE_OLD_FIELDS]), + MigrationPhase('backfill-then-read-only-new', + '703298a825c2', [ERTMigrationFlags.WRITE_OLD_FIELDS]), + MigrationPhase('stop-writing-both', '703298a825c2', []), + MigrationPhase('remove-old-fields', 'c059b952ed76', []), + ] +) diff --git a/app.py b/app.py index 5dd11062f..33245bee1 100644 --- a/app.py +++ b/app.py @@ -1,60 +1,82 @@ +import hashlib +import json import logging import os -import json from functools import partial -from flask import Flask, request, Request, _request_ctx_stack -from flask.ext.principal import Principal -from flask.ext.login import LoginManager, UserMixin -from flask.ext.mail import Mail -from werkzeug.routing import BaseConverter -from jwkest.jwk import RSAKey + from Crypto.PublicKey import RSA +from flask import Flask, request, Request +from flask_login import LoginManager +from flask_mail import Mail +from flask_principal import Principal +from jwkest.jwk import RSAKey +from werkzeug.contrib.fixers import ProxyFix +from werkzeug.exceptions import HTTPException import features +from _init import (config_provider, CONF_DIR, IS_KUBERNETES, IS_TESTING, OVERRIDE_CONFIG_DIRECTORY, + IS_BUILDING) + +from auth.auth_context import get_authenticated_user from avatars.avatars import Avatar -from storage import Storage -from data import model +from buildman.manager.buildcanceller import BuildCanceller from data import database -from data.userfiles import Userfiles -from data.users import UserAuthentication +from data import model +from data import logs_model +from data.archivedlogs import LogArchive from data.billing import Billing from data.buildlogs import BuildLogs -from data.archivedlogs import LogArchive +from data.cache import get_model_cache +from data.model.user import LoginWrappedDBUser +from data.queue import WorkQueue, BuildMetricQueueReporter from data.userevent import UserEventsBuilderModule -from data.queue import WorkQueue, MetricQueueReporter +from data.userfiles import Userfiles +from data.users import UserAuthentication +from data.registry_model import registry_model +from path_converters import RegexConverter, RepositoryPathConverter, APIRepositoryPathConverter +from oauth.services.github import GithubOAuthService +from oauth.services.gitlab import GitLabOAuthService +from oauth.loginmanager import OAuthLoginManager +from storage import Storage +from util.config import URLSchemeAndHostname +from util.log import filter_logs from util import get_app_url +from util.secscan.secscan_util import get_blob_download_uri_getter +from util.ipresolver import IPResolver from util.saas.analytics import Analytics +from util.saas.useranalytics import UserAnalytics from util.saas.exceptionlog import Sentry from util.names import urn_generator -from util.config.oauth import (GoogleOAuthConfig, GithubOAuthConfig, GitLabOAuthConfig, - DexOAuthConfig) - -from util.security.signing import Signer -from util.saas.cloudwatch import start_cloudwatch_sender -from util.saas.metricqueue import MetricQueue -from util.config.provider import get_config_provider from util.config.configutil import generate_secret_key from util.config.superusermanager import SuperUserManager +from util.label_validator import LabelValidator +from util.metrics.metricqueue import 
MetricQueue +from util.metrics.prometheus import PrometheusPlugin +from util.saas.cloudwatch import start_cloudwatch_sender from util.secscan.api import SecurityScannerAPI +from util.repomirror.api import RepoMirrorAPI +from util.tufmetadata.api import TUFMetadataAPI +from util.security.instancekeys import InstanceKeys +from util.security.signing import Signer -OVERRIDE_CONFIG_DIRECTORY = 'conf/stack/' -OVERRIDE_CONFIG_YAML_FILENAME = 'conf/stack/config.yaml' -OVERRIDE_CONFIG_PY_FILENAME = 'conf/stack/config.py' + +OVERRIDE_CONFIG_YAML_FILENAME = os.path.join(CONF_DIR, 'stack/config.yaml') +OVERRIDE_CONFIG_PY_FILENAME = os.path.join(CONF_DIR, 'stack/config.py') OVERRIDE_CONFIG_KEY = 'QUAY_OVERRIDE_CONFIG' DOCKER_V2_SIGNINGKEY_FILENAME = 'docker_v2.pem' +INIT_SCRIPTS_LOCATION = '/conf/init/' app = Flask(__name__) logger = logging.getLogger(__name__) # Instantiate the configuration. -is_testing = 'TEST' in os.environ -is_kubernetes = 'KUBERNETES_SERVICE_HOST' in os.environ -config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py', - testing=is_testing, kubernetes=is_kubernetes) +is_testing = IS_TESTING +is_kubernetes = IS_KUBERNETES +is_building = IS_BUILDING if is_testing: from test.testconfig import TestConfig @@ -73,6 +95,31 @@ config_provider.update_app_config(app.config) environ_config = json.loads(os.environ.get(OVERRIDE_CONFIG_KEY, '{}')) app.config.update(environ_config) +# Fix remote address handling for Flask. +if app.config.get('PROXY_COUNT', 1): + app.wsgi_app = ProxyFix(app.wsgi_app, num_proxies=app.config.get('PROXY_COUNT', 1)) + +# Ensure the V3 upgrade key is specified correctly. If not, simply fail. +# TODO: Remove for V3.1. +if not is_testing and not is_building and app.config.get('SETUP_COMPLETE', False): + v3_upgrade_mode = app.config.get('V3_UPGRADE_MODE') + if v3_upgrade_mode is None: + raise Exception('Configuration flag `V3_UPGRADE_MODE` must be set. Please check the upgrade docs') + + if (v3_upgrade_mode != 'background' + and v3_upgrade_mode != 'complete' + and v3_upgrade_mode != 'production-transition' + and v3_upgrade_mode != 'post-oci-rollout' + and v3_upgrade_mode != 'post-oci-roll-back-compat'): + raise Exception('Invalid value for config `V3_UPGRADE_MODE`. Please check the upgrade docs') + +# Split the registry model based on config. +# TODO: Remove once we are fully on the OCI data model. +registry_model.setup_split(app.config.get('OCI_NAMESPACE_PROPORTION') or 0, + app.config.get('OCI_NAMESPACE_WHITELIST') or set(), + app.config.get('V22_NAMESPACE_WHITELIST') or set(), + app.config.get('V3_UPGRADE_MODE')) + # Allow user to define a custom storage preference for the local instance. _distributed_storage_preference = os.environ.get('QUAY_DISTRIBUTED_STORAGE_PREFERENCE', '').split() if _distributed_storage_preference: @@ -85,12 +132,17 @@ if app.config['SECRET_KEY'] is None: # If the "preferred" scheme is https, then http is not allowed. Therefore, ensure we have a secure # session cookie. -if app.config['PREFERRED_URL_SCHEME'] == 'https': +if (app.config['PREFERRED_URL_SCHEME'] == 'https' and + not app.config.get('FORCE_NONSECURE_SESSION_COOKIE', False)): app.config['SESSION_COOKIE_SECURE'] = True # Load features from config. 
features.import_features(app.config) +CONFIG_DIGEST = hashlib.sha256(json.dumps(app.config, default=str)).hexdigest()[0:8] + +logger.debug("Loaded config", extra={"config": app.config}) + class RequestWithId(Request): request_gen = staticmethod(urn_generator(['request'])) @@ -102,100 +154,136 @@ class RequestWithId(Request): @app.before_request def _request_start(): - logger.debug('Starting request: %s', request.path) + if os.getenv('PYDEV_DEBUG', None): + import pydevd + host, port = os.getenv('PYDEV_DEBUG').split(':') + pydevd.settrace(host, port=int(port), stdoutToServer=True, stderrToServer=True, suspend=False) + + logger.debug('Starting request: %s (%s)', request.request_id, request.path, + extra={"request_id": request.request_id}) + + +DEFAULT_FILTER = lambda x: '[FILTERED]' +FILTERED_VALUES = [ + {'key': ['password'], 'fn': DEFAULT_FILTER}, + {'key': ['user', 'password'], 'fn': DEFAULT_FILTER}, + {'key': ['blob'], 'fn': lambda x: x[0:8]} +] @app.after_request -def _request_end(r): - logger.debug('Ending request: %s', request.path) - return r +def _request_end(resp): + try: + jsonbody = request.get_json(force=True, silent=True) + except HTTPException: + jsonbody = None + + values = request.values.to_dict() + + if jsonbody and not isinstance(jsonbody, dict): + jsonbody = {'_parsererror': jsonbody} + + if isinstance(values, dict): + filter_logs(values, FILTERED_VALUES) + + extra = { + "endpoint": request.endpoint, + "request_id" : request.request_id, + "remote_addr": request.remote_addr, + "http_method": request.method, + "original_url": request.url, + "path": request.path, + "parameters": values, + "json_body": jsonbody, + "confsha": CONFIG_DIGEST, + } + + if request.user_agent is not None: + extra["user-agent"] = request.user_agent.string + + logger.debug('Ending request: %s (%s)', request.request_id, request.path, extra=extra) + return resp -class InjectingFilter(logging.Filter): - def filter(self, record): - if _request_ctx_stack.top is not None: - record.msg = '[%s] %s' % (request.request_id, record.msg) - return True root_logger = logging.getLogger() -# Add the request id filter to all handlers of the root logger -for handler in root_logger.handlers: - handler.addFilter(InjectingFilter()) - app.request_class = RequestWithId # Register custom converters. -class RegexConverter(BaseConverter): - """ Converter for handling custom regular expression patterns in paths. """ - def __init__(self, url_map, regex_value): - super(RegexConverter, self).__init__(url_map) - self.regex = regex_value - - -class RepositoryPathConverter(BaseConverter): - """ Converter for handling repository paths. Handles both library and non-library paths (if - configured). - """ - def __init__(self, url_map): - super(RepositoryPathConverter, self).__init__(url_map) - self.weight = 200 - - if features.LIBRARY_SUPPORT: - # Allow names without namespaces. - self.regex = r'[^/]+(/[^/]+)?' - else: - self.regex = r'([^/]+/[^/]+)' - - -class APIRepositoryPathConverter(BaseConverter): - """ Converter for handling repository paths. Does not handle library paths. 
- """ - def __init__(self, url_map): - super(APIRepositoryPathConverter, self).__init__(url_map) - self.weight = 200 - self.regex = r'([^/]+/[^/]+)' - - app.url_map.converters['regex'] = RegexConverter app.url_map.converters['repopath'] = RepositoryPathConverter app.url_map.converters['apirepopath'] = APIRepositoryPathConverter Principal(app, use_sessions=False) +tf = app.config['DB_TRANSACTION_FACTORY'] + +model_cache = get_model_cache(app.config) avatar = Avatar(app) login_manager = LoginManager(app) mail = Mail(app) -metric_queue = MetricQueue() -storage = Storage(app, metric_queue) +prometheus = PrometheusPlugin(app) +metric_queue = MetricQueue(prometheus) +chunk_cleanup_queue = WorkQueue(app.config['CHUNK_CLEANUP_QUEUE_NAME'], tf, metric_queue=metric_queue) +instance_keys = InstanceKeys(app) +ip_resolver = IPResolver(app) +storage = Storage(app, metric_queue, chunk_cleanup_queue, instance_keys, config_provider, ip_resolver) userfiles = Userfiles(app, storage) log_archive = LogArchive(app, storage) analytics = Analytics(app) +user_analytics = UserAnalytics(app) billing = Billing(app) sentry = Sentry(app) build_logs = BuildLogs(app) authentication = UserAuthentication(app, config_provider, OVERRIDE_CONFIG_DIRECTORY) userevents = UserEventsBuilderModule(app) superusers = SuperUserManager(app) -signer = Signer(app, OVERRIDE_CONFIG_DIRECTORY) +signer = Signer(app, config_provider) +instance_keys = InstanceKeys(app) +label_validator = LabelValidator(app) +build_canceller = BuildCanceller(app) + start_cloudwatch_sender(metric_queue, app) -tf = app.config['DB_TRANSACTION_FACTORY'] +github_trigger = GithubOAuthService(app.config, 'GITHUB_TRIGGER_CONFIG') +gitlab_trigger = GitLabOAuthService(app.config, 'GITLAB_TRIGGER_CONFIG') -github_login = GithubOAuthConfig(app.config, 'GITHUB_LOGIN_CONFIG') -github_trigger = GithubOAuthConfig(app.config, 'GITHUB_TRIGGER_CONFIG') -gitlab_trigger = GitLabOAuthConfig(app.config, 'GITLAB_TRIGGER_CONFIG') -google_login = GoogleOAuthConfig(app.config, 'GOOGLE_LOGIN_CONFIG') -dex_login = DexOAuthConfig(app.config, 'DEX_LOGIN_CONFIG') +oauth_login = OAuthLoginManager(app.config) +oauth_apps = [github_trigger, gitlab_trigger] -oauth_apps = [github_login, github_trigger, gitlab_trigger, google_login, dex_login] - -image_replication_queue = WorkQueue(app.config['REPLICATION_QUEUE_NAME'], tf) +image_replication_queue = WorkQueue(app.config['REPLICATION_QUEUE_NAME'], tf, + has_namespace=False, metric_queue=metric_queue) dockerfile_build_queue = WorkQueue(app.config['DOCKERFILE_BUILD_QUEUE_NAME'], tf, - reporter=MetricQueueReporter(metric_queue)) -notification_queue = WorkQueue(app.config['NOTIFICATION_QUEUE_NAME'], tf) -secscan_notification_queue = WorkQueue(app.config['SECSCAN_NOTIFICATION_QUEUE_NAME'], tf) -secscan_api = SecurityScannerAPI(app, app.config, storage) + metric_queue=metric_queue, + reporter=BuildMetricQueueReporter(metric_queue), + has_namespace=True) +notification_queue = WorkQueue(app.config['NOTIFICATION_QUEUE_NAME'], tf, has_namespace=True, + metric_queue=metric_queue) +secscan_notification_queue = WorkQueue(app.config['SECSCAN_NOTIFICATION_QUEUE_NAME'], tf, + has_namespace=False, + metric_queue=metric_queue) +export_action_logs_queue = WorkQueue(app.config['EXPORT_ACTION_LOGS_QUEUE_NAME'], tf, + has_namespace=True, + metric_queue=metric_queue) + +# Note: We set `has_namespace` to `False` here, as we explicitly want this queue to not be emptied +# when a namespace is marked for deletion. 
+namespace_gc_queue = WorkQueue(app.config['NAMESPACE_GC_QUEUE_NAME'], tf, has_namespace=False, + metric_queue=metric_queue) + +all_queues = [image_replication_queue, dockerfile_build_queue, notification_queue, + secscan_notification_queue, chunk_cleanup_queue, namespace_gc_queue] + +url_scheme_and_hostname = URLSchemeAndHostname(app.config['PREFERRED_URL_SCHEME'], app.config['SERVER_HOSTNAME']) +secscan_api = SecurityScannerAPI(app.config, storage, app.config['SERVER_HOSTNAME'], app.config['HTTPCLIENT'], + uri_creator=get_blob_download_uri_getter(app.test_request_context('/'), url_scheme_and_hostname), + instance_keys=instance_keys) + +repo_mirror_api = RepoMirrorAPI(app.config, app.config['SERVER_HOSTNAME'], app.config['HTTPCLIENT'], + instance_keys=instance_keys) + +tuf_metadata_api = TUFMetadataAPI(app, app.config) # Check for a key in config. If none found, generate a new signing key for Docker V2 manifests. _v2_key_path = os.path.join(OVERRIDE_CONFIG_DIRECTORY, DOCKER_V2_SIGNINGKEY_FILENAME) @@ -204,34 +292,23 @@ if os.path.exists(_v2_key_path): else: docker_v2_signing_key = RSAKey(key=RSA.generate(2048)) +# Configure the database. +if app.config.get('DATABASE_SECRET_KEY') is None and app.config.get('SETUP_COMPLETE', False): + raise Exception('Missing DATABASE_SECRET_KEY in config; did you perhaps forget to add it?') + database.configure(app.config) + model.config.app_config = app.config model.config.store = storage +model.config.register_image_cleanup_callback(secscan_api.cleanup_layers) +model.config.register_repo_cleanup_callback(tuf_metadata_api.delete_metadata) + @login_manager.user_loader def load_user(user_uuid): - logger.debug('User loader loading deferred user with uuid: %s' % user_uuid) + logger.debug('User loader loading deferred user with uuid: %s', user_uuid) return LoginWrappedDBUser(user_uuid) -class LoginWrappedDBUser(UserMixin): - def __init__(self, user_uuid, db_user=None): - self._uuid = user_uuid - self._db_user = db_user - - def db_user(self): - if not self._db_user: - self._db_user = model.user.get_user_by_uuid(self._uuid) - return self._db_user - - @property - def is_authenticated(self): - return self.db_user() is not None - - @property - def is_active(self): - return self.db_user().verified - - def get_id(self): - return unicode(self._uuid) +logs_model.configure(app.config) get_app_url = partial(get_app_url, app.config) diff --git a/application.py b/application.py index 810109d1d..b7f478841 100644 --- a/application.py +++ b/application.py @@ -1,6 +1,12 @@ +# NOTE: Must be before we import or call anything that may be synchronous. 
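+# (monkey.patch_all() replaces blocking stdlib primitives such as socket, ssl and threading with
+# gevent's cooperative versions; modules imported before the patch could hold references to the
+# unpatched originals and block the event loop.)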
+from gevent import monkey +monkey.patch_all() + +import os import logging import logging.config +from util.log import logfile_path from app import app as application @@ -12,5 +18,5 @@ import secscan if __name__ == '__main__': - logging.config.fileConfig('conf/logging_debug.conf', disable_existing_loggers=False) + logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False) application.run(port=5000, debug=True, threaded=True, host='0.0.0.0') diff --git a/auth/auth.py b/auth/auth.py deleted file mode 100644 index e225ab5c4..000000000 --- a/auth/auth.py +++ /dev/null @@ -1,230 +0,0 @@ -import logging - -from functools import wraps -from uuid import UUID -from datetime import datetime -from flask import request, session -from flask.ext.principal import identity_changed, Identity -from flask.ext.login import current_user -from flask.sessions import SecureCookieSessionInterface, BadSignature -from base64 import b64decode - -import scopes - -from data import model -from app import app, authentication -from endpoints.exception import InvalidToken, ExpiredToken -from permissions import QuayDeferredPermissionUser -from auth_context import (set_authenticated_user, set_validated_token, set_grant_context, - set_validated_oauth_token) -from util.http import abort - - -logger = logging.getLogger(__name__) - -SIGNATURE_PREFIX = 'sigv2=' - -def _load_user_from_cookie(): - if not current_user.is_anonymous: - try: - # Attempt to parse the user uuid to make sure the cookie has the right value type - UUID(current_user.get_id()) - except ValueError: - return None - - logger.debug('Loading user from cookie: %s', current_user.get_id()) - db_user = current_user.db_user() - if db_user is not None: - # Don't allow disabled users to login. - if not db_user.enabled: - return None - - set_authenticated_user(db_user) - loaded = QuayDeferredPermissionUser.for_user(db_user) - identity_changed.send(app, identity=loaded) - return db_user - - return None - - -def _validate_and_apply_oauth_token(token): - validated = model.oauth.validate_access_token(token) - if not validated: - logger.warning('OAuth access token could not be validated: %s', token) - raise InvalidToken('OAuth access token could not be validated: {token}'.format(token=token)) - elif validated.expires_at <= datetime.utcnow(): - logger.info('OAuth access with an expired token: %s', token) - raise ExpiredToken('OAuth access token has expired: {token}'.format(token=token)) - - # Don't allow disabled users to login. 
- if not validated.authorized_user.enabled: - return None - - # We have a valid token - scope_set = scopes.scopes_from_scope_string(validated.scope) - logger.debug('Successfully validated oauth access token: %s with scope: %s', token, - scope_set) - - set_authenticated_user(validated.authorized_user) - set_validated_oauth_token(validated) - - new_identity = QuayDeferredPermissionUser.for_user(validated.authorized_user, scope_set) - identity_changed.send(app, identity=new_identity) - - -def _process_basic_auth(auth): - normalized = [part.strip() for part in auth.split(' ') if part] - if normalized[0].lower() != 'basic' or len(normalized) != 2: - logger.debug('Invalid basic auth format.') - return - - credentials = [part.decode('utf-8') for part in b64decode(normalized[1]).split(':', 1)] - - if len(credentials) != 2: - logger.debug('Invalid basic auth credential format.') - - elif credentials[0] == '$token': - # Use as token auth - try: - token = model.token.load_token_data(credentials[1]) - logger.debug('Successfully validated token: %s', credentials[1]) - set_validated_token(token) - - identity_changed.send(app, identity=Identity(token.code, 'token')) - return - - except model.DataModelException: - logger.debug('Invalid token: %s', credentials[1]) - - elif credentials[0] == '$oauthtoken': - oauth_token = credentials[1] - _validate_and_apply_oauth_token(oauth_token) - - elif '+' in credentials[0]: - logger.debug('Trying robot auth with credentials %s', str(credentials)) - # Use as robot auth - try: - robot = model.user.verify_robot(credentials[0], credentials[1]) - logger.debug('Successfully validated robot: %s', credentials[0]) - set_authenticated_user(robot) - - deferred_robot = QuayDeferredPermissionUser.for_user(robot) - identity_changed.send(app, identity=deferred_robot) - return - except model.InvalidRobotException: - logger.debug('Invalid robot or password for robot: %s', credentials[0]) - - else: - (authenticated, _) = authentication.verify_and_link_user(credentials[0], credentials[1], - basic_auth=True) - - if authenticated: - logger.debug('Successfully validated user: %s', authenticated.username) - set_authenticated_user(authenticated) - - new_identity = QuayDeferredPermissionUser.for_user(authenticated) - identity_changed.send(app, identity=new_identity) - return - - # We weren't able to authenticate via basic auth. 
- logger.debug('Basic auth present but could not be validated.') - - -def generate_signed_token(grants, user_context): - ser = SecureCookieSessionInterface().get_signing_serializer(app) - data_to_sign = { - 'grants': grants, - 'user_context': user_context, - } - - encrypted = ser.dumps(data_to_sign) - return '{0}{1}'.format(SIGNATURE_PREFIX, encrypted) - - -def _process_signed_grant(auth): - normalized = [part.strip() for part in auth.split(' ') if part] - if normalized[0].lower() != 'token' or len(normalized) != 2: - logger.debug('Not a token: %s', auth) - return - - if not normalized[1].startswith(SIGNATURE_PREFIX): - logger.debug('Not a signed grant token: %s', auth) - return - - encrypted = normalized[1][len(SIGNATURE_PREFIX):] - ser = SecureCookieSessionInterface().get_signing_serializer(app) - - try: - token_data = ser.loads(encrypted, max_age=app.config['SIGNED_GRANT_EXPIRATION_SEC']) - except BadSignature: - logger.warning('Signed grant could not be validated: %s', encrypted) - abort(401, message='Signed grant could not be validated: %(auth)s', issue='invalid-auth-token', - auth=auth) - - logger.debug('Successfully validated signed grant with data: %s', token_data) - - loaded_identity = Identity(None, 'signed_grant') - - if token_data['user_context']: - set_grant_context({ - 'user': token_data['user_context'], - 'kind': 'user', - }) - - loaded_identity.provides.update(token_data['grants']) - identity_changed.send(app, identity=loaded_identity) - - -def process_oauth(func): - @wraps(func) - def wrapper(*args, **kwargs): - auth = request.headers.get('authorization', '') - if auth: - normalized = [part.strip() for part in auth.split(' ') if part] - if normalized[0].lower() != 'bearer' or len(normalized) != 2: - logger.debug('Invalid oauth bearer token format.') - return func(*args, **kwargs) - - token = normalized[1] - _validate_and_apply_oauth_token(token) - elif _load_user_from_cookie() is None: - logger.debug('No auth header or login cookie.') - return func(*args, **kwargs) - return wrapper - - -def process_auth(func): - @wraps(func) - def wrapper(*args, **kwargs): - auth = request.headers.get('authorization', '') - - if auth: - logger.debug('Validating auth header: %s', auth) - _process_signed_grant(auth) - _process_basic_auth(auth) - else: - logger.debug('No auth header.') - - return func(*args, **kwargs) - return wrapper - - -def require_session_login(func): - @wraps(func) - def wrapper(*args, **kwargs): - loaded = _load_user_from_cookie() - if loaded is None or loaded.organization: - abort(401, message='Method requires login and no valid login could be loaded.') - return func(*args, **kwargs) - return wrapper - - -def extract_namespace_repo_from_session(func): - @wraps(func) - def wrapper(*args, **kwargs): - if 'namespace' not in session or 'repository' not in session: - logger.error('Unable to load namespace or repository from session: %s', session) - abort(400, message='Missing namespace in request') - - return func(session['namespace'], session['repository'], *args, **kwargs) - return wrapper diff --git a/auth/auth_context.py b/auth/auth_context.py index f4a1206aa..8cb57f691 100644 --- a/auth/auth_context.py +++ b/auth/auth_context.py @@ -1,69 +1,21 @@ -import logging - from flask import _request_ctx_stack -from data import model - - -logger = logging.getLogger(__name__) +def get_authenticated_context(): + """ Returns the auth context for the current request context, if any. 
""" + return getattr(_request_ctx_stack.top, 'authenticated_context', None) def get_authenticated_user(): - user = getattr(_request_ctx_stack.top, 'authenticated_user', None) - if not user: - user_uuid = getattr(_request_ctx_stack.top, 'authenticated_user_uuid', None) - if not user_uuid: - logger.debug('No authenticated user or deferred user uuid.') - return None - - logger.debug('Loading deferred authenticated user.') - loaded = model.user.get_user_by_uuid(user_uuid) - if not loaded.enabled: - return None - - set_authenticated_user(loaded) - user = loaded - - if user: - logger.debug('Returning authenticated user: %s', user.username) - return user - - -def set_authenticated_user(user_or_robot): - if not user_or_robot.enabled: - raise Exception('Attempt to authenticate a disabled user/robot: %s' % user_or_robot.username) - - ctx = _request_ctx_stack.top - ctx.authenticated_user = user_or_robot - - -def get_grant_context(): - return getattr(_request_ctx_stack.top, 'grant_context', None) - - -def set_grant_context(grant_context): - ctx = _request_ctx_stack.top - ctx.grant_context = grant_context - - -def set_authenticated_user_deferred(user_or_robot_db_uuid): - logger.debug('Deferring loading of authenticated user object with uuid: %s', user_or_robot_db_uuid) - ctx = _request_ctx_stack.top - ctx.authenticated_user_uuid = user_or_robot_db_uuid - + """ Returns the authenticated user, if any, or None if none. """ + context = get_authenticated_context() + return context.authed_user if context else None def get_validated_oauth_token(): - return getattr(_request_ctx_stack.top, 'validated_oauth_token', None) + """ Returns the authenticated and validated OAuth access token, if any, or None if none. """ + context = get_authenticated_context() + return context.authed_oauth_token if context else None - -def set_validated_oauth_token(token): +def set_authenticated_context(auth_context): + """ Sets the auth context for the current request context to that given. """ ctx = _request_ctx_stack.top - ctx.validated_oauth_token = token - - -def get_validated_token(): - return getattr(_request_ctx_stack.top, 'validated_token', None) - - -def set_validated_token(token): - ctx = _request_ctx_stack.top - ctx.validated_token = token + ctx.authenticated_context = auth_context + return auth_context diff --git a/auth/auth_context_type.py b/auth/auth_context_type.py new file mode 100644 index 000000000..012222243 --- /dev/null +++ b/auth/auth_context_type.py @@ -0,0 +1,437 @@ +import logging + +from abc import ABCMeta, abstractmethod +from cachetools.func import lru_cache +from six import add_metaclass + +from app import app +from data import model + +from flask_principal import Identity, identity_changed + +from auth.auth_context import set_authenticated_context +from auth.context_entity import ContextEntityKind, CONTEXT_ENTITY_HANDLERS +from auth.permissions import QuayDeferredPermissionUser +from auth.scopes import scopes_from_scope_string + +logger = logging.getLogger(__name__) + +@add_metaclass(ABCMeta) +class AuthContext(object): + """ + Interface that represents the current context of authentication. + """ + + @property + @abstractmethod + def entity_kind(self): + """ Returns the kind of the entity in this auth context. """ + pass + + @property + @abstractmethod + def is_anonymous(self): + """ Returns true if this is an anonymous context. """ + pass + + @property + @abstractmethod + def authed_oauth_token(self): + """ Returns the authenticated OAuth token, if any. 
""" + pass + + @property + @abstractmethod + def authed_user(self): + """ Returns the authenticated user, whether directly, or via an OAuth or access token. Note that + this property will also return robot accounts. + """ + pass + + @property + @abstractmethod + def has_nonrobot_user(self): + """ Returns whether a user (not a robot) was authenticated successfully. """ + pass + + @property + @abstractmethod + def identity(self): + """ Returns the identity for the auth context. """ + pass + + @property + @abstractmethod + def description(self): + """ Returns a human-readable and *public* description of the current auth context. """ + pass + + @property + @abstractmethod + def credential_username(self): + """ Returns the username to create credentials for this context's entity, if any. """ + pass + + @abstractmethod + def analytics_id_and_public_metadata(self): + """ Returns the analytics ID and public log metadata for this auth context. """ + pass + + @abstractmethod + def apply_to_request_context(self): + """ Applies this auth result to the auth context and Flask-Principal. """ + pass + + @abstractmethod + def to_signed_dict(self): + """ Serializes the auth context into a dictionary suitable for inclusion in a JWT or other + form of signed serialization. + """ + pass + + @property + @abstractmethod + def unique_key(self): + """ Returns a key that is unique to this auth context type and its data. For example, an + instance of the auth context type for the user might be a string of the form + `user-{user-uuid}`. Callers should treat this key as opaque and not rely on the contents + for anything besides uniqueness. This is typically used by callers when they'd like to + check cache but not hit the database to get a fully validated auth context. + """ + pass + + +class ValidatedAuthContext(AuthContext): + """ ValidatedAuthContext represents the loaded, authenticated and validated auth information + for the current request context. + """ + def __init__(self, user=None, token=None, oauthtoken=None, robot=None, appspecifictoken=None, + signed_data=None): + # Note: These field names *MUST* match the string values of the kinds defined in + # ContextEntityKind. + self.user = user + self.robot = robot + self.token = token + self.oauthtoken = oauthtoken + self.appspecifictoken = appspecifictoken + self.signed_data = signed_data + + def tuple(self): + return vars(self).values() + + def __eq__(self, other): + return self.tuple() == other.tuple() + + @property + def entity_kind(self): + """ Returns the kind of the entity in this auth context. """ + for kind in ContextEntityKind: + if hasattr(self, kind.value) and getattr(self, kind.value): + return kind + + return ContextEntityKind.anonymous + + @property + def authed_user(self): + """ Returns the authenticated user, whether directly, or via an OAuth token. Note that this + will also return robot accounts. 
+ """ + authed_user = self._authed_user() + if authed_user is not None and not authed_user.enabled: + logger.warning('Attempt to reference a disabled user/robot: %s', authed_user.username) + return None + + return authed_user + + @property + def authed_oauth_token(self): + return self.oauthtoken + + def _authed_user(self): + if self.oauthtoken: + return self.oauthtoken.authorized_user + + if self.appspecifictoken: + return self.appspecifictoken.user + + if self.signed_data: + return model.user.get_user(self.signed_data['user_context']) + + return self.user if self.user else self.robot + + @property + def is_anonymous(self): + """ Returns true if this is an anonymous context. """ + return not self.authed_user and not self.token and not self.signed_data + + @property + def has_nonrobot_user(self): + """ Returns whether a user (not a robot) was authenticated successfully. """ + return bool(self.authed_user and not self.robot) + + @property + def identity(self): + """ Returns the identity for the auth context. """ + if self.oauthtoken: + scope_set = scopes_from_scope_string(self.oauthtoken.scope) + return QuayDeferredPermissionUser.for_user(self.oauthtoken.authorized_user, scope_set) + + if self.authed_user: + return QuayDeferredPermissionUser.for_user(self.authed_user) + + if self.token: + return Identity(self.token.get_code(), 'token') + + if self.signed_data: + identity = Identity(None, 'signed_grant') + identity.provides.update(self.signed_data['grants']) + return identity + + return None + + @property + def entity_reference(self): + """ Returns the DB object reference for this context's entity. """ + if self.entity_kind == ContextEntityKind.anonymous: + return None + + return getattr(self, self.entity_kind.value) + + @property + def description(self): + """ Returns a human-readable and *public* description of the current auth context. """ + handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]() + return handler.description(self.entity_reference) + + @property + def credential_username(self): + """ Returns the username to create credentials for this context's entity, if any. """ + handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]() + return handler.credential_username(self.entity_reference) + + def analytics_id_and_public_metadata(self): + """ Returns the analytics ID and public log metadata for this auth context. """ + handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]() + return handler.analytics_id_and_public_metadata(self.entity_reference) + + def apply_to_request_context(self): + """ Applies this auth result to the auth context and Flask-Principal. """ + # Save to the request context. + set_authenticated_context(self) + + # Set the identity for Flask-Principal. + if self.identity: + identity_changed.send(app, identity=self.identity) + + @property + def unique_key(self): + signed_dict = self.to_signed_dict() + return '%s-%s' % (signed_dict['entity_kind'], signed_dict.get('entity_reference', '(anon)')) + + def to_signed_dict(self): + """ Serializes the auth context into a dictionary suitable for inclusion in a JWT or other + form of signed serialization. + """ + dict_data = { + 'version': 2, + 'entity_kind': self.entity_kind.value, + } + + if self.entity_kind != ContextEntityKind.anonymous: + handler = CONTEXT_ENTITY_HANDLERS[self.entity_kind]() + dict_data.update({ + 'entity_reference': handler.get_serialized_entity_reference(self.entity_reference), + }) + + # Add legacy information. + # TODO: Remove this all once the new code is fully deployed. 
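+    # Until all readers understand `entity_kind`/`entity_reference`, both the new fields above
+    # and the legacy `kind`-keyed fields below are emitted side by side.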
+ if self.token: + dict_data.update({ + 'kind': 'token', + 'token': self.token.code, + }) + + if self.oauthtoken: + dict_data.update({ + 'kind': 'oauth', + 'oauth': self.oauthtoken.uuid, + 'user': self.authed_user.username, + }) + + if self.user or self.robot: + dict_data.update({ + 'kind': 'user', + 'user': self.authed_user.username, + }) + + if self.appspecifictoken: + dict_data.update({ + 'kind': 'user', + 'user': self.authed_user.username, + }) + + if self.is_anonymous: + dict_data.update({ + 'kind': 'anonymous', + }) + + # End of legacy information. + return dict_data + +class SignedAuthContext(AuthContext): + """ SignedAuthContext represents an auth context loaded from a signed token of some kind, + such as a JWT. Unlike ValidatedAuthContext, SignedAuthContext operates lazily, only loading + the actual {user, robot, token, etc} when requested. This allows registry operations that + only need to check if *some* entity is present to do so, without hitting the database. + """ + def __init__(self, kind, signed_data, v1_dict_format): + self.kind = kind + self.signed_data = signed_data + self.v1_dict_format = v1_dict_format + + @property + def unique_key(self): + if self.v1_dict_format: + # Since V1 data format is verbose, just use the validated version to get the key. + return self._get_validated().unique_key + + signed_dict = self.signed_data + return '%s-%s' % (signed_dict['entity_kind'], signed_dict.get('entity_reference', '(anon)')) + + @classmethod + def build_from_signed_dict(cls, dict_data, v1_dict_format=False): + if not v1_dict_format: + entity_kind = ContextEntityKind(dict_data.get('entity_kind', 'anonymous')) + return SignedAuthContext(entity_kind, dict_data, v1_dict_format) + + # Legacy handling. + # TODO: Remove this all once the new code is fully deployed. + kind_string = dict_data.get('kind', 'anonymous') + if kind_string == 'oauth': + kind_string = 'oauthtoken' + + kind = ContextEntityKind(kind_string) + return SignedAuthContext(kind, dict_data, v1_dict_format) + + @lru_cache(maxsize=1) + def _get_validated(self): + """ Returns a ValidatedAuthContext for this signed context, resolving all the necessary + references. + """ + if not self.v1_dict_format: + if self.kind == ContextEntityKind.anonymous: + return ValidatedAuthContext() + + serialized_entity_reference = self.signed_data['entity_reference'] + handler = CONTEXT_ENTITY_HANDLERS[self.kind]() + entity_reference = handler.deserialize_entity_reference(serialized_entity_reference) + if entity_reference is None: + logger.debug('Could not deserialize entity reference `%s` under kind `%s`', + serialized_entity_reference, self.kind) + return ValidatedAuthContext() + + return ValidatedAuthContext(**{self.kind.value: entity_reference}) + + # Legacy handling. + # TODO: Remove this all once the new code is fully deployed. 
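+    # V1 payloads only carry a `kind` string plus ad-hoc fields, so each kind is resolved back
+    # to its database object explicitly below.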
+ kind_string = self.signed_data.get('kind', 'anonymous') + if kind_string == 'oauth': + kind_string = 'oauthtoken' + + kind = ContextEntityKind(kind_string) + if kind == ContextEntityKind.anonymous: + return ValidatedAuthContext() + + if kind == ContextEntityKind.user or kind == ContextEntityKind.robot: + user = model.user.get_user(self.signed_data.get('user', '')) + if not user: + return None + + return ValidatedAuthContext(robot=user) if user.robot else ValidatedAuthContext(user=user) + + if kind == ContextEntityKind.token: + token = model.token.load_token_data(self.signed_data.get('token')) + if not token: + return None + + return ValidatedAuthContext(token=token) + + if kind == ContextEntityKind.oauthtoken: + user = model.user.get_user(self.signed_data.get('user', '')) + if not user: + return None + + token_uuid = self.signed_data.get('oauth', '') + oauthtoken = model.oauth.lookup_access_token_for_user(user, token_uuid) + if not oauthtoken: + return None + + return ValidatedAuthContext(oauthtoken=oauthtoken) + + raise Exception('Unknown auth context kind `%s` when deserializing %s' % (kind, + self.signed_data)) + # End of legacy handling. + + @property + def entity_kind(self): + """ Returns the kind of the entity in this auth context. """ + return self.kind + + @property + def is_anonymous(self): + """ Returns true if this is an anonymous context. """ + return self.kind == ContextEntityKind.anonymous + + @property + def authed_user(self): + """ Returns the authenticated user, whether directly, or via an OAuth or access token. Note that + this property will also return robot accounts. + """ + if self.kind == ContextEntityKind.anonymous: + return None + + return self._get_validated().authed_user + + @property + def authed_oauth_token(self): + if self.kind == ContextEntityKind.anonymous: + return None + + return self._get_validated().authed_oauth_token + + @property + def has_nonrobot_user(self): + """ Returns whether a user (not a robot) was authenticated successfully. """ + if self.kind == ContextEntityKind.anonymous: + return False + + return self._get_validated().has_nonrobot_user + + @property + def identity(self): + """ Returns the identity for the auth context. """ + return self._get_validated().identity + + @property + def description(self): + """ Returns a human-readable and *public* description of the current auth context. """ + return self._get_validated().description + + @property + def credential_username(self): + """ Returns the username to create credentials for this context's entity, if any. """ + return self._get_validated().credential_username + + def analytics_id_and_public_metadata(self): + """ Returns the analytics ID and public log metadata for this auth context. """ + return self._get_validated().analytics_id_and_public_metadata() + + def apply_to_request_context(self): + """ Applies this auth result to the auth context and Flask-Principal. """ + return self._get_validated().apply_to_request_context() + + def to_signed_dict(self): + """ Serializes the auth context into a dictionary suitable for inclusion in a JWT or other + form of signed serialization. 
+ """ + return self.signed_data diff --git a/auth/basic.py b/auth/basic.py new file mode 100644 index 000000000..926450ad6 --- /dev/null +++ b/auth/basic.py @@ -0,0 +1,58 @@ +import logging + +from base64 import b64decode +from flask import request + +from auth.credentials import validate_credentials +from auth.validateresult import ValidateResult, AuthKind + +logger = logging.getLogger(__name__) + +def has_basic_auth(username): + """ Returns true if a basic auth header exists with a username and password pair that validates + against the internal authentication system. Returns True on full success and False on any + failure (missing header, invalid header, invalid credentials, etc). + """ + auth_header = request.headers.get('authorization', '') + result = validate_basic_auth(auth_header) + return result.has_nonrobot_user and result.context.user.username == username + + +def validate_basic_auth(auth_header): + """ Validates the specified basic auth header, returning whether its credentials point + to a valid user or token. + """ + if not auth_header: + return ValidateResult(AuthKind.basic, missing=True) + + logger.debug('Attempt to process basic auth header') + + # Parse the basic auth header. + assert isinstance(auth_header, basestring) + credentials, err = _parse_basic_auth_header(auth_header) + if err is not None: + logger.debug('Got invalid basic auth header: %s', auth_header) + return ValidateResult(AuthKind.basic, missing=True) + + auth_username, auth_password_or_token = credentials + result, _ = validate_credentials(auth_username, auth_password_or_token) + return result.with_kind(AuthKind.basic) + + +def _parse_basic_auth_header(auth): + """ Parses the given basic auth header, returning the credentials found inside. + """ + normalized = [part.strip() for part in auth.split(' ') if part] + if normalized[0].lower() != 'basic' or len(normalized) != 2: + return None, 'Invalid basic auth header' + + try: + credentials = [part.decode('utf-8') for part in b64decode(normalized[1]).split(':', 1)] + except (TypeError, UnicodeDecodeError, ValueError): + logger.exception('Exception when parsing basic auth header: %s', auth) + return None, 'Could not parse basic auth header' + + if len(credentials) != 2: + return None, 'Unexpected number of credentials found in basic auth header' + + return credentials, None diff --git a/auth/context_entity.py b/auth/context_entity.py new file mode 100644 index 000000000..038624b0c --- /dev/null +++ b/auth/context_entity.py @@ -0,0 +1,203 @@ +from abc import ABCMeta, abstractmethod +from six import add_metaclass +from enum import Enum + +from data import model + +from auth.credential_consts import (ACCESS_TOKEN_USERNAME, OAUTH_TOKEN_USERNAME, + APP_SPECIFIC_TOKEN_USERNAME) + +class ContextEntityKind(Enum): + """ Defines the various kinds of entities in an auth context. Note that the string values of + these fields *must* match the names of the fields in the ValidatedAuthContext class, as + we fill them in directly based on the string names here. + """ + anonymous = 'anonymous' + user = 'user' + robot = 'robot' + token = 'token' + oauthtoken = 'oauthtoken' + appspecifictoken = 'appspecifictoken' + signed_data = 'signed_data' + + +@add_metaclass(ABCMeta) +class ContextEntityHandler(object): + """ + Interface that represents handling specific kinds of entities under an auth context. + """ + + @abstractmethod + def credential_username(self, entity_reference): + """ Returns the username to create credentials for this entity, if any. 
""" + pass + + @abstractmethod + def get_serialized_entity_reference(self, entity_reference): + """ Returns the entity reference for this kind of auth context, serialized into a form that can + be placed into a JSON object and put into a JWT. This is typically a DB UUID or another + unique identifier for the object in the DB. + """ + pass + + @abstractmethod + def deserialize_entity_reference(self, serialized_entity_reference): + """ Returns the deserialized reference to the entity in the database, or None if none. """ + pass + + @abstractmethod + def description(self, entity_reference): + """ Returns a human-readable and *public* description of the current entity. """ + pass + + @abstractmethod + def analytics_id_and_public_metadata(self, entity_reference): + """ Returns the analyitics ID and a dict of public metadata for the current entity. """ + pass + + +class AnonymousEntityHandler(ContextEntityHandler): + def credential_username(self, entity_reference): + return None + + def get_serialized_entity_reference(self, entity_reference): + return None + + def deserialize_entity_reference(self, serialized_entity_reference): + return None + + def description(self, entity_reference): + return "anonymous" + + def analytics_id_and_public_metadata(self, entity_reference): + return "anonymous", {} + + +class UserEntityHandler(ContextEntityHandler): + def credential_username(self, entity_reference): + return entity_reference.username + + def get_serialized_entity_reference(self, entity_reference): + return entity_reference.uuid + + def deserialize_entity_reference(self, serialized_entity_reference): + return model.user.get_user_by_uuid(serialized_entity_reference) + + def description(self, entity_reference): + return "user %s" % entity_reference.username + + def analytics_id_and_public_metadata(self, entity_reference): + return entity_reference.username, { + 'username': entity_reference.username, + } + + +class RobotEntityHandler(ContextEntityHandler): + def credential_username(self, entity_reference): + return entity_reference.username + + def get_serialized_entity_reference(self, entity_reference): + return entity_reference.username + + def deserialize_entity_reference(self, serialized_entity_reference): + return model.user.lookup_robot(serialized_entity_reference) + + def description(self, entity_reference): + return "robot %s" % entity_reference.username + + def analytics_id_and_public_metadata(self, entity_reference): + return entity_reference.username, { + 'username': entity_reference.username, + 'is_robot': True, + } + + +class TokenEntityHandler(ContextEntityHandler): + def credential_username(self, entity_reference): + return ACCESS_TOKEN_USERNAME + + def get_serialized_entity_reference(self, entity_reference): + return entity_reference.get_code() + + def deserialize_entity_reference(self, serialized_entity_reference): + return model.token.load_token_data(serialized_entity_reference) + + def description(self, entity_reference): + return "token %s" % entity_reference.friendly_name + + def analytics_id_and_public_metadata(self, entity_reference): + return 'token:%s' % entity_reference.id, { + 'token': entity_reference.friendly_name, + } + + +class OAuthTokenEntityHandler(ContextEntityHandler): + def credential_username(self, entity_reference): + return OAUTH_TOKEN_USERNAME + + def get_serialized_entity_reference(self, entity_reference): + return entity_reference.uuid + + def deserialize_entity_reference(self, serialized_entity_reference): + return 
model.oauth.lookup_access_token_by_uuid(serialized_entity_reference) + + def description(self, entity_reference): + return "oauthtoken for user %s" % entity_reference.authorized_user.username + + def analytics_id_and_public_metadata(self, entity_reference): + return 'oauthtoken:%s' % entity_reference.id, { + 'oauth_token_id': entity_reference.id, + 'oauth_token_application_id': entity_reference.application.client_id, + 'oauth_token_application': entity_reference.application.name, + 'username': entity_reference.authorized_user.username, + } + + +class AppSpecificTokenEntityHandler(ContextEntityHandler): + def credential_username(self, entity_reference): + return APP_SPECIFIC_TOKEN_USERNAME + + def get_serialized_entity_reference(self, entity_reference): + return entity_reference.uuid + + def deserialize_entity_reference(self, serialized_entity_reference): + return model.appspecifictoken.get_token_by_uuid(serialized_entity_reference) + + def description(self, entity_reference): + tpl = (entity_reference.title, entity_reference.user.username) + return "app specific token %s for user %s" % tpl + + def analytics_id_and_public_metadata(self, entity_reference): + return 'appspecifictoken:%s' % entity_reference.id, { + 'app_specific_token': entity_reference.uuid, + 'app_specific_token_title': entity_reference.title, + 'username': entity_reference.user.username, + } + + +class SignedDataEntityHandler(ContextEntityHandler): + def credential_username(self, entity_reference): + return None + + def get_serialized_entity_reference(self, entity_reference): + raise NotImplementedError + + def deserialize_entity_reference(self, serialized_entity_reference): + raise NotImplementedError + + def description(self, entity_reference): + return "signed" + + def analytics_id_and_public_metadata(self, entity_reference): + return 'signed', {'signed': entity_reference} + + +CONTEXT_ENTITY_HANDLERS = { + ContextEntityKind.anonymous: AnonymousEntityHandler, + ContextEntityKind.user: UserEntityHandler, + ContextEntityKind.robot: RobotEntityHandler, + ContextEntityKind.token: TokenEntityHandler, + ContextEntityKind.oauthtoken: OAuthTokenEntityHandler, + ContextEntityKind.appspecifictoken: AppSpecificTokenEntityHandler, + ContextEntityKind.signed_data: SignedDataEntityHandler, +} diff --git a/auth/cookie.py b/auth/cookie.py new file mode 100644 index 000000000..68ed0f8ee --- /dev/null +++ b/auth/cookie.py @@ -0,0 +1,37 @@ +import logging + +from uuid import UUID +from flask_login import current_user + +from auth.validateresult import AuthKind, ValidateResult + +logger = logging.getLogger(__name__) + +def validate_session_cookie(auth_header_unusued=None): + """ Attempts to load a user from a session cookie. """ + if current_user.is_anonymous: + return ValidateResult(AuthKind.cookie, missing=True) + + try: + # Attempt to parse the user uuid to make sure the cookie has the right value type + UUID(current_user.get_id()) + except ValueError: + logger.debug('Got non-UUID for session cookie user: %s', current_user.get_id()) + return ValidateResult(AuthKind.cookie, error_message='Invalid session cookie format') + + logger.debug('Loading user from cookie: %s', current_user.get_id()) + db_user = current_user.db_user() + if db_user is None: + return ValidateResult(AuthKind.cookie, error_message='Could not find matching user') + + # Don't allow disabled users to login. 
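+  # Returning an error_message (rather than missing=True) marks this as a definitive failure,
+  # so callers report the message instead of falling through to another auth handler.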
+ if not db_user.enabled: + logger.debug('User %s in session cookie is disabled', db_user.username) + return ValidateResult(AuthKind.cookie, error_message='User account is disabled') + + # Don't allow organizations to "login". + if db_user.organization: + logger.debug('User %s in session cookie is in-fact organization', db_user.username) + return ValidateResult(AuthKind.cookie, error_message='Cannot login to organization') + + return ValidateResult(AuthKind.cookie, user=db_user) diff --git a/auth/credential_consts.py b/auth/credential_consts.py new file mode 100644 index 000000000..dda9834d1 --- /dev/null +++ b/auth/credential_consts.py @@ -0,0 +1,3 @@ +ACCESS_TOKEN_USERNAME = '$token' +OAUTH_TOKEN_USERNAME = '$oauthtoken' +APP_SPECIFIC_TOKEN_USERNAME = '$app' diff --git a/auth/credentials.py b/auth/credentials.py new file mode 100644 index 000000000..5d8c8b4dd --- /dev/null +++ b/auth/credentials.py @@ -0,0 +1,85 @@ +import logging + +from enum import Enum + +import features + +from app import authentication +from auth.oauth import validate_oauth_token +from auth.validateresult import ValidateResult, AuthKind +from auth.credential_consts import (ACCESS_TOKEN_USERNAME, OAUTH_TOKEN_USERNAME, + APP_SPECIFIC_TOKEN_USERNAME) +from data import model +from util.names import parse_robot_username + +logger = logging.getLogger(__name__) + + +class CredentialKind(Enum): + user = 'user' + robot = 'robot' + token = ACCESS_TOKEN_USERNAME + oauth_token = OAUTH_TOKEN_USERNAME + app_specific_token = APP_SPECIFIC_TOKEN_USERNAME + + +def validate_credentials(auth_username, auth_password_or_token): + """ Validates a pair of auth username and password/token credentials. """ + # Check for access tokens. + if auth_username == ACCESS_TOKEN_USERNAME: + logger.debug('Found credentials for access token') + try: + token = model.token.load_token_data(auth_password_or_token) + logger.debug('Successfully validated credentials for access token %s', token.id) + return ValidateResult(AuthKind.credentials, token=token), CredentialKind.token + except model.DataModelException: + logger.warning('Failed to validate credentials for access token %s', auth_password_or_token) + return (ValidateResult(AuthKind.credentials, error_message='Invalid access token'), + CredentialKind.token) + + # Check for App Specific tokens. + if features.APP_SPECIFIC_TOKENS and auth_username == APP_SPECIFIC_TOKEN_USERNAME: + logger.debug('Found credentials for app specific auth token') + token = model.appspecifictoken.access_valid_token(auth_password_or_token) + if token is None: + logger.debug('Failed to validate credentials for app specific token: %s', + auth_password_or_token) + return (ValidateResult(AuthKind.credentials, error_message='Invalid token'), + CredentialKind.app_specific_token) + + if not token.user.enabled: + logger.debug('Tried to use an app specific token for a disabled user: %s', + token.uuid) + return (ValidateResult(AuthKind.credentials, + error_message='This user has been disabled. Please contact your administrator.'), + CredentialKind.app_specific_token) + + logger.debug('Successfully validated credentials for app specific token %s', token.id) + return (ValidateResult(AuthKind.credentials, appspecifictoken=token), + CredentialKind.app_specific_token) + + # Check for OAuth tokens. + if auth_username == OAUTH_TOKEN_USERNAME: + return validate_oauth_token(auth_password_or_token), CredentialKind.oauth_token + + # Check for robots and users. 
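+  # Robot usernames take the `namespace+shortname` form; anything that does not parse as a
+  # robot falls through to the standard username/password check below.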
+ is_robot = parse_robot_username(auth_username) + if is_robot: + logger.debug('Found credentials header for robot %s', auth_username) + try: + robot = model.user.verify_robot(auth_username, auth_password_or_token) + logger.debug('Successfully validated credentials for robot %s', auth_username) + return ValidateResult(AuthKind.credentials, robot=robot), CredentialKind.robot + except model.InvalidRobotException as ire: + logger.warning('Failed to validate credentials for robot %s: %s', auth_username, ire) + return ValidateResult(AuthKind.credentials, error_message=str(ire)), CredentialKind.robot + + # Otherwise, treat as a standard user. + (authenticated, err) = authentication.verify_and_link_user(auth_username, auth_password_or_token, + basic_auth=True) + if authenticated: + logger.debug('Successfully validated credentials for user %s', authenticated.username) + return ValidateResult(AuthKind.credentials, user=authenticated), CredentialKind.user + else: + logger.warning('Failed to validate credentials for user %s: %s', auth_username, err) + return ValidateResult(AuthKind.credentials, error_message=err), CredentialKind.user diff --git a/auth/decorators.py b/auth/decorators.py new file mode 100644 index 000000000..5fc966140 --- /dev/null +++ b/auth/decorators.py @@ -0,0 +1,96 @@ +import logging + +from functools import wraps +from flask import request, session + +from app import metric_queue +from auth.basic import validate_basic_auth +from auth.oauth import validate_bearer_auth +from auth.cookie import validate_session_cookie +from auth.signedgrant import validate_signed_grant + +from util.http import abort + + +logger = logging.getLogger(__name__) + +def _auth_decorator(pass_result=False, handlers=None): + """ Builds an auth decorator that runs the given handlers and, if any return successfully, + sets up the auth context. The wrapped function will be invoked *regardless of success or + failure of the auth handler(s)* + """ + def processor(func): + @wraps(func) + def wrapper(*args, **kwargs): + auth_header = request.headers.get('authorization', '') + result = None + + for handler in handlers: + result = handler(auth_header) + # If the handler was missing the necessary information, skip it and try the next one. + if result.missing: + continue + + # Check for a valid result. + if result.auth_valid: + logger.debug('Found valid auth result: %s', result.tuple()) + + # Set the various pieces of the auth context. + result.apply_to_context() + + # Log the metric. + metric_queue.authentication_count.Inc(labelvalues=[result.kind, True]) + break + + # Otherwise, report the error. + if result.error_message is not None: + # Log the failure. + metric_queue.authentication_count.Inc(labelvalues=[result.kind, False]) + break + + if pass_result: + kwargs['auth_result'] = result + + return func(*args, **kwargs) + return wrapper + return processor + + +process_oauth = _auth_decorator(handlers=[validate_bearer_auth, validate_session_cookie]) +process_auth = _auth_decorator(handlers=[validate_signed_grant, validate_basic_auth]) +process_auth_or_cookie = _auth_decorator(handlers=[validate_basic_auth, validate_session_cookie]) +process_basic_auth = _auth_decorator(handlers=[validate_basic_auth], pass_result=True) +process_basic_auth_no_pass = _auth_decorator(handlers=[validate_basic_auth]) + + +def require_session_login(func): + """ Decorates a function and ensures that a valid session cookie exists or a 401 is raised. If + a valid session cookie does exist, the authenticated user and identity are also set. 
+  """
+  @wraps(func)
+  def wrapper(*args, **kwargs):
+    result = validate_session_cookie()
+    if result.has_nonrobot_user:
+      result.apply_to_context()
+      metric_queue.authentication_count.Inc(labelvalues=[result.kind, True])
+      return func(*args, **kwargs)
+    elif not result.missing:
+      metric_queue.authentication_count.Inc(labelvalues=[result.kind, False])
+
+    abort(401, message='Method requires login and no valid login could be loaded.')
+  return wrapper
+
+
+def extract_namespace_repo_from_session(func):
+  """ Extracts the namespace and repository name from the current session (which must exist)
+      and passes them into the decorated function as the first and second arguments. If the
+      session doesn't exist or does not contain these arguments, a 400 error is raised.
+  """
+  @wraps(func)
+  def wrapper(*args, **kwargs):
+    if 'namespace' not in session or 'repository' not in session:
+      logger.error('Unable to load namespace or repository from session: %s', session)
+      abort(400, message='Missing namespace in request')
+
+    return func(session['namespace'], session['repository'], *args, **kwargs)
+  return wrapper
diff --git a/auth/oauth.py b/auth/oauth.py
new file mode 100644
index 000000000..aaea92831
--- /dev/null
+++ b/auth/oauth.py
@@ -0,0 +1,48 @@
+import logging
+
+from datetime import datetime
+
+from auth.scopes import scopes_from_scope_string
+from auth.validateresult import AuthKind, ValidateResult
+from data import model
+
+logger = logging.getLogger(__name__)
+
+def validate_bearer_auth(auth_header):
+  """ Validates an OAuth token found inside a `Bearer` auth header, returning whether it
+      points to a valid OAuth token.
+  """
+  if not auth_header:
+    return ValidateResult(AuthKind.oauth, missing=True)
+
+  normalized = [part.strip() for part in auth_header.split(' ') if part]
+  if normalized[0].lower() != 'bearer' or len(normalized) != 2:
+    logger.debug('Got invalid bearer token format: %s', auth_header)
+    return ValidateResult(AuthKind.oauth, missing=True)
+
+  (_, oauth_token) = normalized
+  return validate_oauth_token(oauth_token)
+
+
+def validate_oauth_token(token):
+  """ Validates the specified OAuth token, returning whether it points to a valid OAuth token.
+  """
+  validated = model.oauth.validate_access_token(token)
+  if not validated:
+    logger.warning('OAuth access token could not be validated: %s', token)
+    return ValidateResult(AuthKind.oauth,
+                          error_message='OAuth access token could not be validated')
+
+  if validated.expires_at <= datetime.utcnow():
+    logger.warning('OAuth access with an expired token: %s', token)
+    return ValidateResult(AuthKind.oauth, error_message='OAuth access token has expired')
+
+  # Don't allow disabled users to login.
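+  # As with the other checks above, a disabled token owner produces an explicit error_message
+  # rather than missing=True, so the failure is reported to the caller.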
+ if not validated.authorized_user.enabled: + return ValidateResult(AuthKind.oauth, + error_message='Granter of the oauth access token is disabled') + + # We have a valid token + scope_set = scopes_from_scope_string(validated.scope) + logger.debug('Successfully validated oauth access token with scope: %s', scope_set) + return ValidateResult(AuthKind.oauth, oauthtoken=validated) diff --git a/auth/permissions.py b/auth/permissions.py index 641cfce33..c967aa046 100644 --- a/auth/permissions.py +++ b/auth/permissions.py @@ -1,13 +1,14 @@ import logging -from flask.ext.principal import identity_loaded, Permission, Identity, identity_changed from collections import namedtuple, defaultdict from functools import partial -import scopes +from flask_principal import identity_loaded, Permission, Identity, identity_changed + -from data import model from app import app, superusers +from auth import scopes +from data import model logger = logging.getLogger(__name__) @@ -29,10 +30,10 @@ REPO_ROLES = [None, 'read', 'write', 'admin'] TEAM_ROLES = [None, 'member', 'creator', 'admin'] USER_ROLES = [None, 'read', 'admin'] -TEAM_REPO_ROLES = { +TEAM_ORGWIDE_REPO_ROLES = { 'admin': 'admin', - 'creator': 'read', - 'member': 'read', + 'creator': None, + 'member': None, } SCOPE_MAX_REPO_ROLES = defaultdict(lambda: None) @@ -54,6 +55,7 @@ SCOPE_MAX_USER_ROLES = defaultdict(lambda: None) SCOPE_MAX_USER_ROLES.update({ scopes.READ_USER: 'read', scopes.DIRECT_LOGIN: 'admin', + scopes.ADMIN_USER: 'admin', }) def repository_read_grant(namespace, repository): @@ -64,6 +66,10 @@ def repository_write_grant(namespace, repository): return _RepositoryNeed(namespace, repository, 'write') +def repository_admin_grant(namespace, repository): + return _RepositoryNeed(namespace, repository, 'admin') + + class QuayDeferredPermissionUser(Identity): def __init__(self, uuid, auth_type, auth_scopes, user=None): super(QuayDeferredPermissionUser, self).__init__(uuid, auth_type) @@ -142,7 +148,7 @@ class QuayDeferredPermissionUser(Identity): logger.debug('Organization team added permission: {0}'.format(team_org_grant)) self.provides.add(team_org_grant) - team_repo_role = TEAM_REPO_ROLES[team.role.name] + team_repo_role = TEAM_ORGWIDE_REPO_ROLES[team.role.name] org_repo_grant = _OrganizationRepoNeed(team.organization.username, self._repo_role_for_scopes(team_repo_role)) logger.debug('Organization team added repo permission: {0}'.format(org_repo_grant)) diff --git a/auth/registry_jwt_auth.py b/auth/registry_jwt_auth.py index 79f240187..75be63d73 100644 --- a/auth/registry_jwt_auth.py +++ b/auth/registry_jwt_auth.py @@ -1,31 +1,24 @@ import logging -import re from functools import wraps from jsonschema import validate, ValidationError from flask import request, url_for -from flask.ext.principal import identity_changed, Identity -from cryptography.x509 import load_pem_x509_certificate -from cryptography.hazmat.backends import default_backend -from cachetools import lru_cache +from flask_principal import identity_changed, Identity -from app import app, get_app_url -from .auth_context import set_grant_context, get_grant_context -from .permissions import repository_read_grant, repository_write_grant -from util.names import parse_namespace_repository +from app import app, get_app_url, instance_keys, metric_queue +from auth.auth_context import set_authenticated_context +from auth.auth_context_type import SignedAuthContext +from auth.permissions import repository_read_grant, repository_write_grant, repository_admin_grant from util.http import abort 
-from util.security import strictjwt -from util.security.registry_jwt import ANONYMOUS_SUB -from data import model +from util.names import parse_namespace_repository +from util.security.registry_jwt import (ANONYMOUS_SUB, decode_bearer_header, + InvalidBearerTokenException) logger = logging.getLogger(__name__) -TOKEN_REGEX = re.compile(r'^Bearer (([a-zA-Z0-9+/]+\.)+[a-zA-Z0-9+-_/]+)$') -CONTEXT_KINDS = ['user', 'token', 'oauth'] - ACCESS_SCHEMA = { 'type': 'array', 'description': 'List of access granted to the subject', @@ -56,6 +49,7 @@ ACCESS_SCHEMA = { 'enum': [ 'push', 'pull', + '*', ], }, }, @@ -68,63 +62,6 @@ class InvalidJWTException(Exception): pass -class GrantedEntity(object): - def __init__(self, user=None, token=None, oauth=None): - self.user = user - self.token = token - self.oauth = oauth - - -def get_granted_entity(): - """ Returns the entity granted in the current context, if any. Returns the GrantedEntity or None - if none. - """ - context = get_grant_context() - if not context: - return None - - kind = context.get('kind', 'anonymous') - - if not kind in CONTEXT_KINDS: - return None - - if kind == 'user': - user = model.user.get_user(context.get('user', '')) - if not user: - return None - - return GrantedEntity(user=user) - - if kind == 'token': - token = model.token.load_token_data(context.get('token')) - if not token: - return None - - return GrantedEntity(token=token) - - if kind == 'oauth': - user = model.user.get_user(context.get('user', '')) - if not user: - return None - - oauthtoken = model.oauth.lookup_access_token_for_user(user, context.get('oauth', '')) - if not oauthtoken: - return None - - return GrantedEntity(oauth=oauthtoken, user=user) - - return None - - -def get_granted_username(): - """ Returns the username inside the grant, if any. """ - granted = get_granted_entity() - if not granted or not granted.user: - return None - - return granted.user.username - - def get_auth_headers(repository=None, scopes=None): """ Returns a dictionary of headers for auth responses. """ headers = {} @@ -133,43 +70,30 @@ def get_auth_headers(repository=None, scopes=None): realm_auth_path, app.config['SERVER_HOSTNAME']) if repository: - authenticate += ',scope=repository:{0}'.format(repository) + scopes_string = "repository:{0}".format(repository) if scopes: - authenticate += ':' + ','.join(scopes) + scopes_string += ':' + ','.join(scopes) + + authenticate += ',scope="{0}"'.format(scopes_string) headers['WWW-Authenticate'] = authenticate headers['Docker-Distribution-API-Version'] = 'registry/2.0' return headers -def identity_from_bearer_token(bearer_token, max_signed_s, public_key): - """ Process a bearer token and return the loaded identity, or raise InvalidJWTException if an +def identity_from_bearer_token(bearer_header): + """ Process a bearer header and return the loaded identity, or raise InvalidJWTException if an identity could not be loaded. Expects tokens and grants in the format of the Docker registry v2 auth spec: https://docs.docker.com/registry/spec/auth/token/ """ - logger.debug('Validating auth header: %s', bearer_token) + logger.debug('Validating auth header: %s', bearer_header) - # Extract the jwt token from the header - match = TOKEN_REGEX.match(bearer_token) - if match is None: - raise InvalidJWTException('Invalid bearer token format') - - encoded = match.group(1) - logger.debug('encoded JWT: %s', encoded) - - # Load the JWT returned. 
try: - expected_issuer = app.config['JWT_AUTH_TOKEN_ISSUER'] - audience = app.config['SERVER_HOSTNAME'] - max_exp = strictjwt.exp_max_s_option(max_signed_s) - payload = strictjwt.decode(encoded, public_key, algorithms=['RS256'], audience=audience, - issuer=expected_issuer, options=max_exp) - except strictjwt.InvalidTokenError: - logger.exception('Invalid token reason') - raise InvalidJWTException('Invalid token') - - if not 'sub' in payload: - raise InvalidJWTException('Missing sub field in JWT') + payload = decode_bearer_header(bearer_header, instance_keys, app.config, + metric_queue=metric_queue) + except InvalidBearerTokenException as bte: + logger.exception('Invalid bearer token: %s', bte) + raise InvalidJWTException(bte) loaded_identity = Identity(payload['sub'], 'signed_jwt') @@ -185,7 +109,9 @@ def identity_from_bearer_token(bearer_token, max_signed_s, public_key): for grant in payload['access']: namespace, repo_name = parse_namespace_repository(grant['name'], lib_namespace) - if 'push' in grant['actions']: + if '*' in grant['actions']: + loaded_identity.provides.add(repository_admin_grant(namespace, repo_name)) + elif 'push' in grant['actions']: loaded_identity.provides.add(repository_write_grant(namespace, repo_name)) elif 'pull' in grant['actions']: loaded_identity.provides.add(repository_read_grant(namespace, repo_name)) @@ -203,31 +129,26 @@ def identity_from_bearer_token(bearer_token, max_signed_s, public_key): return loaded_identity, payload.get('context', default_context) -@lru_cache(maxsize=1) -def load_public_key(certificate_file_path): - with open(certificate_file_path) as cert_file: - cert_obj = load_pem_x509_certificate(cert_file.read(), default_backend()) - return cert_obj.public_key() - - def process_registry_jwt_auth(scopes=None): + """ Processes the registry JWT auth token found in the authorization header. If none found, + no error is returned. If an invalid token is found, raises a 401. 
+ """ def inner(func): @wraps(func) def wrapper(*args, **kwargs): logger.debug('Called with params: %s, %s', args, kwargs) auth = request.headers.get('authorization', '').strip() if auth: - max_signature_seconds = app.config.get('JWT_AUTH_MAX_FRESH_S', 3660) - certificate_file_path = app.config['JWT_AUTH_CERTIFICATE_PATH'] - public_key = load_public_key(certificate_file_path) - try: - extracted_identity, context = identity_from_bearer_token(auth, max_signature_seconds, - public_key) - + extracted_identity, context_dict = identity_from_bearer_token(auth) identity_changed.send(app, identity=extracted_identity) - set_grant_context(context) logger.debug('Identity changed to %s', extracted_identity.id) + + auth_context = SignedAuthContext.build_from_signed_dict(context_dict) + if auth_context is not None: + logger.debug('Auth context set to %s', auth_context.signed_data) + set_authenticated_context(auth_context) + except InvalidJWTException as ije: repository = None if 'namespace_name' in kwargs and 'repo_name' in kwargs: diff --git a/auth/scopes.py b/auth/scopes.py index c41ad7033..dbbb0ae1c 100644 --- a/auth/scopes.py +++ b/auth/scopes.py @@ -96,11 +96,10 @@ IMPLIED_SCOPES = { def app_scopes(app_config): + scopes_from_config = dict(ALL_SCOPES) if not app_config.get('FEATURE_SUPER_USERS', False): - scopes_from_config = dict(ALL_SCOPES) del scopes_from_config[SUPERUSER.scope] - return scopes_from_config - return ALL_SCOPES + return scopes_from_config def scopes_from_scope_string(scopes): diff --git a/auth/signedgrant.py b/auth/signedgrant.py new file mode 100644 index 000000000..b8169114d --- /dev/null +++ b/auth/signedgrant.py @@ -0,0 +1,55 @@ +import logging + +from flask.sessions import SecureCookieSessionInterface, BadSignature + +from app import app +from auth.validateresult import AuthKind, ValidateResult + +logger = logging.getLogger(__name__) + +# The prefix for all signatures of signed granted. +SIGNATURE_PREFIX = 'sigv2=' + +def generate_signed_token(grants, user_context): + """ Generates a signed session token with the given grants and user context. """ + ser = SecureCookieSessionInterface().get_signing_serializer(app) + data_to_sign = { + 'grants': grants, + 'user_context': user_context, + } + + encrypted = ser.dumps(data_to_sign) + return '{0}{1}'.format(SIGNATURE_PREFIX, encrypted) + + +def validate_signed_grant(auth_header): + """ Validates a signed grant as found inside an auth header and returns whether it points to + a valid grant. + """ + if not auth_header: + return ValidateResult(AuthKind.signed_grant, missing=True) + + # Try to parse the token from the header. + normalized = [part.strip() for part in auth_header.split(' ') if part] + if normalized[0].lower() != 'token' or len(normalized) != 2: + logger.debug('Not a token: %s', auth_header) + return ValidateResult(AuthKind.signed_grant, missing=True) + + # Check that it starts with the expected prefix. + if not normalized[1].startswith(SIGNATURE_PREFIX): + logger.debug('Not a signed grant token: %s', auth_header) + return ValidateResult(AuthKind.signed_grant, missing=True) + + # Decrypt the grant. 
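+  # Verification uses the same signing serializer Flask applies to session cookies; a tampered
+  # or expired grant raises BadSignature below (SignatureExpired is a BadSignature subclass).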
+ encrypted = normalized[1][len(SIGNATURE_PREFIX):] + ser = SecureCookieSessionInterface().get_signing_serializer(app) + + try: + token_data = ser.loads(encrypted, max_age=app.config['SIGNED_GRANT_EXPIRATION_SEC']) + except BadSignature: + logger.warning('Signed grant could not be validated: %s', encrypted) + return ValidateResult(AuthKind.signed_grant, + error_message='Signed grant could not be validated') + + logger.debug('Successfully validated signed grant with data: %s', token_data) + return ValidateResult(AuthKind.signed_grant, signed_data=token_data) diff --git a/auth/test/test_auth_context_type.py b/auth/test/test_auth_context_type.py new file mode 100644 index 000000000..7778d7f90 --- /dev/null +++ b/auth/test/test_auth_context_type.py @@ -0,0 +1,51 @@ +import pytest + +from auth.auth_context_type import SignedAuthContext, ValidatedAuthContext, ContextEntityKind +from data import model, database + +from test.fixtures import * + +def get_oauth_token(_): + return database.OAuthAccessToken.get() + + +@pytest.mark.parametrize('kind, entity_reference, loader', [ + (ContextEntityKind.anonymous, None, None), + (ContextEntityKind.appspecifictoken, '%s%s' % ('a' * 60, 'b' * 60), + model.appspecifictoken.access_valid_token), + (ContextEntityKind.oauthtoken, None, get_oauth_token), + (ContextEntityKind.robot, 'devtable+dtrobot', model.user.lookup_robot), + (ContextEntityKind.user, 'devtable', model.user.get_user), +]) +@pytest.mark.parametrize('v1_dict_format', [ + (True), + (False), +]) +def test_signed_auth_context(kind, entity_reference, loader, v1_dict_format, initialized_db): + if kind == ContextEntityKind.anonymous: + validated = ValidatedAuthContext() + assert validated.is_anonymous + else: + ref = loader(entity_reference) + validated = ValidatedAuthContext(**{kind.value: ref}) + assert not validated.is_anonymous + + assert validated.entity_kind == kind + assert validated.unique_key + + signed = SignedAuthContext.build_from_signed_dict(validated.to_signed_dict(), + v1_dict_format=v1_dict_format) + + if not v1_dict_format: + # Under legacy V1 format, we don't track the app specific token, merely its associated user. 
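+    # The stricter entity-level assertions therefore only run for the V2 dictionary format;
+    # the user-level round-trip checks below still run for both formats.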
+ assert signed.entity_kind == kind + assert signed.description == validated.description + assert signed.credential_username == validated.credential_username + assert signed.analytics_id_and_public_metadata() == validated.analytics_id_and_public_metadata() + assert signed.unique_key == validated.unique_key + + assert signed.is_anonymous == validated.is_anonymous + assert signed.authed_user == validated.authed_user + assert signed.has_nonrobot_user == validated.has_nonrobot_user + + assert signed.to_signed_dict() == validated.to_signed_dict() diff --git a/auth/test/test_basic.py b/auth/test/test_basic.py new file mode 100644 index 000000000..24279b4b2 --- /dev/null +++ b/auth/test/test_basic.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- + +import pytest + +from base64 import b64encode + +from auth.basic import validate_basic_auth +from auth.credentials import (ACCESS_TOKEN_USERNAME, OAUTH_TOKEN_USERNAME, + APP_SPECIFIC_TOKEN_USERNAME) +from auth.validateresult import AuthKind, ValidateResult +from data import model + +from test.fixtures import * + + +def _token(username, password): + assert isinstance(username, basestring) + assert isinstance(password, basestring) + return 'basic ' + b64encode('%s:%s' % (username, password)) + + +@pytest.mark.parametrize('token, expected_result', [ + ('', ValidateResult(AuthKind.basic, missing=True)), + ('someinvalidtoken', ValidateResult(AuthKind.basic, missing=True)), + ('somefoobartoken', ValidateResult(AuthKind.basic, missing=True)), + ('basic ', ValidateResult(AuthKind.basic, missing=True)), + ('basic some token', ValidateResult(AuthKind.basic, missing=True)), + ('basic sometoken', ValidateResult(AuthKind.basic, missing=True)), + (_token(APP_SPECIFIC_TOKEN_USERNAME, 'invalid'), ValidateResult(AuthKind.basic, + error_message='Invalid token')), + (_token(ACCESS_TOKEN_USERNAME, 'invalid'), ValidateResult(AuthKind.basic, + error_message='Invalid access token')), + (_token(OAUTH_TOKEN_USERNAME, 'invalid'), + ValidateResult(AuthKind.basic, error_message='OAuth access token could not be validated')), + (_token('devtable', 'invalid'), ValidateResult(AuthKind.basic, + error_message='Invalid Username or Password')), + (_token('devtable+somebot', 'invalid'), ValidateResult( + AuthKind.basic, error_message='Could not find robot with username: devtable+somebot')), + (_token('disabled', 'password'), ValidateResult( + AuthKind.basic, + error_message='This user has been disabled. 
Please contact your administrator.')),]) +def test_validate_basic_auth_token(token, expected_result, app): + result = validate_basic_auth(token) + assert result == expected_result + + +def test_valid_user(app): + token = _token('devtable', 'password') + result = validate_basic_auth(token) + assert result == ValidateResult(AuthKind.basic, user=model.user.get_user('devtable')) + + +def test_valid_robot(app): + robot, password = model.user.create_robot('somerobot', model.user.get_user('devtable')) + token = _token(robot.username, password) + result = validate_basic_auth(token) + assert result == ValidateResult(AuthKind.basic, robot=robot) + + +def test_valid_token(app): + access_token = model.token.create_delegate_token('devtable', 'simple', 'sometoken') + token = _token(ACCESS_TOKEN_USERNAME, access_token.get_code()) + result = validate_basic_auth(token) + assert result == ValidateResult(AuthKind.basic, token=access_token) + + +def test_valid_oauth(app): + user = model.user.get_user('devtable') + app = model.oauth.list_applications_for_org(model.user.get_user_or_org('buynlarge'))[0] + oauth_token, code = model.oauth.create_access_token_for_testing(user, app.client_id, 'repo:read') + token = _token(OAUTH_TOKEN_USERNAME, code) + result = validate_basic_auth(token) + assert result == ValidateResult(AuthKind.basic, oauthtoken=oauth_token) + + +def test_valid_app_specific_token(app): + user = model.user.get_user('devtable') + app_specific_token = model.appspecifictoken.create_token(user, 'some token') + full_token = model.appspecifictoken.get_full_token_string(app_specific_token) + token = _token(APP_SPECIFIC_TOKEN_USERNAME, full_token) + result = validate_basic_auth(token) + assert result == ValidateResult(AuthKind.basic, appspecifictoken=app_specific_token) + + +def test_invalid_unicode(app): + token = '\xebOH' + header = 'basic ' + b64encode(token) + result = validate_basic_auth(header) + assert result == ValidateResult(AuthKind.basic, missing=True) + + +def test_invalid_unicode_2(app): + token = '“4JPCOLIVMAY32Q3XGVPHC4CBF8SKII5FWNYMASOFDIVSXTC5I5NBU”' + header = 'basic ' + b64encode('devtable+somerobot:%s' % token) + result = validate_basic_auth(header) + assert result == ValidateResult( + AuthKind.basic, + error_message='Could not find robot with username: devtable+somerobot and supplied password.') diff --git a/auth/test/test_cookie.py b/auth/test/test_cookie.py new file mode 100644 index 000000000..8c212d709 --- /dev/null +++ b/auth/test/test_cookie.py @@ -0,0 +1,66 @@ +import uuid + +from flask_login import login_user + +from app import LoginWrappedDBUser +from data import model +from auth.cookie import validate_session_cookie +from test.fixtures import * + + +def test_anonymous_cookie(app): + assert validate_session_cookie().missing + + +def test_invalidformatted_cookie(app): + # "Login" with a non-UUID reference. + someuser = model.user.get_user('devtable') + login_user(LoginWrappedDBUser('somenonuuid', someuser)) + + # Ensure we get an invalid session cookie format error. + result = validate_session_cookie() + assert result.authed_user is None + assert result.context.identity is None + assert not result.has_nonrobot_user + assert result.error_message == 'Invalid session cookie format' + + +def test_disabled_user(app): + # "Login" with a disabled user. + someuser = model.user.get_user('disabled') + login_user(LoginWrappedDBUser(someuser.uuid, someuser)) + + # Ensure we get an invalid session cookie format error. 
+ result = validate_session_cookie() + assert result.authed_user is None + assert result.context.identity is None + assert not result.has_nonrobot_user + assert result.error_message == 'User account is disabled' + + +def test_valid_user(app): + # Login with a valid user. + someuser = model.user.get_user('devtable') + login_user(LoginWrappedDBUser(someuser.uuid, someuser)) + + result = validate_session_cookie() + assert result.authed_user == someuser + assert result.context.identity is not None + assert result.has_nonrobot_user + assert result.error_message is None + + +def test_valid_organization(app): + # "Login" with a valid organization. + someorg = model.user.get_namespace_user('buynlarge') + someorg.uuid = str(uuid.uuid4()) + someorg.verified = True + someorg.save() + + login_user(LoginWrappedDBUser(someorg.uuid, someorg)) + + result = validate_session_cookie() + assert result.authed_user is None + assert result.context.identity is None + assert not result.has_nonrobot_user + assert result.error_message == 'Cannot login to organization' diff --git a/auth/test/test_credentials.py b/auth/test/test_credentials.py new file mode 100644 index 000000000..4e55c470c --- /dev/null +++ b/auth/test/test_credentials.py @@ -0,0 +1,147 @@ +# -*- coding: utf-8 -*- + +from auth.credentials import validate_credentials, CredentialKind +from auth.credential_consts import (ACCESS_TOKEN_USERNAME, OAUTH_TOKEN_USERNAME, + APP_SPECIFIC_TOKEN_USERNAME) +from auth.validateresult import AuthKind, ValidateResult +from data import model + +from test.fixtures import * + +def test_valid_user(app): + result, kind = validate_credentials('devtable', 'password') + assert kind == CredentialKind.user + assert result == ValidateResult(AuthKind.credentials, user=model.user.get_user('devtable')) + +def test_valid_robot(app): + robot, password = model.user.create_robot('somerobot', model.user.get_user('devtable')) + result, kind = validate_credentials(robot.username, password) + assert kind == CredentialKind.robot + assert result == ValidateResult(AuthKind.credentials, robot=robot) + +def test_valid_robot_for_disabled_user(app): + user = model.user.get_user('devtable') + user.enabled = False + user.save() + + robot, password = model.user.create_robot('somerobot', user) + result, kind = validate_credentials(robot.username, password) + assert kind == CredentialKind.robot + + err = 'This user has been disabled. Please contact your administrator.' 
+ assert result == ValidateResult(AuthKind.credentials, error_message=err) + +def test_valid_token(app): + access_token = model.token.create_delegate_token('devtable', 'simple', 'sometoken') + result, kind = validate_credentials(ACCESS_TOKEN_USERNAME, access_token.get_code()) + assert kind == CredentialKind.token + assert result == ValidateResult(AuthKind.credentials, token=access_token) + +def test_valid_oauth(app): + user = model.user.get_user('devtable') + app = model.oauth.list_applications_for_org(model.user.get_user_or_org('buynlarge'))[0] + oauth_token, code = model.oauth.create_access_token_for_testing(user, app.client_id, 'repo:read') + result, kind = validate_credentials(OAUTH_TOKEN_USERNAME, code) + assert kind == CredentialKind.oauth_token + assert result == ValidateResult(AuthKind.oauth, oauthtoken=oauth_token) + +def test_invalid_user(app): + result, kind = validate_credentials('devtable', 'somepassword') + assert kind == CredentialKind.user + assert result == ValidateResult(AuthKind.credentials, + error_message='Invalid Username or Password') + +def test_valid_app_specific_token(app): + user = model.user.get_user('devtable') + app_specific_token = model.appspecifictoken.create_token(user, 'some token') + full_token = model.appspecifictoken.get_full_token_string(app_specific_token) + result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, full_token) + assert kind == CredentialKind.app_specific_token + assert result == ValidateResult(AuthKind.credentials, appspecifictoken=app_specific_token) + +def test_valid_app_specific_token_for_disabled_user(app): + user = model.user.get_user('devtable') + user.enabled = False + user.save() + + app_specific_token = model.appspecifictoken.create_token(user, 'some token') + full_token = model.appspecifictoken.get_full_token_string(app_specific_token) + result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, full_token) + assert kind == CredentialKind.app_specific_token + + err = 'This user has been disabled. Please contact your administrator.' + assert result == ValidateResult(AuthKind.credentials, error_message=err) + +def test_invalid_app_specific_token(app): + result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, 'somecode') + assert kind == CredentialKind.app_specific_token + assert result == ValidateResult(AuthKind.credentials, error_message='Invalid token') + +def test_invalid_app_specific_token_code(app): + user = model.user.get_user('devtable') + app_specific_token = model.appspecifictoken.create_token(user, 'some token') + full_token = app_specific_token.token_name + 'something' + result, kind = validate_credentials(APP_SPECIFIC_TOKEN_USERNAME, full_token) + assert kind == CredentialKind.app_specific_token + assert result == ValidateResult(AuthKind.credentials, error_message='Invalid token') + +def test_unicode(app): + result, kind = validate_credentials('someusername', 'some₪code') + assert kind == CredentialKind.user + assert not result.auth_valid + assert result == ValidateResult(AuthKind.credentials, + error_message='Invalid Username or Password') + +def test_unicode_robot(app): + robot, _ = model.user.create_robot('somerobot', model.user.get_user('devtable')) + result, kind = validate_credentials(robot.username, 'some₪code') + + assert kind == CredentialKind.robot + assert not result.auth_valid + + msg = 'Could not find robot with username: devtable+somerobot and supplied password.' 
+ assert result == ValidateResult(AuthKind.credentials, error_message=msg) + +def test_invalid_username(app): + result, kind = validate_credentials('someinvaliduser', 'password') + assert kind == CredentialKind.user + assert not result.authed_user + assert not result.auth_valid + +def test_invalid_user_password(app): + result, kind = validate_credentials('devtable', 'somepassword') + assert kind == CredentialKind.user + assert not result.authed_user + assert not result.auth_valid + +def test_invalid_robot(app): + result, kind = validate_credentials('devtable+doesnotexist', 'password') + assert kind == CredentialKind.robot + assert not result.authed_user + assert not result.auth_valid + +def test_invalid_robot_token(app): + robot, _ = model.user.create_robot('somerobot', model.user.get_user('devtable')) + result, kind = validate_credentials(robot.username, 'invalidpassword') + assert kind == CredentialKind.robot + assert not result.authed_user + assert not result.auth_valid + +def test_invalid_unicode_robot(app): + token = '“4JPCOLIVMAY32Q3XGVPHC4CBF8SKII5FWNYMASOFDIVSXTC5I5NBU”' + result, kind = validate_credentials('devtable+somerobot', token) + assert kind == CredentialKind.robot + assert not result.auth_valid + msg = 'Could not find robot with username: devtable+somerobot' + assert result == ValidateResult(AuthKind.credentials, error_message=msg) + +def test_invalid_unicode_robot_2(app): + user = model.user.get_user('devtable') + robot, password = model.user.create_robot('somerobot', user) + + token = '“4JPCOLIVMAY32Q3XGVPHC4CBF8SKII5FWNYMASOFDIVSXTC5I5NBU”' + result, kind = validate_credentials('devtable+somerobot', token) + assert kind == CredentialKind.robot + assert not result.auth_valid + msg = 'Could not find robot with username: devtable+somerobot and supplied password.' + assert result == ValidateResult(AuthKind.credentials, error_message=msg) diff --git a/auth/test/test_decorators.py b/auth/test/test_decorators.py new file mode 100644 index 000000000..b0477f7bd --- /dev/null +++ b/auth/test/test_decorators.py @@ -0,0 +1,105 @@ +import pytest + +from flask import session +from flask_login import login_user +from werkzeug.exceptions import HTTPException + +from app import LoginWrappedDBUser +from auth.auth_context import get_authenticated_user +from auth.decorators import ( + extract_namespace_repo_from_session, require_session_login, process_auth_or_cookie) +from data import model +from test.fixtures import * + + +def test_extract_namespace_repo_from_session_missing(app): + def emptyfunc(): + pass + + session.clear() + with pytest.raises(HTTPException): + extract_namespace_repo_from_session(emptyfunc)() + + +def test_extract_namespace_repo_from_session_present(app): + encountered = [] + + def somefunc(namespace, repository): + encountered.append(namespace) + encountered.append(repository) + + # Add the namespace and repository to the session. + session.clear() + session['namespace'] = 'foo' + session['repository'] = 'bar' + + # Call the decorated method. + extract_namespace_repo_from_session(somefunc)() + + assert encountered[0] == 'foo' + assert encountered[1] == 'bar' + + +def test_require_session_login_missing(app): + def emptyfunc(): + pass + + with pytest.raises(HTTPException): + require_session_login(emptyfunc)() + + +def test_require_session_login_valid_user(app): + def emptyfunc(): + pass + + # Login as a valid user. + someuser = model.user.get_user('devtable') + login_user(LoginWrappedDBUser(someuser.uuid, someuser)) + + # Call the function.
+ require_session_login(emptyfunc)() + + # Ensure the authenticated user was updated. + assert get_authenticated_user() == someuser + + +def test_require_session_login_invalid_user(app): + def emptyfunc(): + pass + + # "Login" as a disabled user. + someuser = model.user.get_user('disabled') + login_user(LoginWrappedDBUser(someuser.uuid, someuser)) + + # Call the function. + with pytest.raises(HTTPException): + require_session_login(emptyfunc)() + + # Ensure the authenticated user was not updated. + assert get_authenticated_user() is None + + +def test_process_auth_or_cookie_invalid_user(app): + def emptyfunc(): + pass + + # Call the function. + process_auth_or_cookie(emptyfunc)() + + # Ensure the authenticated user was not updated. + assert get_authenticated_user() is None + + +def test_process_auth_or_cookie_valid_user(app): + def emptyfunc(): + pass + + # Login as a valid user. + someuser = model.user.get_user('devtable') + login_user(LoginWrappedDBUser(someuser.uuid, someuser)) + + # Call the function. + process_auth_or_cookie(emptyfunc)() + + # Ensure the authenticated user was updated. + assert get_authenticated_user() == someuser diff --git a/auth/test/test_oauth.py b/auth/test/test_oauth.py new file mode 100644 index 000000000..f678f2604 --- /dev/null +++ b/auth/test/test_oauth.py @@ -0,0 +1,55 @@ +import pytest + +from auth.oauth import validate_bearer_auth, validate_oauth_token +from auth.validateresult import AuthKind, ValidateResult +from data import model +from test.fixtures import * + + +@pytest.mark.parametrize('header, expected_result', [ + ('', ValidateResult(AuthKind.oauth, missing=True)), + ('somerandomtoken', ValidateResult(AuthKind.oauth, missing=True)), + ('bearer some random token', ValidateResult(AuthKind.oauth, missing=True)), + ('bearer invalidtoken', + ValidateResult(AuthKind.oauth, error_message='OAuth access token could not be validated')),]) +def test_bearer(header, expected_result, app): + assert validate_bearer_auth(header) == expected_result + + +def test_valid_oauth(app): + user = model.user.get_user('devtable') + app = model.oauth.list_applications_for_org(model.user.get_user_or_org('buynlarge'))[0] + token_string = '%s%s' % ('a' * 20, 'b' * 20) + oauth_token, _ = model.oauth.create_access_token_for_testing(user, app.client_id, 'repo:read', + access_token=token_string) + result = validate_bearer_auth('bearer ' + token_string) + assert result.context.oauthtoken == oauth_token + assert result.authed_user == user + assert result.auth_valid + + +def test_disabled_user_oauth(app): + user = model.user.get_user('disabled') + token_string = '%s%s' % ('a' * 20, 'b' * 20) + oauth_token, _ = model.oauth.create_access_token_for_testing(user, 'deadbeef', 'repo:admin', + access_token=token_string) + + result = validate_bearer_auth('bearer ' + token_string) + assert result.context.oauthtoken is None + assert result.authed_user is None + assert not result.auth_valid + assert result.error_message == 'Granter of the oauth access token is disabled' + + +def test_expired_token(app): + user = model.user.get_user('devtable') + token_string = '%s%s' % ('a' * 20, 'b' * 20) + oauth_token, _ = model.oauth.create_access_token_for_testing(user, 'deadbeef', 'repo:admin', + access_token=token_string, + expires_in=-1000) + + result = validate_bearer_auth('bearer ' + token_string) + assert result.context.oauthtoken is None + assert result.authed_user is None + assert not result.auth_valid + assert result.error_message == 'OAuth access token has expired' diff --git 
a/auth/test/test_permissions.py b/auth/test/test_permissions.py new file mode 100644 index 000000000..f2849934d --- /dev/null +++ b/auth/test/test_permissions.py @@ -0,0 +1,37 @@ +import pytest + +from auth import scopes +from auth.permissions import SuperUserPermission, QuayDeferredPermissionUser +from data import model + +from test.fixtures import * + +SUPER_USERNAME = 'devtable' +UNSUPER_USERNAME = 'freshuser' + +@pytest.fixture() +def superuser(initialized_db): + return model.user.get_user(SUPER_USERNAME) + + +@pytest.fixture() +def normie(initialized_db): + return model.user.get_user(UNSUPER_USERNAME) + + +def test_superuser_matrix(superuser, normie): + test_cases = [ + (superuser, {scopes.SUPERUSER}, True), + (superuser, {scopes.DIRECT_LOGIN}, True), + (superuser, {scopes.READ_USER, scopes.SUPERUSER}, True), + (superuser, {scopes.READ_USER}, False), + (normie, {scopes.SUPERUSER}, False), + (normie, {scopes.DIRECT_LOGIN}, False), + (normie, {scopes.READ_USER, scopes.SUPERUSER}, False), + (normie, {scopes.READ_USER}, False), + ] + + for user_obj, scope_set, expected in test_cases: + perm_user = QuayDeferredPermissionUser.for_user(user_obj, scope_set) + has_su = perm_user.can(SuperUserPermission()) + assert has_su == expected diff --git a/auth/test/test_registry_jwt.py b/auth/test/test_registry_jwt.py new file mode 100644 index 000000000..fc6548d74 --- /dev/null +++ b/auth/test/test_registry_jwt.py @@ -0,0 +1,203 @@ +# -*- coding: utf-8 -*- + +import time + +import jwt +import pytest + +from app import app, instance_keys +from auth.auth_context_type import ValidatedAuthContext +from auth.registry_jwt_auth import identity_from_bearer_token, InvalidJWTException +from data import model # TODO: remove this after service keys are decoupled +from data.database import ServiceKeyApprovalType +from initdb import setup_database_for_testing, finished_database_for_testing +from util.morecollections import AttrDict +from util.security.registry_jwt import ANONYMOUS_SUB, build_context_and_subject + +TEST_AUDIENCE = app.config['SERVER_HOSTNAME'] +TEST_USER = AttrDict({'username': 'joeuser', 'uuid': 'foobar', 'enabled': True}) +MAX_SIGNED_S = 3660 +TOKEN_VALIDITY_LIFETIME_S = 60 * 60 # 1 hour +ANONYMOUS_SUB = '(anonymous)' +SERVICE_NAME = 'quay' + +# This import has to come below any references to "app". 
+from test.fixtures import * + + +def _access(typ='repository', name='somens/somerepo', actions=None): + actions = [] if actions is None else actions + return [{ + 'type': typ, + 'name': name, + 'actions': actions, + }] + + +def _delete_field(token_data, field_name): + token_data.pop(field_name) + return token_data + + +def _token_data(access=[], context=None, audience=TEST_AUDIENCE, user=TEST_USER, iat=None, + exp=None, nbf=None, iss=None, subject=None): + if subject is None: + _, subject = build_context_and_subject(ValidatedAuthContext(user=user)) + return { + 'iss': iss or instance_keys.service_name, + 'aud': audience, + 'nbf': nbf if nbf is not None else int(time.time()), + 'iat': iat if iat is not None else int(time.time()), + 'exp': exp if exp is not None else int(time.time() + TOKEN_VALIDITY_LIFETIME_S), + 'sub': subject, + 'access': access, + 'context': context, + } + + +def _token(token_data, key_id=None, private_key=None, skip_header=False, alg=None): + key_id = key_id or instance_keys.local_key_id + private_key = private_key or instance_keys.local_private_key + + if alg == "none": + private_key = None + + token_headers = {'kid': key_id} + + if skip_header: + token_headers = {} + + token_data = jwt.encode(token_data, private_key, alg or 'RS256', headers=token_headers) + return 'Bearer {0}'.format(token_data) + + +def _parse_token(token): + return identity_from_bearer_token(token)[0] + + +def test_accepted_token(initialized_db): + token = _token(_token_data()) + identity = _parse_token(token) + assert identity.id == TEST_USER.username, 'should be %s, but was %s' % (TEST_USER.username, + identity.id) + assert len(identity.provides) == 0 + + anon_token = _token(_token_data(user=None)) + anon_identity = _parse_token(anon_token) + assert anon_identity.id == ANONYMOUS_SUB, 'should be %s, but was %s' % (ANONYMOUS_SUB, + anon_identity.id) + assert len(identity.provides) == 0 + + +@pytest.mark.parametrize('access', [ + (_access(actions=['pull', 'push'])), + (_access(actions=['pull', '*'])), + (_access(actions=['*', 'push'])), + (_access(actions=['*'])), + (_access(actions=['pull', '*', 'push'])),]) +def test_token_with_access(access, initialized_db): + token = _token(_token_data(access=access)) + identity = _parse_token(token) + assert identity.id == TEST_USER.username, 'should be %s, but was %s' % (TEST_USER.username, + identity.id) + assert len(identity.provides) == 1 + + role = list(identity.provides)[0][3] + if "*" in access[0]['actions']: + assert role == 'admin' + elif "push" in access[0]['actions']: + assert role == 'write' + elif "pull" in access[0]['actions']: + assert role == 'read' + + +@pytest.mark.parametrize('token', [ + pytest.param(_token( + _token_data(access=[{ + 'toipe': 'repository', + 'namesies': 'somens/somerepo', + 'akshuns': ['pull', 'push', '*']}])), id='bad access'), + pytest.param(_token(_token_data(audience='someotherapp')), id='bad aud'), + pytest.param(_token(_delete_field(_token_data(), 'aud')), id='no aud'), + pytest.param(_token(_token_data(nbf=int(time.time()) + 600)), id='future nbf'), + pytest.param(_token(_delete_field(_token_data(), 'nbf')), id='no nbf'), + pytest.param(_token(_token_data(iat=int(time.time()) + 600)), id='future iat'), + pytest.param(_token(_delete_field(_token_data(), 'iat')), id='no iat'), + pytest.param(_token(_token_data(exp=int(time.time()) + MAX_SIGNED_S * 2)), id='exp too long'), + pytest.param(_token(_token_data(exp=int(time.time()) - 60)), id='expired'), + pytest.param(_token(_delete_field(_token_data(), 'exp')), id='no 
exp'), + pytest.param(_token(_delete_field(_token_data(), 'sub')), id='no sub'), + pytest.param(_token(_token_data(iss='badissuer')), id='bad iss'), + pytest.param(_token(_delete_field(_token_data(), 'iss')), id='no iss'), + pytest.param(_token(_token_data(), skip_header=True), id='no header'), + pytest.param(_token(_token_data(), key_id='someunknownkey'), id='bad key'), + pytest.param(_token(_token_data(), key_id='kid7'), id='bad key :: kid7'), + pytest.param(_token(_token_data(), alg='none', private_key=None), id='none alg'), + pytest.param('some random token', id='random token'), + pytest.param('Bearer: sometokenhere', id='extra bearer'), + pytest.param('\nBearer: dGVzdA', id='leading newline'), +]) +def test_invalid_jwt(token, initialized_db): + with pytest.raises(InvalidJWTException): + _parse_token(token) + + +def test_mixing_keys_e2e(initialized_db): + token_data = _token_data() + + # Create a new key for testing. + p, key = model.service_keys.generate_service_key(instance_keys.service_name, None, kid='newkey', + name='newkey', metadata={}) + private_key = p.exportKey('PEM') + + # Test first with the new valid, but unapproved key. + unapproved_key_token = _token(token_data, key_id='newkey', private_key=private_key) + with pytest.raises(InvalidJWTException): + _parse_token(unapproved_key_token) + + # Approve the key and try again. + admin_user = model.user.get_user('devtable') + model.service_keys.approve_service_key(key.kid, ServiceKeyApprovalType.SUPERUSER, approver=admin_user) + + valid_token = _token(token_data, key_id='newkey', private_key=private_key) + + identity = _parse_token(valid_token) + assert identity.id == TEST_USER.username + assert len(identity.provides) == 0 + + # Try using a different private key with the existing key ID. + bad_private_token = _token(token_data, key_id='newkey', + private_key=instance_keys.local_private_key) + with pytest.raises(InvalidJWTException): + _parse_token(bad_private_token) + + # Try using a different key ID with the existing private key. + kid_mismatch_token = _token(token_data, key_id=instance_keys.local_key_id, + private_key=private_key) + with pytest.raises(InvalidJWTException): + _parse_token(kid_mismatch_token) + + # Delete the new key. + key.delete_instance(recursive=True) + + # Ensure it still works (via the cache.) + deleted_key_token = _token(token_data, key_id='newkey', private_key=private_key) + identity = _parse_token(deleted_key_token) + assert identity.id == TEST_USER.username + assert len(identity.provides) == 0 + + # Break the cache. + instance_keys.clear_cache() + + # Ensure the key no longer works. + with pytest.raises(InvalidJWTException): + _parse_token(deleted_key_token) + + +@pytest.mark.parametrize('token', [ + u'someunicodetoken✡', + u'\xc9\xad\xbd', +]) +def test_unicode_token(token): + with pytest.raises(InvalidJWTException): + _parse_token(token) diff --git a/auth/test/test_scopes.py b/auth/test/test_scopes.py new file mode 100644 index 000000000..b71140136 --- /dev/null +++ b/auth/test/test_scopes.py @@ -0,0 +1,50 @@ +import pytest + +from auth.scopes import ( + scopes_from_scope_string, validate_scope_string, ALL_SCOPES, is_subset_string) + + +@pytest.mark.parametrize( + 'scopes_string, expected', + [ + # Valid single scopes. + ('repo:read', ['repo:read']), + ('repo:admin', ['repo:admin']), + + # Invalid scopes. + ('not:valid', []), + ('repo:admins', []), + + # Valid scope strings. 
+ ('repo:read repo:admin', ['repo:read', 'repo:admin']), + ('repo:read,repo:admin', ['repo:read', 'repo:admin']), + ('repo:read,repo:admin repo:write', ['repo:read', 'repo:admin', 'repo:write']), + + # Partially invalid scopes. + ('repo:read,not:valid', []), + ('repo:read repo:admins', []), + + # Invalid scope strings. + ('repo:read|repo:admin', []), + + # Mixture of delimiters. + ('repo:read, repo:admin', []),]) +def test_parsing(scopes_string, expected): + expected_scope_set = {ALL_SCOPES[scope_name] for scope_name in expected} + parsed_scope_set = scopes_from_scope_string(scopes_string) + assert parsed_scope_set == expected_scope_set + assert validate_scope_string(scopes_string) == bool(expected) + + +@pytest.mark.parametrize('superset, subset, result', [ + ('repo:read', 'repo:read', True), + ('repo:read repo:admin', 'repo:read', True), + ('repo:read,repo:admin', 'repo:read', True), + ('repo:read,repo:admin', 'repo:admin', True), + ('repo:read,repo:admin', 'repo:admin repo:read', True), + ('', 'repo:read', False), + ('unknown:tag', 'repo:read', False), + ('repo:read unknown:tag', 'repo:read', False), + ('repo:read,unknown:tag', 'repo:read', False),]) +def test_subset_string(superset, subset, result): + assert is_subset_string(superset, subset) == result diff --git a/auth/test/test_signedgrant.py b/auth/test/test_signedgrant.py new file mode 100644 index 000000000..e200f0bf1 --- /dev/null +++ b/auth/test/test_signedgrant.py @@ -0,0 +1,32 @@ +import pytest + +from auth.signedgrant import validate_signed_grant, generate_signed_token, SIGNATURE_PREFIX +from auth.validateresult import AuthKind, ValidateResult + + +@pytest.mark.parametrize('header, expected_result', [ + pytest.param('', ValidateResult(AuthKind.signed_grant, missing=True), id='Missing'), + pytest.param('somerandomtoken', ValidateResult(AuthKind.signed_grant, missing=True), + id='Invalid header'), + pytest.param('token somerandomtoken', ValidateResult(AuthKind.signed_grant, missing=True), + id='Random Token'), + pytest.param('token ' + SIGNATURE_PREFIX + 'foo', + ValidateResult(AuthKind.signed_grant, + error_message='Signed grant could not be validated'), + id='Invalid token'), +]) +def test_token(header, expected_result): + assert validate_signed_grant(header) == expected_result + + +def test_valid_grant(): + header = 'token ' + generate_signed_token({'a': 'b'}, {'c': 'd'}) + expected = ValidateResult(AuthKind.signed_grant, signed_data={ + 'grants': { + 'a': 'b', + }, + 'user_context': { + 'c': 'd' + }, + }) + assert validate_signed_grant(header) == expected diff --git a/auth/test/test_validateresult.py b/auth/test/test_validateresult.py new file mode 100644 index 000000000..90875da76 --- /dev/null +++ b/auth/test/test_validateresult.py @@ -0,0 +1,63 @@ +import pytest + +from auth.auth_context import get_authenticated_context +from auth.validateresult import AuthKind, ValidateResult +from data import model +from data.database import AppSpecificAuthToken +from test.fixtures import * + +def get_user(): + return model.user.get_user('devtable') + +def get_app_specific_token(): + return AppSpecificAuthToken.get() + +def get_robot(): + robot, _ = model.user.create_robot('somebot', get_user()) + return robot + +def get_token(): + return model.token.create_delegate_token('devtable', 'simple', 'sometoken') + +def get_oauthtoken(): + user = model.user.get_user('devtable') + return list(model.oauth.list_access_tokens_for_user(user))[0] + +def get_signeddata(): + return {'grants': {'a': 'b'}, 'user_context': {'c': 'd'}} + 
+@pytest.mark.parametrize('get_entity,entity_kind', [ + (get_user, 'user'), + (get_robot, 'robot'), + (get_token, 'token'), + (get_oauthtoken, 'oauthtoken'), + (get_signeddata, 'signed_data'), + (get_app_specific_token, 'appspecifictoken'), +]) +def test_apply_context(get_entity, entity_kind, app): + assert get_authenticated_context() is None + + entity = get_entity() + args = {} + args[entity_kind] = entity + + result = ValidateResult(AuthKind.basic, **args) + result.apply_to_context() + + expected_user = entity if entity_kind == 'user' or entity_kind == 'robot' else None + if entity_kind == 'oauthtoken': + expected_user = entity.authorized_user + + if entity_kind == 'appspecifictoken': + expected_user = entity.user + + expected_token = entity if entity_kind == 'token' else None + expected_oauth = entity if entity_kind == 'oauthtoken' else None + expected_appspecifictoken = entity if entity_kind == 'appspecifictoken' else None + expected_grant = entity if entity_kind == 'signed_data' else None + + assert get_authenticated_context().authed_user == expected_user + assert get_authenticated_context().token == expected_token + assert get_authenticated_context().oauthtoken == expected_oauth + assert get_authenticated_context().appspecifictoken == expected_appspecifictoken + assert get_authenticated_context().signed_data == expected_grant diff --git a/auth/validateresult.py b/auth/validateresult.py new file mode 100644 index 000000000..3235104e0 --- /dev/null +++ b/auth/validateresult.py @@ -0,0 +1,56 @@ +from enum import Enum +from auth.auth_context_type import ValidatedAuthContext, ContextEntityKind + + +class AuthKind(Enum): + cookie = 'cookie' + basic = 'basic' + oauth = 'oauth' + signed_grant = 'signed_grant' + credentials = 'credentials' + + +class ValidateResult(object): + """ A result of validating auth in one form or another. """ + def __init__(self, kind, missing=False, user=None, token=None, oauthtoken=None, + robot=None, appspecifictoken=None, signed_data=None, error_message=None): + self.kind = kind + self.missing = missing + self.error_message = error_message + self.context = ValidatedAuthContext(user=user, token=token, oauthtoken=oauthtoken, robot=robot, + appspecifictoken=appspecifictoken, signed_data=signed_data) + + def tuple(self): + return (self.kind, self.missing, self.error_message, self.context.tuple()) + + def __eq__(self, other): + return self.tuple() == other.tuple() + + def apply_to_context(self): + """ Applies this auth result to the auth context and Flask-Principal. """ + self.context.apply_to_request_context() + + def with_kind(self, kind): + """ Returns a copy of this result, but with the kind replaced. """ + result = ValidateResult(kind, missing=self.missing, error_message=self.error_message) + result.context = self.context + return result + + def __repr__(self): + return 'ValidateResult: %s (missing: %s, error: %s)' % (self.kind, self.missing, + self.error_message) + + @property + def authed_user(self): + """ Returns the authenticated user, whether directly, or via an OAuth token. """ + return self.context.authed_user + + @property + def has_nonrobot_user(self): + """ Returns whether a user (not a robot) was authenticated successfully. """ + return self.context.has_nonrobot_user + + @property + def auth_valid(self): + """ Returns whether authentication successfully occurred. 
""" + return self.context.entity_kind != ContextEntityKind.anonymous diff --git a/avatars/avatars.py b/avatars/avatars.py index eaf58bc52..737b51191 100644 --- a/avatars/avatars.py +++ b/avatars/avatars.py @@ -65,6 +65,9 @@ class BaseAvatar(object): def get_data_for_org(self, org): return self.get_data(org.username, org.email, 'org') + def get_data_for_external_user(self, external_user): + return self.get_data(external_user.username, external_user.email, 'user') + def get_data(self, name, email_or_id, kind='user'): """ Computes and returns the full data block for the avatar: { @@ -74,7 +77,11 @@ class BaseAvatar(object): } """ colors = self.colors - hash_value = hashlib.md5(email_or_id.strip().lower()).hexdigest() + + # Note: email_or_id may be None if gotten from external auth when email is disabled, + # so use the username in that case. + username_email_or_id = email_or_id or name + hash_value = hashlib.md5(username_email_or_id.strip().lower()).hexdigest() byte_count = int(math.ceil(math.log(len(colors), 16))) byte_data = hash_value[0:byte_count] diff --git a/bill-of-materials.json b/bill-of-materials.json new file mode 100644 index 000000000..9f552239f --- /dev/null +++ b/bill-of-materials.json @@ -0,0 +1,3857 @@ +[ + { + "format": "Python", + "license": "MIT License", + "project": "aiowsgi" + }, + { + "format": "Python", + "license": "MIT License", + "project": "alembic" + }, + { + "format": "Python", + "license": "MIT License", + "project": "APScheduler" + }, + { + "format": "Python", + "license": "MIT License", + "project": "asn1crypto" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "autobahn" + }, + { + "format": "Python", + "license": "MIT License", + "project": "azure-common" + }, + { + "format": "Python", + "license": "MIT License", + "project": "azure-nspkg" + }, + { + "format": "Python", + "license": "MIT License", + "project": "azure-storage-blob" + }, + { + "format": "Python", + "license": "MIT License", + "project": "azure-storage-common" + }, + { + "format": "Python", + "license": "MIT License", + "project": "azure-storage-nspkg" + }, + { + "format": "Python", + "license": "BSD License", + "project": "Babel" + }, + { + "format": "Python", + "license": "PSF License", + "project": "backports.tempfile" + }, + { + "format": "Python", + "license": "MIT License", + "project": "beautifulsoup4" + }, + { + "format": "Python", + "license": "Bittorrent Open Source License", + "project": "bencode" + }, + { + "format": "Python", + "license": "MIT License", + "project": "bintrees" + }, + { + "format": "Python", + "license": "MIT License", + "project": "bitmath" + }, + { + "format": "Python", + "license": "MIT License", + "project": "blinker" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "boto3" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "botocore" + }, + { + "format": "Python", + "license": "MIT License", + "project": "cachetools" + }, + { + "format": "Python", + "license": "Mozilla Public License, version 2.0", + "project": "certifi" + }, + { + "format": "Python", + "license": "MIT License", + "project": "cffi" + }, + { + "format": "Python", + "license": "LGPL", + "project": "chardet" + }, + { + "format": "Python", + "license": "BSD License", + "project": "click" + }, + { + "format": "Python", + "license": "PSF License", + "project": "contextlib2" + }, + { + "format": "Python", + "license": "BSD or Apache Software License 2.0", + "project": "cryptography" + }, 
+ { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "debtcollector" + }, + { + "format": "Python", + "license": "BSD 2-Clause License", + "project": "decorator" + }, + { + "format": "Python", + "license": "BSD License", + "project": "enum34" + }, + { + "format": "Python", + "license": "BSD License", + "project": "Flask" + }, + { + "format": "Python", + "license": "MIT License", + "project": "Flask-Cors" + }, + { + "format": "Python", + "license": "MIT License", + "project": "Flask-Login" + }, + { + "format": "Python", + "license": "BSD License", + "project": "Flask-Mail" + }, + { + "format": "Python", + "license": "MIT License", + "project": "Flask-Principal" + }, + { + "format": "Python", + "license": "BSD License", + "project": "Flask-RESTful" + }, + { + "format": "Python", + "license": "MIT License", + "project": "funcparserlib" + }, + { + "format": "Python", + "license": "ASL", + "project": "funcsigs" + }, + { + "format": "Python", + "license": "PSF License", + "project": "functools32" + }, + { + "format": "Python", + "license": "Unlicense", + "project": "furl" + }, + { + "format": "Python", + "license": "MIT License", + "project": "future" + }, + { + "format": "Python", + "license": "PSF License", + "project": "futures" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "geoip2" + }, + { + "format": "Python", + "license": "MIT License", + "project": "gevent" + }, + { + "format": "Python", + "license": "MIT License", + "project": "gipc" + }, + { + "format": "Python", + "license": "MIT License", + "project": "greenlet" + }, + { + "format": "Python", + "license": "MIT License", + "project": "gunicorn" + }, + { + "format": "Python", + "license": "BSD License", + "project": "hiredis" + }, + { + "format": "Python", + "license": "MIT License", + "project": "html5lib" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "httmock" + }, + { + "format": "Python", + "license": "MIT License", + "project": "httplib2" + }, + { + "format": "Python", + "license": "MIT License", + "project": "httpretty" + }, + { + "format": "Python", + "license": "BSD-like", + "project": "idna" + }, + { + "format": "Python", + "license": "PSF License", + "project": "ipaddress" + }, + { + "format": "Python", + "license": "MIT License", + "project": "iso8601" + }, + { + "format": "Python", + "license": "BSD 3-Clause License", + "project": "itsdangerous" + }, + { + "format": "Python", + "license": "BSD License", + "project": "Jinja2" + }, + { + "format": "Python", + "license": "MIT License", + "project": "jmespath" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "jsonpath-rw" + }, + { + "format": "Python", + "license": "MIT License", + "project": "jsonschema" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "keystoneauth1" + }, + { + "format": "Python", + "license": "MIT License", + "project": "Mako" + }, + { + "format": "Python", + "license": "MIT License", + "project": "marisa-trie" + }, + { + "format": "Python", + "license": "BSD License", + "project": "MarkupSafe" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "maxminddb" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "mixpanel" + }, + { + "format": "Python", + "license": "BSD 2-Clause License", + "project": "mock" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": 
"mockredispy" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "monotonic" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "moto" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "msgpack" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "namedlist" + }, + { + "format": "Python", + "license": "BSD 3-Clause License", + "project": "ndg-httpsclient" + }, + { + "format": "Python", + "license": "BSD License", + "project": "netaddr" + }, + { + "format": "Python", + "license": "MIT License", + "project": "netifaces" + }, + { + "format": "Python", + "license": "BSD License", + "project": "oauthlib" + }, + { + "format": "Python", + "license": "Unlicense", + "project": "orderedmultidict" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "oslo.config" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "oslo.i18n" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "oslo.serialization" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "oslo.utils" + }, + { + "format": "Python", + "license": "MIT License", + "project": "pathvalidate" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "pbr" + }, + { + "format": "Python", + "license": "MIT License", + "project": "peewee" + }, + { + "format": "Python", + "license": "Standard Pil License", + "project": "Pillow" + }, + { + "format": "Python", + "license": "BSD License", + "project": "ply" + }, + { + "format": "Python", + "license": "BSD License", + "project": "psutil" + }, + { + "format": "Python", + "license": "LGPL with exceptions or ZPL", + "project": "psycopg2" + }, + { + "format": "Python", + "license": "BSD License", + "project": "py-bcrypt" + }, + { + "format": "Python", + "license": "BSD License", + "project": "pyasn1" + }, + { + "format": "Python", + "license": "BSD License", + "project": "pyasn1-modules" + }, + { + "format": "Python", + "license": "BSD License", + "project": "pycparser" + }, + { + "format": "Python", + "license": "BSD 2-Clause License", + "project": "pycryptodome" + }, + { + "format": "Python", + "license": "BSD 2-Clause License", + "project": "pycryptodomex" + }, + { + "format": "Python", + "license": "LGPL-3.0", + "project": "PyGithub" + }, + { + "format": "Python", + "license": "LGPL", + "project": "pygpgme" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "pyjwkest" + }, + { + "format": "Python", + "license": "MIT License", + "project": "PyJWT" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "pymemcache" + }, + { + "format": "Python", + "license": "MIT License", + "project": "PyMySQL" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "pyOpenSSL" + }, + { + "format": "Python", + "license": "MIT License", + "project": "pyparsing" + }, + { + "format": "Python", + "license": "BSD 3-Clause License", + "project": "PyPDF2" + }, + { + "format": "Python", + "license": "BSD 2-Clause License", + "project": "python-dateutil" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "python-editor" + }, + { + "format": "Python", + "license": "LGPL-3.0", + "project": "python-gitlab" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + 
"project": "python-keystoneclient" + }, + { + "format": "Python", + "license": "python style", + "project": "python-ldap" + }, + { + "format": "Python", + "license": "MIT License", + "project": "python-magic" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "python-swiftclient" + }, + { + "format": "Python", + "license": "MIT License", + "project": "pytz" + }, + { + "format": "Python", + "license": "MIT License", + "project": "PyYAML" + }, + { + "format": "Python", + "license": "BSD License", + "project": "raven" + }, + { + "format": "Python", + "license": "WTFPL", + "project": "recaptcha2" + }, + { + "format": "Python", + "license": "MIT License", + "project": "redis" + }, + { + "format": "Python", + "license": "MIT License", + "project": "redlock" + }, + { + "format": "Python", + "license": "BSD 3-Clause License", + "project": "reportlab" + }, + { + "format": "Python", + "license": "ISC", + "project": "requests-oauthlib" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "rfc3986" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "s3transfer" + }, + { + "format": "Python", + "license": "BSD License", + "project": "semantic-version" + }, + { + "format": "Python", + "license": "MIT License", + "project": "six" + }, + { + "format": "Python", + "license": "MIT License", + "project": "SQLAlchemy" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "stevedore" + }, + { + "format": "Python", + "license": "MIT License", + "project": "stringscore" + }, + { + "format": "Python", + "license": "MIT License", + "project": "stripe" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "toposort" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "trollius" + }, + { + "format": "Python", + "license": "MIT License", + "project": "tzlocal" + }, + { + "format": "Python", + "license": "MIT License", + "project": "urllib3" + }, + { + "format": "Python", + "license": "ZPL 2.1", + "project": "waitress" + }, + { + "format": "Python", + "license": "MIT License", + "project": "WebOb" + }, + { + "format": "Python", + "license": "BSD License", + "project": "Werkzeug" + }, + { + "format": "Python", + "license": "BSD License", + "project": "wrapt" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "xhtml2pdf" + }, + { + "format": "Python", + "license": "MIT License", + "project": "xmltodict" + }, + { + "format": "Python", + "license": "Apache Software License 2.0", + "project": "yapf" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "abbrev" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "accepts" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "acorn" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "acorn-dynamic-import" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "adm-zip" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "after" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "agent-base" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "ajv" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "ajv-keywords" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "align-text" + }, + { + "format": 
"JavaScript", + "license": "MIT License", + "project": "alphanum-sort" + }, + { + "format": "JavaScript", + "license": "bsd-3-clause or MIT License", + "project": "amdefine" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "angular" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "angular-animate" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "angular-cookies" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "angular-mocks" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "angular-route" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "angular-sanitize" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "ansi-regex" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "ansi-styles" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "any-promise" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "anymatch" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "aproba" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "are-we-there-yet" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "argparse" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "arr-diff" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "arr-flatten" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "array-find-index" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "array-flatten" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "array-slice" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "array-union" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "array-uniq" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "array-unique" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "arraybuffer.slice" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "arrify" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "asn1" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "asn1.js" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "assert" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "assert-plus" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "ast-types" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "async" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "async-each" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "asynckit" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "autoprefixer" + }, + { + "format": "JavaScript", + "license": "Apache Software License 2.0", + "project": "aws-sign2" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "aws4" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "babel-code-frame" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "backo2" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "balanced-match" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": 
"base64-arraybuffer" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "base64-js" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "base64id" + }, + { + "format": "JavaScript", + "license": "BSD 3-Clause License", + "project": "bcrypt-pbkdf" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "better-assert" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "big.js" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "binary-extensions" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "blob" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "block-stream" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "blocking-proxy" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "bluebird" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "bn.js" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "body-parser" + }, + { + "format": "JavaScript", + "license": "BSD 3-Clause License", + "project": "boom" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "bootbox" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "bootstrap" + }, + { + "format": "JavaScript", + "license": "Apache Software License 2.0", + "project": "bootstrap-datepicker" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "brace-expansion" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "braces" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "brorand" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "browserify-aes" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "browserify-cipher" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "browserify-des" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "browserify-rsa" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "browserify-sign" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "browserify-zlib" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "browserslist" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "buffer" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "buffer-shims" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "buffer-xor" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "builtin-modules" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "builtin-status-codes" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "bytes" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "cal-heatmap" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "callsite" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "camel-case" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "camelcase" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "camelcase-keys" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "caniuse-api" + }, + { + "format": "JavaScript", + "license": "cc-by-4.0", + "project": "caniuse-db" + }, + 
{ + "format": "JavaScript", + "license": "Apache Software License 2.0", + "project": "caseless" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "center-align" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "chalk" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "chokidar" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "cipher-base" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "clap" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "clean-css" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "clipboard" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "cliui" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "clone" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "co" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "coa" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "code-point-at" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "color" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "color-convert" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "color-name" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "color-string" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "colormin" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "colors" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "combine-lists" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "combined-stream" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "commander" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "component-bind" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "component-emitter" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "component-inherit" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "concat-map" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "connect" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "console-browserify" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "console-control-strings" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "constants-browserify" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "content-disposition" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "content-type" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "cookie" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "cookie-signature" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "core-js" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "core-util-is" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "create-ecdh" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "create-hash" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "create-hmac" + }, + { + "format": "JavaScript", + "license": "BSD 3-Clause 
License", + "project": "cryptiles" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "crypto-browserify" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "css-color-names" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "css-loader" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "css-selector-tokenizer" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "cssesc" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "cssnano" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "csso" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "currently-unhandled" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "custom-event" + }, + { + "format": "JavaScript", + "license": "BSD 3-Clause License", + "project": "d3" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "dashdash" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "date-now" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "dateformat" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "debug" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "decamelize" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "deep-extend" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "deep-is" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "defined" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "del" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "delayed-stream" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "delegate" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "delegates" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "depd" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "des.js" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "destroy" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "di" + }, + { + "format": "JavaScript", + "license": "BSD 3-Clause License", + "project": "diff" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "diffie-hellman" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "dom-serialize" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "domain-browser" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "duplexer" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "ecc-jsbn" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "ee-first" + }, + { + "format": "JavaScript", + "license": "Apache Software License 2.0", + "project": "ejs" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "electron-to-chromium" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "elliptic" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "emojis-list" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "encodeurl" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "engine.io" + }, + { + "format": "JavaScript", + "license": 
"MIT License", + "project": "engine.io-client" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "engine.io-parser" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "enhanced-resolve" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "ent" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "eonasdan-bootstrap-datetimepicker" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "errno" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "error-ex" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "es5-shim" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "es6-shim" + }, + { + "format": "JavaScript", + "license": "Apache Software License 2.0", + "project": "es6-templates" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "escape-html" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "escape-string-regexp" + }, + { + "format": "JavaScript", + "license": "BSD 2-Clause License", + "project": "escodegen" + }, + { + "format": "JavaScript", + "license": "BSD 2-Clause License", + "project": "esprima" + }, + { + "format": "JavaScript", + "license": "BSD License", + "project": "estraverse" + }, + { + "format": "JavaScript", + "license": "BSD License", + "project": "esutils" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "etag" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "eventemitter3" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "events" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "evp_bytestokey" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "exit" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "expand-braces" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "expand-brackets" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "expand-range" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "express" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "extend" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "extglob" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "extsprintf" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "fast-levenshtein" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "fastparse" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "file-saver" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "filename-regex" + }, + { + "format": "JavaScript", + "license": "BSD 3-Clause License", + "project": "filesize" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "fill-range" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "finalhandler" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "find-up" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "flatten" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "for-in" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "for-own" + }, + { + "format": "JavaScript", + "license": "Apache Software 
License 2.0", + "project": "forever-agent" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "form-data" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "forwarded" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "fresh" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "fs-access" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "fs.realpath" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "fstream" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "fstream-ignore" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "function-bind" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "gauge" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "generate-function" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "generate-object-property" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "get-caller-file" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "get-stdin" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "getpass" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "glob" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "glob-base" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "glob-parent" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "globby" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "good-listener" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "graceful-fs" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "graceful-readlink" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "gzip-size" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "handlebars" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "har-validator" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "has" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "has-ansi" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "has-binary" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "has-cors" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "has-flag" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "has-unicode" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "hash.js" + }, + { + "format": "JavaScript", + "license": "BSD 3-Clause License", + "project": "hawk" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "he" + }, + { + "format": "JavaScript", + "license": "BSD 3-Clause License", + "project": "highlight.js" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "hmac-drbg" + }, + { + "format": "JavaScript", + "license": "BSD 3-Clause License", + "project": "hoek" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "hosted-git-info" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "html-comment-regex" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "html-loader" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "html-minifier" + }, + { + 
"format": "JavaScript", + "license": "MIT License", + "project": "http-errors" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "http-proxy" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "http-signature" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "https-browserify" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "https-proxy-agent" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "iconv-lite" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "icss-replace-symbols" + }, + { + "format": "JavaScript", + "license": "BSD 3-Clause License", + "project": "ieee754" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "indent-string" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "indexes-of" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "indexof" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "inflight" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "inherits" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "ini" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "interpret" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "invert-kv" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "ipaddr.js" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-absolute-url" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-arrayish" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-binary-path" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-buffer" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-builtin-module" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-dotfile" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-equal-shallow" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-extendable" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-extglob" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-finite" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-fullwidth-code-point" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-glob" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-my-json-valid" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-number" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-path-cwd" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-path-in-cwd" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-path-inside" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-plain-obj" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-posix-bracket" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-primitive" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-property" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-svg" + }, + { + "format": "JavaScript", + "license": 
"MIT License", + "project": "is-typedarray" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "is-utf8" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "isarray" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "isbinaryfile" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "isexe" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "isobject" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "isstream" + }, + { + "format": "JavaScript", + "license": "BSD 3-Clause License", + "project": "istanbul" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "jasmine" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "jasmine-core" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "jasmine-ts" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "jasminewd2" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "jodid25519" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "jquery" + }, + { + "format": "JavaScript", + "license": "BSD 3-Clause License", + "project": "js-base64" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "js-tokens" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "js-yaml" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "jsbn" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "jsesc" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "json-loader" + }, + { + "format": "JavaScript", + "license": "AFL v2.1 and BSD License", + "project": "json-schema" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "json-stable-stringify" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "json-stringify-safe" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "json3" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "json5" + }, + { + "format": "JavaScript", + "license": "Public Domain", + "project": "jsonify" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "jsonpointer" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "jsprim" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "karma" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "karma-chrome-launcher" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "karma-coverage" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "karma-es6-shim" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "karma-jasmine" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "karma-webpack" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "kind-of" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "lazy-cache" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "lcid" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "levn" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "load-json-file" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "loader-runner" + }, + { + "format": "JavaScript", + 
"license": "MIT License", + "project": "loader-utils" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "lodash" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "lodash._createcompounder" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "lodash._root" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "lodash.camelcase" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "lodash.deburr" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "lodash.memoize" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "lodash.uniq" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "lodash.words" + }, + { + "format": "JavaScript", + "license": "Apache Software License 2.0", + "project": "log4js" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "longest" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "loud-rejection" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "lower-case" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "lru-cache" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "macaddress" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "make-error" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "map-obj" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "math-expression-evaluator" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "media-typer" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "memory-fs" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "meow" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "merge-descriptors" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "methods" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "micromatch" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "miller-rabin" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "mime" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "mime-db" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "mime-types" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "minimalistic-assert" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "minimalistic-crypto-utils" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "minimatch" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "minimist" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "mkdirp" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "moment" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "moment-timezone" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "ms" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "nan" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "ncname" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "negotiator" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "ng-metadata" + }, + { + 
"format": "JavaScript", + "license": "MIT License", + "project": "ngtemplate-loader" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "no-case" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "node-libs-browser" + }, + { + "format": "JavaScript", + "license": "BSD 3-Clause License", + "project": "node-pre-gyp" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "nopt" + }, + { + "format": "JavaScript", + "license": "BSD 2-Clause License", + "project": "normalize-package-data" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "normalize-path" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "normalize-range" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "normalize-url" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "npmlog" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "null-check" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "num2fraction" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "number-is-nan" + }, + { + "format": "JavaScript", + "license": "Apache Software License 2.0", + "project": "oauth-sign" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "object-assign" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "object-component" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "object.omit" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "on-finished" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "once" + }, + { + "format": "JavaScript", + "license": "WTFPL or MIT", + "project": "opener" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "optimist" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "optionator" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "options" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "os-browserify" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "os-locale" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "os-tmpdir" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "pako" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "param-case" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "parse-asn1" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "parse-glob" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "parse-json" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "parsejson" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "parseqs" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "parseuri" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "parseurl" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "path-browserify" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "path-exists" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "path-is-absolute" + }, + { + "format": "JavaScript", + "license": "WTFPL or MIT", + "project": "path-is-inside" + }, + { + "format": "JavaScript", + "license": "MIT 
License", + "project": "path-parse" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "path-to-regexp" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "path-type" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "pbkdf2" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "pify" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "pinkie" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "pinkie-promise" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-calc" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-colormin" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-convert-values" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-discard-comments" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-discard-duplicates" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-discard-empty" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-discard-overridden" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-discard-unused" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-filter-plugins" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-merge-idents" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-merge-longhand" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-merge-rules" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-message-helpers" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-minify-font-values" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-minify-gradients" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-minify-params" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-minify-selectors" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "postcss-modules-extract-imports" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-modules-local-by-default" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "postcss-modules-scope" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "postcss-modules-values" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-normalize-charset" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-normalize-url" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-ordered-values" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-reduce-idents" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-reduce-initial" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-reduce-transforms" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-selector-parser" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": 
"postcss-svgo" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-unique-selectors" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-value-parser" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "postcss-zindex" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "prelude-ls" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "prepend-http" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "preserve" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "private" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "process" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "process-nextick-args" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "protractor" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "proxy-addr" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "prr" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "public-encrypt" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "punycode" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "q" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "qjobs" + }, + { + "format": "JavaScript", + "license": "BSD 3-Clause License", + "project": "qs" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "query-string" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "querystring" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "querystring-es3" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "querystringify" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "randomatic" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "randombytes" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "range-parser" + }, + { + "format": "JavaScript", + "license": "BSD 2-Clause License", + "project": "raven-js" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "raw-body" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "raw-loader" + }, + { + "format": "JavaScript", + "license": "BSD 2-Clause License or MIT License or Apache Software License 2.0", + "project": "rc" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "read-pkg" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "read-pkg-up" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "readable-stream" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "readdirp" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "recast" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "redent" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "reduce-css-calc" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "reduce-function-call" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "regenerate" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "regex-cache" + }, + { + "format": "JavaScript", + "license": "MIT License", + 
"project": "regexpu-core" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "regjsgen" + }, + { + "format": "JavaScript", + "license": "BSD 2-Clause License", + "project": "regjsparser" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "relateurl" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "repeat-element" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "repeat-string" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "repeating" + }, + { + "format": "JavaScript", + "license": "Apache Software License 2.0", + "project": "request" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "require-directory" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "require-main-filename" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "requires-port" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "resolve" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "restangular" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "right-align" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "rimraf" + }, + { + "format": "JavaScript", + "license": "BSD 3-Clause License", + "project": "ripemd160" + }, + { + "format": "JavaScript", + "license": "Apache Software License 2.0", + "project": "rxjs" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "safe-buffer" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "saucelabs" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "sax" + }, + { + "format": "JavaScript", + "license": "ISC and MIT License", + "project": "sax" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "script-loader" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "select" + }, + { + "format": "JavaScript", + "license": "Apache Software License 2.0", + "project": "selenium-webdriver" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "semver" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "send" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "serve-static" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "set-blocking" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "set-immediate-shim" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "setimmediate" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "setprototypeof" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "sha.js" + }, + { + "format": "JavaScript", + "license": "BSD 3-Clause License", + "project": "showdown" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "signal-exit" + }, + { + "format": "JavaScript", + "license": "BSD License", + "project": "sntp" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "socket.io" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "socket.io-adapter" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "socket.io-client" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "socket.io-parser" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "sort-keys" + }, + { + 
"format": "JavaScript", + "license": "MIT License", + "project": "source-list-map" + }, + { + "format": "JavaScript", + "license": "BSD 3-Clause License", + "project": "source-map" + }, + { + "format": "JavaScript", + "license": "BSD License", + "project": "source-map" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "source-map-loader" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "source-map-support" + }, + { + "format": "JavaScript", + "license": "Apache Software License 2.0", + "project": "spdx-correct" + }, + { + "format": "JavaScript", + "license": "MIT and cc-by-3.0", + "project": "spdx-expression-parse" + }, + { + "format": "JavaScript", + "license": "Unlicense", + "project": "spdx-license-ids" + }, + { + "format": "JavaScript", + "license": "BSD 3-Clause License", + "project": "sprintf-js" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "sshpk" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "statuses" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "stream-browserify" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "stream-http" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "strict-uri-encode" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "string-width" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "string_decoder" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "stringstream" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "strip-ansi" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "strip-bom" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "strip-indent" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "strip-json-comments" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "style-loader" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "supports-color" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "svgo" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "symbol-observable" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "tapable" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "tar" + }, + { + "format": "JavaScript", + "license": "BSD 2-Clause License", + "project": "tar-pack" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "through" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "timers-browserify" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "tiny-emitter" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "tmp" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "to-array" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "to-arraybuffer" + }, + { + "format": "JavaScript", + "license": "BSD 3-Clause License", + "project": "tough-cookie" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "trim-newlines" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "ts-loader" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "ts-mocks" + }, + { + "format": "JavaScript", + "license": "MIT License", + 
"project": "ts-node" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "tsconfig" + }, + { + "format": "JavaScript", + "license": "Apache Software License 2.0", + "project": "tslib" + }, + { + "format": "JavaScript", + "license": "Apache Software License 2.0", + "project": "tslint" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "tsutils" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "tty-browserify" + }, + { + "format": "JavaScript", + "license": "Apache Software License 2.0", + "project": "tunnel-agent" + }, + { + "format": "JavaScript", + "license": "Unlicense", + "project": "tweetnacl" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "type-check" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "type-is" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "types/angular" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "types/angular-mocks" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "types/angular-route" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "types/angular-sanitize" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "types/core-js" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "types/jasmine" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "types/jquery" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "types/node" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "types/q" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "types/selenium-webdriver" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "types/showdown" + }, + { + "format": "JavaScript", + "license": "Apache Software License 2.0", + "project": "typescript" + }, + { + "format": "JavaScript", + "license": "BSD 2-Clause License", + "project": "uglify-js" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "uglify-to-browserify" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "uid-number" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "ultron" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "underscore" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "uniq" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "uniqid" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "uniqs" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "unpipe" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "upper-case" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "urijs" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "url" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "url-parse" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "user-home" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "useragent" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "util" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "util-deprecate" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": 
"utils-merge" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "uuid" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "v8flags" + }, + { + "format": "JavaScript", + "license": "Apache Software License 2.0", + "project": "validate-npm-package-license" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "vary" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "vendors" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "verror" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "vm-browserify" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "void-elements" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "watchpack" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "webdriver-js-extender" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "webdriver-manager" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "webpack" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "webpack-bundle-analyzer" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "webpack-dev-middleware" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "webpack-sources" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "whet.extend" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "which" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "which-module" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "wide-align" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "window-size" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "wordwrap" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "wrap-ansi" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "wrappy" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "ws" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "wtf-8" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "xml-char-classes" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "xml2js" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "xmlbuilder" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "xmlhttprequest-ssl" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "xtend" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "y18n" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "yargs" + }, + { + "format": "JavaScript", + "license": "ISC", + "project": "yargs-parser" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "yeast" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "yn" + }, + { + "format": "JavaScript", + "license": "MIT License", + "project": "zeroclipboard" + } +] \ No newline at end of file diff --git a/binary_dependencies/nginx_1.9.5-1_amd64.deb b/binary_dependencies/nginx_1.9.5-1_amd64.deb deleted file mode 100644 index 3c459b96b..000000000 Binary files a/binary_dependencies/nginx_1.9.5-1_amd64.deb and /dev/null differ diff --git a/boot.py b/boot.py old mode 100644 new mode 100755 index 
a1990d253..228fb2987 --- a/boot.py +++ b/boot.py @@ -4,15 +4,22 @@ from datetime import datetime, timedelta from urlparse import urlunparse from jinja2 import Template -from cachetools import lru_cache +from cachetools.func import lru_cache +import logging import release import os.path from app import app +from data.model import ServiceKeyDoesNotExist from data.model.release import set_region_release +from data.model.service_keys import get_service_key from util.config.database import sync_database_with_config from util.generatepresharedkey import generate_key +from _init import CONF_DIR + + +logger = logging.getLogger(__name__) @lru_cache(maxsize=1) @@ -38,49 +45,82 @@ def get_audience(): return urlunparse((scheme, hostname + ':' + port, '', '', '', '')) +def _verify_service_key(): + try: + with open(app.config['INSTANCE_SERVICE_KEY_KID_LOCATION']) as f: + quay_key_id = f.read() + + try: + get_service_key(quay_key_id, approved_only=False) + assert os.path.exists(app.config['INSTANCE_SERVICE_KEY_LOCATION']) + return quay_key_id + except ServiceKeyDoesNotExist: + logger.exception('Could not find non-expired existing service key %s; creating a new one', + quay_key_id) + return None + + # Found a valid service key, so exiting. + except IOError: + logger.exception('Could not load existing service key; creating a new one') + return None + + def setup_jwt_proxy(): """ Creates a service key for quay to use in the jwtproxy and generates the JWT proxy configuration. """ - if os.path.exists('conf/jwtproxy_conf.yaml'): - # Proxy is already setup. - return + if os.path.exists(os.path.join(CONF_DIR, 'jwtproxy_conf.yaml')): + # Proxy is already setup. Make sure the service key is still valid. + quay_key_id = _verify_service_key() + if quay_key_id is not None: + return - # Generate the key for this Quay instance to use. - minutes_until_expiration = app.config.get('QUAY_SERVICE_KEY_EXPIRATION', 120) - expiration = datetime.now() + timedelta(minutes=minutes_until_expiration) - quay_key, quay_key_id = generate_key('quay', get_audience(), expiration_date=expiration) + # Ensure we have an existing key if in read-only mode. + if app.config.get('REGISTRY_STATE', 'normal') == 'readonly': + quay_key_id = _verify_service_key() + if quay_key_id is None: + raise Exception('No valid service key found for read-only registry.') + else: + # Generate the key for this Quay instance to use. + minutes_until_expiration = app.config.get('INSTANCE_SERVICE_KEY_EXPIRATION', 120) + expiration = datetime.now() + timedelta(minutes=minutes_until_expiration) + quay_key, quay_key_id = generate_key(app.config['INSTANCE_SERVICE_KEY_SERVICE'], + get_audience(), expiration_date=expiration) - with open('conf/quay.kid', mode='w') as f: - f.truncate(0) - f.write(quay_key_id) + with open(app.config['INSTANCE_SERVICE_KEY_KID_LOCATION'], mode='w') as f: + f.truncate(0) + f.write(quay_key_id) - with open('conf/quay.pem', mode='w') as f: - f.truncate(0) - f.write(quay_key.exportKey()) + with open(app.config['INSTANCE_SERVICE_KEY_LOCATION'], mode='w') as f: + f.truncate(0) + f.write(quay_key.exportKey()) # Generate the JWT proxy configuration. 
audience = get_audience() registry = audience + '/keys' security_issuer = app.config.get('SECURITY_SCANNER_ISSUER_NAME', 'security_scanner') - with open("conf/jwtproxy_conf.yaml.jnj") as f: + with open(os.path.join(CONF_DIR, 'jwtproxy_conf.yaml.jnj')) as f: template = Template(f.read()) rendered = template.render( + conf_dir=CONF_DIR, audience=audience, registry=registry, key_id=quay_key_id, security_issuer=security_issuer, + service_key_location=app.config['INSTANCE_SERVICE_KEY_LOCATION'], ) - with open('conf/jwtproxy_conf.yaml', 'w') as f: + with open(os.path.join(CONF_DIR, 'jwtproxy_conf.yaml'), 'w') as f: f.write(rendered) def main(): - if app.config.get('SETUP_COMPLETE', False): - sync_database_with_config(app.config) - setup_jwt_proxy() + if not app.config.get('SETUP_COMPLETE', False): + raise Exception('Your configuration bundle is either not mounted or setup has not been completed') + + sync_database_with_config(app.config) + setup_jwt_proxy() # Record deploy if release.REGION and release.GIT_HEAD: diff --git a/buildman/MAINTAINERS b/buildman/MAINTAINERS new file mode 100644 index 000000000..99447c637 --- /dev/null +++ b/buildman/MAINTAINERS @@ -0,0 +1,2 @@ +Charlton Austin (@charltonaustin) +Joseph Schorr (@josephschorr) diff --git a/buildman/asyncutil.py b/buildman/asyncutil.py index 26b16e28a..accb13542 100644 --- a/buildman/asyncutil.py +++ b/buildman/asyncutil.py @@ -1,5 +1,15 @@ +from concurrent.futures import ThreadPoolExecutor from functools import partial -from trollius import get_event_loop + +from trollius import get_event_loop, coroutine + + +def wrap_with_threadpool(obj, worker_threads=1): + """ + Wraps a class in an async executor so that it can be safely used in an event loop like trollius. + """ + async_executor = ThreadPoolExecutor(worker_threads) + return AsyncWrapper(obj, executor=async_executor), async_executor class AsyncWrapper(object): @@ -25,3 +35,8 @@ class AsyncWrapper(object): return self._loop.run_in_executor(self._executor, callable_delegate_attr) return wrapper + + @coroutine + def __call__(self, *args, **kwargs): + callable_delegate_attr = partial(self._delegate, *args, **kwargs) + return self._loop.run_in_executor(self._executor, callable_delegate_attr) diff --git a/buildman/builder.py b/buildman/builder.py index 08f5f733a..0261c262d 100644 --- a/buildman/builder.py +++ b/buildman/builder.py @@ -1,10 +1,12 @@ import logging import os -import features import time import socket +import features + from app import app, userfiles as user_files, build_logs, dockerfile_build_queue +from util.log import logfile_path from buildman.manager.enterprise import EnterpriseManager from buildman.manager.ephemeral import EphemeralBuilderManager @@ -35,6 +37,12 @@ def run_build_manager(): time.sleep(1000) return + if app.config.get('REGISTRY_STATE', 'normal') == 'readonly': + logger.debug('Building is disabled while in read-only mode.') + while True: + time.sleep(1000) + return + build_manager_config = app.config.get('BUILD_MANAGER') if build_manager_config is None: return @@ -77,13 +85,13 @@ def run_build_manager(): server.run('0.0.0.0', websocket_port, controller_port, ssl=ssl_context) if __name__ == '__main__': - logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT) + logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False) logging.getLogger('peewee').setLevel(logging.WARN) logging.getLogger('boto').setLevel(logging.WARN) if app.config.get('EXCEPTION_LOG_TYPE', 'FakeSentry') == 'Sentry': buildman_name = '%s:buildman' % 
socket.gethostname() setup_logging(SentryHandler(app.config.get('SENTRY_DSN', ''), name=buildman_name, - level=logging.ERROR)) + level=logging.ERROR)) run_build_manager() diff --git a/buildman/component/buildcomponent.py b/buildman/component/buildcomponent.py index 8421bdee2..62c64e6b8 100644 --- a/buildman/component/buildcomponent.py +++ b/buildman/component/buildcomponent.py @@ -1,28 +1,37 @@ import datetime +import os import time import logging import json import trollius -import re from autobahn.wamp.exception import ApplicationError from trollius import From, Return +from active_migration import ActiveDataMigration, ERTMigrationFlags from buildman.server import BuildJobResult from buildman.component.basecomponent import BaseComponent +from buildman.component.buildparse import extract_current_step from buildman.jobutil.buildjob import BuildJobLoadException from buildman.jobutil.buildstatus import StatusHandler from buildman.jobutil.workererror import WorkerError -from data.database import BUILD_PHASE +from app import app +from data.database import BUILD_PHASE, UseThenDisconnect +from data.model import InvalidRepositoryBuildException +from data.registry_model import registry_model +from util import slash_join -HEARTBEAT_DELTA = datetime.timedelta(seconds=30) +HEARTBEAT_DELTA = datetime.timedelta(seconds=60) BUILD_HEARTBEAT_DELAY = datetime.timedelta(seconds=30) HEARTBEAT_TIMEOUT = 10 INITIAL_TIMEOUT = 25 SUPPORTED_WORKER_VERSIONS = ['0.3'] +# Label which marks a manifest with its source build ID. +INTERNAL_LABEL_BUILD_UUID = 'quay.build.uuid' + logger = logging.getLogger(__name__) class ComponentStatus(object): @@ -61,22 +70,23 @@ class BuildComponent(BaseComponent): def onJoin(self, details): logger.debug('Registering methods and listeners for component %s', self.builder_realm) yield From(self.register(self._on_ready, u'io.quay.buildworker.ready')) - yield From(self.register(self._determine_cache_tag, - u'io.quay.buildworker.determinecachetag')) + yield From(self.register(self._determine_cache_tag, u'io.quay.buildworker.determinecachetag')) yield From(self.register(self._ping, u'io.quay.buildworker.ping')) + yield From(self.register(self._on_log_message, u'io.quay.builder.logmessagesynchronously')) - yield From(self.subscribe(self._on_heartbeat, 'io.quay.builder.heartbeat')) - yield From(self.subscribe(self._on_log_message, 'io.quay.builder.logmessage')) + yield From(self.subscribe(self._on_heartbeat, u'io.quay.builder.heartbeat')) yield From(self._set_status(ComponentStatus.WAITING)) - def is_ready(self): - """ Determines whether a build component is ready to begin a build. """ - return self._component_status == ComponentStatus.RUNNING - @trollius.coroutine def start_build(self, build_job): """ Starts a build. 
""" + if self._component_status not in (ComponentStatus.WAITING, ComponentStatus.RUNNING): + logger.debug('Could not start build for component %s (build %s, worker version: %s): %s', + self.builder_realm, build_job.repo_build.uuid, self._worker_version, + self._component_status) + raise Return() + logger.debug('Starting build for component %s (build %s, worker version: %s)', self.builder_realm, build_job.repo_build.uuid, self._worker_version) @@ -93,7 +103,8 @@ class BuildComponent(BaseComponent): try: build_config = build_job.build_config except BuildJobLoadException as irbe: - self._build_failure('Could not load build job information', irbe) + yield From(self._build_failure('Could not load build job information', irbe)) + raise Return() base_image_information = {} @@ -111,20 +122,22 @@ class BuildComponent(BaseComponent): # defaults to empty string to avoid requiring a pointer on the builder. # sub_directory: The location within the build package of the Dockerfile and the build context. # repository: The repository for which this build is occurring. - # registry: The registry for which this build is occuring (e.g. 'quay.io', 'staging.quay.io'). + # registry: The registry for which this build is occuring (e.g. 'quay.io'). # pull_token: The token to use when pulling the cache for building. # push_token: The token to use to push the built image. # tag_names: The name(s) of the tag(s) for the newly built image. # base_image: The image name and credentials to use to conduct the base image pull. # username: The username for pulling the base image (if any). # password: The password for pulling the base image (if any). + context, dockerfile_path = self.extract_dockerfile_args(build_config) build_arguments = { 'build_package': build_job.get_build_package_url(self.user_files), - 'sub_directory': build_config.get('build_subdir', ''), + 'context': context, + 'dockerfile_path': dockerfile_path, 'repository': repository_name, 'registry': self.registry_hostname, - 'pull_token': build_job.repo_build.access_token.code, - 'push_token': build_job.repo_build.access_token.code, + 'pull_token': build_job.repo_build.access_token.get_code(), + 'push_token': build_job.repo_build.access_token.get_code(), 'tag_names': build_config.get('docker_tags', ['latest']), 'base_image': base_image_information, } @@ -134,24 +147,61 @@ class BuildComponent(BaseComponent): # url: url used to clone the git repository # sha: the sha1 identifier of the commit to check out # private_key: the key used to get read access to the git repository - if build_job.repo_build.trigger.private_key is not None: + + # TODO(remove-unenc): Remove legacy field. + private_key = None + if build_job.repo_build.trigger is not None and \ + build_job.repo_build.trigger.secure_private_key is not None: + private_key = build_job.repo_build.trigger.secure_private_key.decrypt() + + if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS) and \ + private_key is None and \ + build_job.repo_build.trigger is not None: + private_key = build_job.repo_build.trigger.private_key + + if private_key is not None: build_arguments['git'] = { 'url': build_config['trigger_metadata'].get('git_url', ''), 'sha': BuildComponent._commit_sha(build_config), - 'private_key': build_job.repo_build.trigger.private_key, + 'private_key': private_key or '', } + # If the build args have no buildpack, mark it as a failure before sending + # it to a builder instance. 
+ if not build_arguments['build_package'] and not build_arguments['git']: + logger.error('%s: insufficient build args: %s', + self._current_job.repo_build.uuid, build_arguments) + yield From(self._build_failure('Insufficient build arguments. No buildpack available.')) + raise Return() + # Invoke the build. logger.debug('Invoking build: %s', self.builder_realm) logger.debug('With Arguments: %s', build_arguments) - self.call("io.quay.builder.build", **build_arguments).add_done_callback(self._build_complete) + def build_complete_callback(result): + """ This function is used to execute a coroutine as the callback. """ + trollius.ensure_future(self._build_complete(result)) + + self.call("io.quay.builder.build", **build_arguments).add_done_callback(build_complete_callback) # Set the heartbeat for the future. If the builder never receives the build call, # then this will cause a timeout after 30 seconds. We know the builder has registered # by this point, so it makes sense to have a timeout. self._last_heartbeat = datetime.datetime.utcnow() + BUILD_HEARTBEAT_DELAY + @staticmethod + def extract_dockerfile_args(build_config): + dockerfile_path = build_config.get('build_subdir', '') + context = build_config.get('context', '') + if not (dockerfile_path == '' or context == ''): + # This should not happen and can be removed when we centralize validating build_config + dockerfile_abspath = slash_join('', dockerfile_path) + if ".." in os.path.relpath(dockerfile_abspath, context): + return os.path.split(dockerfile_path) + dockerfile_path = os.path.relpath(dockerfile_abspath, context) + + return context, dockerfile_path + @staticmethod def _commit_sha(build_config): """ Determines whether the metadata is using an old schema or not and returns the commit. """ @@ -159,6 +209,14 @@ class BuildComponent(BaseComponent): old_commit_sha = build_config['trigger_metadata'].get('commit_sha', '') return commit_sha or old_commit_sha + @staticmethod + def name_and_path(subdir): + """ Returns the dockerfile path and name """ + if subdir.endswith("/"): + subdir += "Dockerfile" + elif not subdir.endswith("Dockerfile"): + subdir += "/Dockerfile" + return os.path.split(subdir) @staticmethod def _total_completion(statuses, total_images): @@ -196,6 +254,8 @@ class BuildComponent(BaseComponent): status_dict[status_completion_key] = \ BuildComponent._total_completion(images, max(len(images), num_images)) + + @trollius.coroutine def _on_log_message(self, phase, json_data): """ Tails log messages and updates the build status. """ # Update the heartbeat. @@ -222,15 +282,23 @@ class BuildComponent(BaseComponent): current_status_string = str(fully_unwrapped.encode('utf-8')) if current_status_string and phase == BUILD_PHASE.BUILDING: - step_increment = re.search(r'Step ([0-9]+) :', current_status_string) - if step_increment: - current_step = int(step_increment.group(1)) + current_step = extract_current_step(current_status_string) # Parse and update the phase and the status_dict. The status dictionary contains # the pull/push progress, as well as the current step index. 
with self._build_status as status_dict: - if self._build_status.set_phase(phase, log_data.get('status_data')): - logger.debug('Build %s has entered a new phase: %s', self.builder_realm, phase) + try: + changed_phase = yield From(self._build_status.set_phase(phase, log_data.get('status_data'))) + if changed_phase: + logger.debug('Build %s has entered a new phase: %s', self.builder_realm, phase) + elif self._current_job.repo_build.phase == BUILD_PHASE.CANCELLED: + build_id = self._current_job.repo_build.uuid + logger.debug('Trying to move cancelled build into phase: %s with id: %s', phase, build_id) + raise Return(False) + except InvalidRepositoryBuildException: + build_id = self._current_job.repo_build.uuid + logger.warning('Build %s was not found; repo was probably deleted', build_id) + raise Return(False) BuildComponent._process_pushpull_status(status_dict, phase, log_data, self._image_info) @@ -241,12 +309,13 @@ class BuildComponent(BaseComponent): # If the json data contains an error, then something went wrong with a push or pull. if 'error' in log_data: - self._build_status.set_error(log_data['error']) + yield From(self._build_status.set_error(log_data['error'])) if current_step is not None: - self._build_status.set_command(current_status_string) + yield From(self._build_status.set_command(current_status_string)) elif phase == BUILD_PHASE.BUILDING: - self._build_status.append_log(current_status_string) + yield From(self._build_status.append_log(current_status_string)) + raise Return(True) @trollius.coroutine def _determine_cache_tag(self, command_comments, base_image_name, base_image_tag, base_image_id): @@ -259,46 +328,72 @@ class BuildComponent(BaseComponent): tag_found = self._current_job.determine_cached_tag(base_image_id, command_comments) raise Return(tag_found or '') + @trollius.coroutine def _build_failure(self, error_message, exception=None): """ Handles and logs a failed build. """ - self._build_status.set_error(error_message, { - 'internal_error': str(exception) if exception else None - }) + yield From(self._build_status.set_error(error_message, { + 'internal_error': str(exception) if exception else None + })) build_id = self._current_job.repo_build.uuid logger.warning('Build %s failed with message: %s', build_id, error_message) # Mark that the build has finished (in an error state) - trollius.async(self._build_finished(BuildJobResult.ERROR)) + yield From(self._build_finished(BuildJobResult.ERROR)) + @trollius.coroutine def _build_complete(self, result): """ Wraps up a completed build. Handles any errors and calls self._build_finished. """ + build_id = self._current_job.repo_build.uuid + try: # Retrieve the result. This will raise an ApplicationError on any error that occurred. result_value = result.result() kwargs = {} # Note: If we are hitting an older builder that didn't return ANY map data, then the result - # value will be a bool instead of a proper CallResult object (because autobahn sucks). + # value will be a bool instead of a proper CallResult object. # Therefore: we have a try-except guard here to ensure we don't hit this pitfall. 
try: kwargs = result_value.kwresults except: pass - self._build_status.set_phase(BUILD_PHASE.COMPLETE) - trollius.async(self._build_finished(BuildJobResult.COMPLETE)) + try: + yield From(self._build_status.set_phase(BUILD_PHASE.COMPLETE)) + except InvalidRepositoryBuildException: + logger.warning('Build %s was not found; repo was probably deleted', build_id) + raise Return() + + yield From(self._build_finished(BuildJobResult.COMPLETE)) + + # Label the pushed manifests with the build metadata. + manifest_digests = kwargs.get('digests') or [] + repository = registry_model.lookup_repository(self._current_job.namespace, + self._current_job.repo_name) + if repository is not None: + for digest in manifest_digests: + with UseThenDisconnect(app.config): + manifest = registry_model.lookup_manifest_by_digest(repository, digest, + require_available=True) + if manifest is None: + continue + + registry_model.create_manifest_label(manifest, INTERNAL_LABEL_BUILD_UUID, + build_id, 'internal', 'text/plain') # Send the notification that the build has completed successfully. - self._current_job.send_notification('build_success', image_id=kwargs.get('image_id')) + self._current_job.send_notification('build_success', + image_id=kwargs.get('image_id'), + manifest_digests=manifest_digests) except ApplicationError as aex: - build_id = self._current_job.repo_build.uuid worker_error = WorkerError(aex.error, aex.kwargs.get('base_error')) # Write the error to the log. - self._build_status.set_error(worker_error.public_message(), worker_error.extra_data(), - internal_error=worker_error.is_internal_error(), - requeued=self._current_job.has_retries_remaining()) + yield From(self._build_status.set_error(worker_error.public_message(), + worker_error.extra_data(), + internal_error=worker_error.is_internal_error(), + requeued=self._current_job.has_retries_remaining())) # Send the notification that the build has failed. self._current_job.send_notification('build_failure', @@ -306,17 +401,21 @@ class BuildComponent(BaseComponent): # Mark the build as completed. if worker_error.is_internal_error(): - logger.exception('Got remote internal exception for build: %s', build_id) - trollius.async(self._build_finished(BuildJobResult.INCOMPLETE)) + logger.exception('[BUILD INTERNAL ERROR: Remote] Build ID: %s: %s', build_id, + worker_error.public_message()) + yield From(self._build_finished(BuildJobResult.INCOMPLETE)) else: logger.debug('Got remote failure exception for build %s: %s', build_id, aex) - trollius.async(self._build_finished(BuildJobResult.ERROR)) + yield From(self._build_finished(BuildJobResult.ERROR)) + + # Remove the current job. + self._current_job = None + @trollius.coroutine def _build_finished(self, job_status): """ Alerts the parent that a build has completed and sets the status back to running. """ yield From(self.parent_manager.job_completed(self._current_job, job_status, self)) - self._current_job = None # Set the component back to a running state. yield From(self._set_status(ComponentStatus.RUNNING)) @@ -365,7 +464,7 @@ class BuildComponent(BaseComponent): if self._component_status == ComponentStatus.TIMED_OUT: return - logger.debug('got heartbeat on realm %s', self.builder_realm) + logger.debug('Got heartbeat on realm %s', self.builder_realm) self._last_heartbeat = datetime.datetime.utcnow() @trollius.coroutine @@ -383,9 +482,8 @@ class BuildComponent(BaseComponent): raise Return() # If there is an active build, write the heartbeat to its status. 
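Note: the bare try/except around `result_value.kwresults` at the top of this hunk guards against older builders that report a plain boolean rather than a result object carrying keyword results. A hedged standalone equivalent that makes the intent explicit (names are illustrative; the original deliberately catches everything, while this sketch narrows to AttributeError):

def extract_kwresults(result_value):
    # Older builders may hand back a bare bool instead of an object carrying
    # kwresults; treat that the same as an empty keyword-result map.
    try:
        return result_value.kwresults or {}
    except AttributeError:
        return {}

class _FakeCallResult(object):
    kwresults = {'image_id': 'sha256:abc', 'digests': []}

assert extract_kwresults(_FakeCallResult()) == {'image_id': 'sha256:abc', 'digests': []}
assert extract_kwresults(True) == {}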
- build_status = self._build_status - if build_status is not None: - with build_status as status_dict: + if self._build_status is not None: + with self._build_status as status_dict: status_dict['heartbeat'] = int(time.time()) # Mark the build item. @@ -419,13 +517,23 @@ class BuildComponent(BaseComponent): # If we still have a running job, then it has not completed and we need to tell the parent # manager. if self._current_job is not None: - self._build_status.set_error('Build worker timed out', internal_error=True, - requeued=self._current_job.has_retries_remaining()) + yield From(self._build_status.set_error('Build worker timed out', internal_error=True, + requeued=self._current_job.has_retries_remaining())) + build_id = self._current_job.build_uuid + logger.error('[BUILD INTERNAL ERROR: Timeout] Build ID: %s', build_id) yield From(self.parent_manager.job_completed(self._current_job, - BuildJobResult.INCOMPLETE, - self)) - self._current_job = None + BuildJobResult.INCOMPLETE, + self)) # Unregister the current component so that it cannot be invoked again. self.parent_manager.build_component_disposed(self, True) + + # Remove the job reference. + self._current_job = None + + @trollius.coroutine + def cancel_build(self): + self.parent_manager.build_component_disposed(self, True) + self._current_job = None + yield From(self._set_status(ComponentStatus.RUNNING)) diff --git a/buildman/component/buildparse.py b/buildman/component/buildparse.py new file mode 100644 index 000000000..3560c0861 --- /dev/null +++ b/buildman/component/buildparse.py @@ -0,0 +1,15 @@ +import re + +def extract_current_step(current_status_string): + """ Attempts to extract the current step numeric identifier from the given status string. Returns the step + number or None if none. + """ + # Older format: `Step 12 :` + # Newer format: `Step 4/13 :` + step_increment = re.search(r'Step ([0-9]+)/([0-9]+) :', current_status_string) + if step_increment: + return int(step_increment.group(1)) + + step_increment = re.search(r'Step ([0-9]+) :', current_status_string) + if step_increment: + return int(step_increment.group(1)) diff --git a/buildman/component/test/test_buildcomponent.py b/buildman/component/test/test_buildcomponent.py new file mode 100644 index 000000000..c4e026916 --- /dev/null +++ b/buildman/component/test/test_buildcomponent.py @@ -0,0 +1,36 @@ +import pytest + +from buildman.component.buildcomponent import BuildComponent + + +@pytest.mark.parametrize('input,expected_path,expected_file', [ + ("", "/", "Dockerfile"), + ("/", "/", "Dockerfile"), + ("/Dockerfile", "/", "Dockerfile"), + ("/server.Dockerfile", "/", "server.Dockerfile"), + ("/somepath", "/somepath", "Dockerfile"), + ("/somepath/", "/somepath", "Dockerfile"), + ("/somepath/Dockerfile", "/somepath", "Dockerfile"), + ("/somepath/server.Dockerfile", "/somepath", "server.Dockerfile"), + ("/somepath/some_other_path", "/somepath/some_other_path", "Dockerfile"), + ("/somepath/some_other_path/", "/somepath/some_other_path", "Dockerfile"), + ("/somepath/some_other_path/Dockerfile", "/somepath/some_other_path", "Dockerfile"), + ("/somepath/some_other_path/server.Dockerfile", "/somepath/some_other_path", "server.Dockerfile"), +]) +def test_path_is_dockerfile(input, expected_path, expected_file): + actual_path, actual_file = BuildComponent.name_and_path(input) + assert actual_path == expected_path + assert actual_file == expected_file + +@pytest.mark.parametrize('build_config,context,dockerfile_path', [ + ({}, '', ''), + ({'build_subdir': '/builddir/Dockerfile'}, '', 
'/builddir/Dockerfile'), + ({'context': '/builddir'}, '/builddir', ''), + ({'context': '/builddir', 'build_subdir': '/builddir/Dockerfile'}, '/builddir', 'Dockerfile'), + ({'context': '/some_other_dir/Dockerfile', 'build_subdir': '/builddir/Dockerfile'}, '/builddir', 'Dockerfile'), + ({'context': '/', 'build_subdir':'Dockerfile'}, '/', 'Dockerfile') +]) +def test_extract_dockerfile_args(build_config, context, dockerfile_path): + actual_context, actual_dockerfile_path = BuildComponent.extract_dockerfile_args(build_config) + assert context == actual_context + assert dockerfile_path == actual_dockerfile_path diff --git a/buildman/component/test/test_buildparse.py b/buildman/component/test/test_buildparse.py new file mode 100644 index 000000000..3bdb7295e --- /dev/null +++ b/buildman/component/test/test_buildparse.py @@ -0,0 +1,16 @@ +import pytest + +from buildman.component.buildparse import extract_current_step + + +@pytest.mark.parametrize('input,expected_step', [ + ("", None), + ("Step a :", None), + ("Step 1 :", 1), + ("Step 1 : ", 1), + ("Step 1/2 : ", 1), + ("Step 2/17 : ", 2), + ("Step 4/13 : ARG somearg=foo", 4), +]) +def test_extract_current_step(input, expected_step): + assert extract_current_step(input) == expected_step diff --git a/buildman/enums.py b/buildman/enums.py index 2a5cb1978..f88d2b690 100644 --- a/buildman/enums.py +++ b/buildman/enums.py @@ -1,3 +1,5 @@ +from data.database import BUILD_PHASE + class BuildJobResult(object): """ Build job result enum """ INCOMPLETE = 'incomplete' @@ -11,3 +13,9 @@ class BuildServerStatus(object): RUNNING = 'running' SHUTDOWN = 'shutting_down' EXCEPTION = 'exception' + +RESULT_PHASES = { + BuildJobResult.INCOMPLETE: BUILD_PHASE.INTERNAL_ERROR, + BuildJobResult.COMPLETE: BUILD_PHASE.COMPLETE, + BuildJobResult.ERROR: BUILD_PHASE.ERROR, +} diff --git a/buildman/jobutil/buildjob.py b/buildman/jobutil/buildjob.py index dbbb8113f..f245ce2bf 100644 --- a/buildman/jobutil/buildjob.py +++ b/buildman/jobutil/buildjob.py @@ -1,10 +1,14 @@ import json import logging -from cachetools import lru_cache -from endpoints.notificationhelper import spawn_notification +from app import app +from cachetools.func import lru_cache +from notifications import spawn_notification from data import model -from util.imagetree import ImageTree +from data.registry_model import registry_model +from data.registry_model.datatypes import RepositoryReference +from data.database import UseThenDisconnect +from util.morecollections import AttrDict logger = logging.getLogger(__name__) @@ -13,6 +17,7 @@ class BuildJobLoadException(Exception): """ Exception raised if a build job could not be instantiated for some reason. """ pass + class BuildJob(object): """ Represents a single in-progress build job. 
""" def __init__(self, job_item): @@ -20,43 +25,45 @@ class BuildJob(object): try: self.job_details = json.loads(job_item.body) + self.build_notifier = BuildJobNotifier(self.build_uuid) except ValueError: raise BuildJobLoadException( - 'Could not parse build queue item config with ID %s' % self.job_details['build_uuid'] + 'Could not parse build queue item config with ID %s' % self.job_details['build_uuid'] ) + @property + def retries_remaining(self): + return self.job_item.retries_remaining + def has_retries_remaining(self): return self.job_item.retries_remaining > 0 - def send_notification(self, kind, error_message=None, image_id=None): - tags = self.build_config.get('docker_tags', ['latest']) - event_data = { - 'build_id': self.repo_build.uuid, - 'build_name': self.repo_build.display_name, - 'docker_tags': tags, - 'trigger_id': self.repo_build.trigger.uuid, - 'trigger_kind': self.repo_build.trigger.service.name, - 'trigger_metadata': self.build_config.get('trigger_metadata', {}) - } - - if image_id is not None: - event_data['image_id'] = image_id - - if error_message is not None: - event_data['error_message'] = error_message - - spawn_notification(self.repo_build.repository, kind, event_data, - subpage='build/%s' % self.repo_build.uuid, - pathargs=['build', self.repo_build.uuid]) - + def send_notification(self, kind, error_message=None, image_id=None, manifest_digests=None): + self.build_notifier.send_notification(kind, error_message, image_id, manifest_digests) @lru_cache(maxsize=1) def _load_repo_build(self): - try: - return model.build.get_repository_build(self.job_details['build_uuid']) - except model.InvalidRepositoryBuildException: - raise BuildJobLoadException( - 'Could not load repository build with ID %s' % self.job_details['build_uuid']) + with UseThenDisconnect(app.config): + try: + return model.build.get_repository_build(self.build_uuid) + except model.InvalidRepositoryBuildException: + raise BuildJobLoadException( + 'Could not load repository build with ID %s' % self.build_uuid) + + @property + def build_uuid(self): + """ Returns the unique UUID for this build job. """ + return self.job_details['build_uuid'] + + @property + def namespace(self): + """ Returns the namespace under which this build is running. """ + return self.repo_build.repository.namespace_user.username + + @property + def repo_name(self): + """ Returns the name of the repository under which this build is running. """ + return self.repo_build.repository.name @property def repo_build(self): @@ -71,7 +78,7 @@ class BuildJob(object): if not self.repo_build.resource_key: return '' - return user_files.get_file_url(self.repo_build.resource_key, requires_cors=False) + return user_files.get_file_url(self.repo_build.resource_key, '127.0.0.1', requires_cors=False) @property def pull_credentials(self): @@ -89,67 +96,88 @@ class BuildJob(object): def determine_cached_tag(self, base_image_id=None, cache_comments=None): """ Returns the tag to pull to prime the cache or None if none. 
""" - cached_tag = None - if base_image_id and cache_comments: - cached_tag = self._determine_cached_tag_by_comments(base_image_id, cache_comments) - - if not cached_tag: - cached_tag = self._determine_cached_tag_by_tag() - + cached_tag = self._determine_cached_tag_by_tag() logger.debug('Determined cached tag %s for %s: %s', cached_tag, base_image_id, cache_comments) - return cached_tag - def _determine_cached_tag_by_comments(self, base_image_id, cache_commands): - """ Determines the tag to use for priming the cache for this build job, by matching commands - starting at the given base_image_id. This mimics the Docker cache checking, so it should, - in theory, provide "perfect" caching. - """ - # Lookup the base image in the repository. If it doesn't exist, nothing more to do. - repo_build = self.repo_build - repo_namespace = repo_build.repository.namespace_user.username - repo_name = repo_build.repository.name - - base_image = model.image.get_image(repo_build.repository, base_image_id) - if base_image is None: - return None - - # Build an in-memory tree of the full heirarchy of images in the repository. - all_images = model.image.get_repository_images_without_placements(repo_build.repository, - with_ancestor=base_image) - - all_tags = model.tag.list_repository_tags(repo_namespace, repo_name) - tree = ImageTree(all_images, all_tags, base_filter=base_image.id) - - # Find a path in the tree, starting at the base image, that matches the cache comments - # or some subset thereof. - def checker(step, image): - if step >= len(cache_commands): - return False - - full_command = '["/bin/sh", "-c", "%s"]' % cache_commands[step] - logger.debug('Checking step #%s: %s, %s == %s', step, image.id, image.command, full_command) - - return image.command == full_command - - path = tree.find_longest_path(base_image.id, checker) - if not path: - return None - - # Find any tag associated with the last image in the path. - return tree.tag_containing_image(path[-1]) - - def _determine_cached_tag_by_tag(self): """ Determines the cached tag by looking for one of the tags being built, and seeing if it exists in the repository. This is a fallback for when no comment information is available. 
""" - tags = self.build_config.get('docker_tags', ['latest']) - repository = self.repo_build.repository - existing_tags = model.tag.list_repository_tags(repository.namespace_user.username, - repository.name) - cached_tags = set(tags) & set([tag.name for tag in existing_tags]) - if cached_tags: - return list(cached_tags)[0] + with UseThenDisconnect(app.config): + tags = self.build_config.get('docker_tags', ['latest']) + repository = RepositoryReference.for_repo_obj(self.repo_build.repository) + matching_tag = registry_model.find_matching_tag(repository, tags) + if matching_tag is not None: + return matching_tag.name - return None + most_recent_tag = registry_model.get_most_recent_tag(repository) + if most_recent_tag is not None: + return most_recent_tag.name + + return None + + +class BuildJobNotifier(object): + """ A class for sending notifications to a job that only relies on the build_uuid """ + + def __init__(self, build_uuid): + self.build_uuid = build_uuid + + @property + def repo_build(self): + return self._load_repo_build() + + @lru_cache(maxsize=1) + def _load_repo_build(self): + try: + return model.build.get_repository_build(self.build_uuid) + except model.InvalidRepositoryBuildException: + raise BuildJobLoadException( + 'Could not load repository build with ID %s' % self.build_uuid) + + @property + def build_config(self): + try: + return json.loads(self.repo_build.job_config) + except ValueError: + raise BuildJobLoadException( + 'Could not parse repository build job config with ID %s' % self.repo_build.uuid + ) + + def send_notification(self, kind, error_message=None, image_id=None, manifest_digests=None): + with UseThenDisconnect(app.config): + tags = self.build_config.get('docker_tags', ['latest']) + trigger = self.repo_build.trigger + if trigger is not None and trigger.id is not None: + trigger_kind = trigger.service.name + else: + trigger_kind = None + + event_data = { + 'build_id': self.repo_build.uuid, + 'build_name': self.repo_build.display_name, + 'docker_tags': tags, + 'trigger_id': trigger.uuid if trigger is not None else None, + 'trigger_kind': trigger_kind, + 'trigger_metadata': self.build_config.get('trigger_metadata', {}) + } + + if image_id is not None: + event_data['image_id'] = image_id + + if manifest_digests: + event_data['manifest_digests'] = manifest_digests + + if error_message is not None: + event_data['error_message'] = error_message + + # TODO: remove when more endpoints have been converted to using + # interfaces + repo = AttrDict({ + 'namespace_name': self.repo_build.repository.namespace_user.username, + 'name': self.repo_build.repository.name, + }) + spawn_notification(repo, kind, event_data, + subpage='build/%s' % self.repo_build.uuid, + pathargs=['build', self.repo_build.uuid]) diff --git a/buildman/jobutil/buildstatus.py b/buildman/jobutil/buildstatus.py index 079615812..662dbaa10 100644 --- a/buildman/jobutil/buildstatus.py +++ b/buildman/jobutil/buildstatus.py @@ -1,12 +1,17 @@ -from data.database import BUILD_PHASE -from data import model -from redis import RedisError - import datetime import logging +from redis import RedisError +from trollius import From, Return, coroutine + +from data.database import BUILD_PHASE +from data import model +from buildman.asyncutil import AsyncWrapper + + logger = logging.getLogger(__name__) + class StatusHandler(object): """ Context wrapper for writing status to build logs. 
""" @@ -14,66 +19,70 @@ class StatusHandler(object): self._current_phase = None self._current_command = None self._uuid = repository_build_uuid - self._build_logs = build_logs + self._build_logs = AsyncWrapper(build_logs) + self._sync_build_logs = build_logs + self._build_model = AsyncWrapper(model.build) self._status = { - 'total_commands': 0, - 'current_command': None, - 'push_completion': 0.0, - 'pull_completion': 0.0, + 'total_commands': 0, + 'current_command': None, + 'push_completion': 0.0, + 'pull_completion': 0.0, } # Write the initial status. self.__exit__(None, None, None) + @coroutine def _append_log_message(self, log_message, log_type=None, log_data=None): log_data = log_data or {} log_data['datetime'] = str(datetime.datetime.now()) try: - self._build_logs.append_log_message(self._uuid, log_message, log_type, log_data) + yield From(self._build_logs.append_log_message(self._uuid, log_message, log_type, log_data)) except RedisError: logger.exception('Could not save build log for build %s: %s', self._uuid, log_message) + @coroutine def append_log(self, log_message, extra_data=None): if log_message is None: return - self._append_log_message(log_message, log_data=extra_data) + yield From(self._append_log_message(log_message, log_data=extra_data)) + @coroutine def set_command(self, command, extra_data=None): if self._current_command == command: - return + raise Return() self._current_command = command - self._append_log_message(command, self._build_logs.COMMAND, extra_data) + yield From(self._append_log_message(command, self._build_logs.COMMAND, extra_data)) + @coroutine def set_error(self, error_message, extra_data=None, internal_error=False, requeued=False): - self.set_phase(BUILD_PHASE.INTERNAL_ERROR if internal_error and requeued else BUILD_PHASE.ERROR) + error_phase = BUILD_PHASE.INTERNAL_ERROR if internal_error and requeued else BUILD_PHASE.ERROR + yield From(self.set_phase(error_phase)) extra_data = extra_data or {} extra_data['internal_error'] = internal_error - self._append_log_message(error_message, self._build_logs.ERROR, extra_data) + yield From(self._append_log_message(error_message, self._build_logs.ERROR, extra_data)) + @coroutine def set_phase(self, phase, extra_data=None): if phase == self._current_phase: - return False + raise Return(False) self._current_phase = phase - self._append_log_message(phase, self._build_logs.PHASE, extra_data) + yield From(self._append_log_message(phase, self._build_logs.PHASE, extra_data)) # Update the repository build with the new phase - repo_build = model.build.get_repository_build(self._uuid) - repo_build.phase = phase - repo_build.save() - - return True + raise Return(self._build_model.update_phase_then_close(self._uuid, phase)) def __enter__(self): return self._status def __exit__(self, exc_type, value, traceback): try: - self._build_logs.set_status(self._uuid, self._status) + self._sync_build_logs.set_status(self._uuid, self._status) except RedisError: logger.exception('Could not set status of build %s to %s', self._uuid, self._status) diff --git a/buildman/jobutil/workererror.py b/buildman/jobutil/workererror.py index 047140e8e..9245f312e 100644 --- a/buildman/jobutil/workererror.py +++ b/buildman/jobutil/workererror.py @@ -5,79 +5,91 @@ class WorkerError(object): self._base_message = base_message self._error_handlers = { - 'io.quay.builder.buildpackissue': { - 'message': 'Could not load build package', - 'is_internal': True, - }, + 'io.quay.builder.buildpackissue': { + 'message': 'Could not load build package', + 'is_internal': 
True, + }, - 'io.quay.builder.gitfailure': { - 'message': 'Could not clone git repository', - 'show_base_error': True, - }, + 'io.quay.builder.gitfailure': { + 'message': 'Could not clone git repository', + 'show_base_error': True, + }, - 'io.quay.builder.gitcheckout': { - 'message': 'Could not checkout git ref. Have you force pushed recently?', - }, + 'io.quay.builder.gitcheckout': { + 'message': 'Could not checkout git ref. If you force pushed recently, ' + + 'the commit may be missing.', + 'show_base_error': True, + }, - 'io.quay.builder.cannotextractbuildpack': { - 'message': 'Could not extract the contents of the build package' - }, + 'io.quay.builder.cannotextractbuildpack': { + 'message': 'Could not extract the contents of the build package' + }, - 'io.quay.builder.cannotpullforcache': { - 'message': 'Could not pull cached image', - 'is_internal': True - }, + 'io.quay.builder.cannotpullforcache': { + 'message': 'Could not pull cached image', + 'is_internal': True + }, - 'io.quay.builder.dockerfileissue': { - 'message': 'Could not find or parse Dockerfile', - 'show_base_error': True - }, + 'io.quay.builder.dockerfileissue': { + 'message': 'Could not find or parse Dockerfile', + 'show_base_error': True + }, - 'io.quay.builder.cannotpullbaseimage': { - 'message': 'Could not pull base image', - 'show_base_error': True - }, + 'io.quay.builder.cannotpullbaseimage': { + 'message': 'Could not pull base image', + 'show_base_error': True + }, - 'io.quay.builder.internalerror': { - 'message': 'An internal error occurred while building. Please submit a ticket.', - 'is_internal': True - }, + 'io.quay.builder.internalerror': { + 'message': 'An internal error occurred while building. Please submit a ticket.', + 'is_internal': True + }, - 'io.quay.builder.buildrunerror': { - 'message': 'Could not start the build process', - 'is_internal': True - }, + 'io.quay.builder.buildrunerror': { + 'message': 'Could not start the build process', + 'is_internal': True + }, - 'io.quay.builder.builderror': { - 'message': 'A build step failed', - 'show_base_error': True - }, + 'io.quay.builder.builderror': { + 'message': 'A build step failed', + 'show_base_error': True + }, - 'io.quay.builder.tagissue': { - 'message': 'Could not tag built image', - 'is_internal': True - }, + 'io.quay.builder.tagissue': { + 'message': 'Could not tag built image', + 'is_internal': True + }, - 'io.quay.builder.pushissue': { - 'message': 'Could not push built image', - 'show_base_error': True, - 'is_internal': True - }, + 'io.quay.builder.pushissue': { + 'message': 'Could not push built image', + 'show_base_error': True, + 'is_internal': True + }, - 'io.quay.builder.dockerconnecterror': { - 'message': 'Could not connect to Docker daemon', - 'is_internal': True - }, + 'io.quay.builder.dockerconnecterror': { + 'message': 'Could not connect to Docker daemon', + 'is_internal': True + }, - 'io.quay.builder.missingorinvalidargument': { - 'message': 'Missing required arguments for builder', - 'is_internal': True - }, + 'io.quay.builder.missingorinvalidargument': { + 'message': 'Missing required arguments for builder', + 'is_internal': True + }, - 'io.quay.builder.cachelookupissue': { - 'message': 'Error checking for a cached tag', - 'is_internal': True - } + 'io.quay.builder.cachelookupissue': { + 'message': 'Error checking for a cached tag', + 'is_internal': True + }, + + 'io.quay.builder.errorduringphasetransition': { + 'message': 'Error during phase transition. 
If this problem persists ' + + 'please contact customer support.', + 'is_internal': True + }, + + 'io.quay.builder.clientrejectedtransition': { + 'message': 'Build can not be finished due to user cancellation.', + } } def is_internal_error(self): @@ -98,10 +110,10 @@ class WorkerError(object): def extra_data(self): if self._base_message: return { - 'base_error': self._base_message, - 'error_code': self._error_code + 'base_error': self._base_message, + 'error_code': self._error_code } return { - 'error_code': self._error_code + 'error_code': self._error_code } diff --git a/buildman/manager/basemanager.py b/buildman/manager/basemanager.py index 83c192d36..23627830a 100644 --- a/buildman/manager/basemanager.py +++ b/buildman/manager/basemanager.py @@ -17,7 +17,7 @@ class BaseManager(object): every few minutes. """ self.job_heartbeat_callback(build_job) - def setup_time(self): + def overall_setup_time(self): """ Returns the number of seconds that the build system should wait before allowing the job to be picked up again after called 'schedule'. """ @@ -58,7 +58,7 @@ class BaseManager(object): @coroutine def job_completed(self, build_job, job_status, build_component): """ Method invoked once a job_item has completed, in some manner. The job_status will be - one of: incomplete, error, complete. Implementations of this method should call + one of: incomplete, error, complete. Implementations of this method should call coroutine self.job_complete_callback with a status of Incomplete if they wish for the job to be automatically requeued. """ diff --git a/buildman/manager/buildcanceller.py b/buildman/manager/buildcanceller.py new file mode 100644 index 000000000..dd49e9f38 --- /dev/null +++ b/buildman/manager/buildcanceller.py @@ -0,0 +1,27 @@ +import logging + +from buildman.manager.orchestrator_canceller import OrchestratorCanceller +from buildman.manager.noop_canceller import NoopCanceller + +logger = logging.getLogger(__name__) + +CANCELLERS = {'ephemeral': OrchestratorCanceller} + + +class BuildCanceller(object): + """ A class to manage cancelling a build """ + + def __init__(self, app=None): + self.build_manager_config = app.config.get('BUILD_MANAGER') + if app is None or self.build_manager_config is None: + self.handler = NoopCanceller() + else: + self.handler = None + + def try_cancel_build(self, uuid): + """ A method to kill a running build """ + if self.handler is None: + canceller = CANCELLERS.get(self.build_manager_config[0], NoopCanceller) + self.handler = canceller(self.build_manager_config[1]) + + return self.handler.try_cancel_build(uuid) diff --git a/buildman/manager/enterprise.py b/buildman/manager/enterprise.py index d74c50ea9..3d32a61d0 100644 --- a/buildman/manager/enterprise.py +++ b/buildman/manager/enterprise.py @@ -45,7 +45,7 @@ class EnterpriseManager(BaseManager): # production, build workers in enterprise are long-lived and register dynamically. self.register_component(REGISTRATION_REALM, DynamicRegistrationComponent) - def setup_time(self): + def overall_setup_time(self): # Builders are already registered, so the setup time should be essentially instant. We therefore # only return a minute here. 
return 60 @@ -79,7 +79,7 @@ class EnterpriseManager(BaseManager): @coroutine def job_completed(self, build_job, job_status, build_component): - self.job_complete_callback(build_job, job_status) + yield From(self.job_complete_callback(build_job, job_status)) def build_component_disposed(self, build_component, timed_out): self.all_components.remove(build_component) diff --git a/buildman/manager/ephemeral.py b/buildman/manager/ephemeral.py index 14b88ef8c..590a90dde 100644 --- a/buildman/manager/ephemeral.py +++ b/buildman/manager/ephemeral.py @@ -1,393 +1,555 @@ import logging -import etcd import uuid import calendar -import os.path import json +import time +from collections import namedtuple from datetime import datetime, timedelta -from trollius import From, coroutine, Return, async -from concurrent.futures import ThreadPoolExecutor -from urllib3.exceptions import ReadTimeoutError, ProtocolError +from six import iteritems + +from trollius import From, coroutine, Return, async, sleep from app import metric_queue +from buildman.orchestrator import (orchestrator_from_config, KeyEvent, + OrchestratorError, OrchestratorConnectionError, + ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION) from buildman.manager.basemanager import BaseManager -from buildman.manager.executor import PopenExecutor, EC2Executor +from buildman.manager.executor import PopenExecutor, EC2Executor, KubernetesExecutor from buildman.component.buildcomponent import BuildComponent from buildman.jobutil.buildjob import BuildJob -from buildman.asyncutil import AsyncWrapper from buildman.server import BuildJobResult +from util import slash_join from util.morecollections import AttrDict logger = logging.getLogger(__name__) -ETCD_MAX_WATCH_TIMEOUT = 30 -EC2_API_TIMEOUT = 20 -RETRY_IMMEDIATELY_TIMEOUT = 0 +JOB_PREFIX = 'building/' +LOCK_PREFIX = 'lock/' +REALM_PREFIX = 'realm/' +CANCEL_PREFIX = 'cancel/' +METRIC_PREFIX = 'metric/' + +CANCELED_LOCK_PREFIX = slash_join(LOCK_PREFIX, 'job-cancelled') +EXPIRED_LOCK_PREFIX = slash_join(LOCK_PREFIX, 'job-expired') + +EPHEMERAL_API_TIMEOUT = 20 +EPHEMERAL_SETUP_TIMEOUT = 500 + +RETRY_IMMEDIATELY_SLEEP_DURATION = 0 +TOO_MANY_WORKERS_SLEEP_DURATION = 10 -class EtcdAction(object): - GET = 'get' - SET = 'set' - EXPIRE = 'expire' - UPDATE = 'update' - DELETE = 'delete' - CREATE = 'create' - COMPARE_AND_SWAP = 'compareAndSwap' - COMPARE_AND_DELETE = 'compareAndDelete' +BuildInfo = namedtuple('BuildInfo', ['component', 'build_job', 'execution_id', 'executor_name']) class EphemeralBuilderManager(BaseManager): """ Build manager implementation for the Enterprise Registry. """ - _executors = { - 'popen': PopenExecutor, - 'ec2': EC2Executor, + + EXECUTORS = { + 'popen': PopenExecutor, + 'ec2': EC2Executor, + 'kubernetes': KubernetesExecutor, } - _etcd_client_klass = etcd.Client - def __init__(self, *args, **kwargs): + super(EphemeralBuilderManager, self).__init__(*args, **kwargs) + self._shutting_down = False self._manager_config = None - self._async_thread_executor = None - self._etcd_client = None + self._orchestrator = None - self._etcd_realm_prefix = None - self._etcd_builder_prefix = None + # The registered executors available for running jobs, in order. + self._ordered_executors = [] + # The registered executors, mapped by their unique name. + self._executor_name_to_executor = {} + + # Map from builder component to its associated job. 
self._component_to_job = {} - self._job_uuid_to_component = {} - self._component_to_builder = {} - self._executor = None + # Map from build UUID to a BuildInfo tuple with information about the build. + self._build_uuid_to_info = {} - # Map of etcd keys being watched to the tasks watching them - self._watch_tasks = {} - - super(EphemeralBuilderManager, self).__init__(*args, **kwargs) - - def _watch_etcd(self, etcd_key, change_callback, start_index=None, recursive=True, - restarter=None): - watch_task_key = (etcd_key, recursive) - def callback_wrapper(changed_key_future): - new_index = start_index - etcd_result = None - - if not changed_key_future.cancelled(): - try: - etcd_result = changed_key_future.result() - existing_index = getattr(etcd_result, 'etcd_index', None) - new_index = etcd_result.modifiedIndex + 1 - - logger.debug('Got watch of key: %s%s at #%s with result: %s', etcd_key, - '*' if recursive else '', existing_index, etcd_result) - - except ReadTimeoutError: - logger.debug('Read-timeout on etcd watch %s, rescheduling', etcd_key) - - except etcd.EtcdEventIndexCleared: - # This happens if etcd2 has moved forward too fast for us to start watching - # at the index we retrieved. We therefore start a new watch at HEAD and - # (if specified) call the restarter method which should conduct a read and - # reset the state of the manager. - # TODO: Remove this hack once Etcd is fixed. - logger.exception('Etcd moved forward too quickly. Restarting watch cycle.') - new_index = None - if restarter is not None: - async(restarter()) - - except (KeyError, etcd.EtcdKeyError): - logger.debug('Etcd key already cleared: %s', etcd_key) - return - - except etcd.EtcdException as eex: - # TODO(jschorr): This is a quick and dirty hack and should be replaced - # with a proper exception check. - if str(eex.message).find('Read timed out') >= 0: - logger.debug('Read-timeout on etcd watch %s, rescheduling', etcd_key) - else: - logger.exception('Exception on etcd watch: %s', etcd_key) - - except ProtocolError: - logger.exception('Exception on etcd watch: %s', etcd_key) - - - if watch_task_key not in self._watch_tasks or self._watch_tasks[watch_task_key].done(): - self._watch_etcd(etcd_key, change_callback, start_index=new_index, restarter=restarter) - - if etcd_result: - change_callback(etcd_result) - - if not self._shutting_down: - logger.debug('Scheduling watch of key: %s%s at start index %s', etcd_key, - '*' if recursive else '', start_index) - - watch_future = self._etcd_client.watch(etcd_key, recursive=recursive, index=start_index, - timeout=ETCD_MAX_WATCH_TIMEOUT) - watch_future.add_done_callback(callback_wrapper) - - self._watch_tasks[watch_task_key] = async(watch_future) + def overall_setup_time(self): + return EPHEMERAL_SETUP_TIMEOUT @coroutine - def _handle_builder_expiration(self, etcd_result): - if etcd_result is None: - return + def _mark_job_incomplete(self, build_job, build_info): + """ Marks a job as incomplete, in response to a failure to start or a timeout. 
""" + executor_name = build_info.executor_name + execution_id = build_info.execution_id - if etcd_result.action == EtcdAction.EXPIRE: - # Handle the expiration - logger.debug('Builder expired, clean up the old build node') - job_metadata = json.loads(etcd_result._prev_node.value) + logger.warning('Build executor failed to successfully boot with execution id %s', + execution_id) - if 'builder_id' in job_metadata: - builder_id = job_metadata['builder_id'] + # Take a lock to ensure that only one manager reports the build as incomplete for this + # execution. + lock_key = slash_join(self._expired_lock_prefix, build_job.build_uuid, execution_id) + acquired_lock = yield From(self._orchestrator.lock(lock_key)) + if acquired_lock: + try: + # Clean up the bookkeeping for the job. + yield From(self._orchestrator.delete_key(self._job_key(build_job))) + except KeyError: + logger.debug('Could not delete job key %s; might have been removed already', + build_job.build_uuid) - # Before we delete the build node, we take a lock to make sure that only one manager - # can terminate the node. - try: - lock_key = self._etcd_lock_key(builder_id) - yield From(self._etcd_client.write(lock_key, '', prevExist=False, ttl=self.setup_time())) - except (KeyError, etcd.EtcdKeyError): - logger.debug('Somebody else is cleaning up the build node: %s', builder_id) - return + logger.error('[BUILD INTERNAL ERROR] Build ID: %s. Exec name: %s. Exec ID: %s', + build_job.build_uuid, executor_name, execution_id) + yield From(self.job_complete_callback(build_job, BuildJobResult.INCOMPLETE, executor_name, + update_phase=True)) + else: + logger.debug('Did not get lock for job-expiration for job %s', build_job.build_uuid) - if not job_metadata.get('had_heartbeat', True): - logger.warning('Build node failed to successfully boot: %s', builder_id) - build_job = BuildJob(AttrDict(job_metadata['job_queue_item'])) - self.job_complete_callback(build_job, BuildJobResult.INCOMPLETE) + @coroutine + def _job_callback(self, key_change): + """ + This is the callback invoked when keys related to jobs are changed. + It ignores all events related to the creation of new jobs. + Deletes or expirations cause checks to ensure they've been properly marked as completed. - logger.info('Terminating expired build node: %s', builder_id) - yield From(self._executor.stop_builder(builder_id)) + :param key_change: the event and value produced by a key changing in the orchestrator + :type key_change: :class:`KeyChange` + """ + if key_change.event in (KeyEvent.CREATE, KeyEvent.SET): + raise Return() + + elif key_change.event in (KeyEvent.DELETE, KeyEvent.EXPIRE): + # Handle the expiration/deletion. + job_metadata = json.loads(key_change.value) + build_job = BuildJob(AttrDict(job_metadata['job_queue_item'])) + logger.debug('Got "%s" of job %s', key_change.event, build_job.build_uuid) + + # Get the build info. + build_info = self._build_uuid_to_info.get(build_job.build_uuid, None) + if build_info is None: + logger.debug('No build info for "%s" job %s (%s); probably already deleted by this manager', + key_change.event, build_job.build_uuid, job_metadata) + raise Return() + + if key_change.event != KeyEvent.EXPIRE: + # If the etcd action was not an expiration, then it was already deleted by some manager and + # the execution was therefore already shutdown. All that's left is to remove the build info. 
+ self._build_uuid_to_info.pop(build_job.build_uuid, None) + raise Return() + + logger.debug('got expiration for job %s with metadata: %s', build_job.build_uuid, + job_metadata) + + if not job_metadata.get('had_heartbeat', False): + # If we have not yet received a heartbeat, then the node failed to boot in some way. + # We mark the job as incomplete here. + yield From(self._mark_job_incomplete(build_job, build_info)) + + # Finally, we terminate the build execution for the job. We don't do this under a lock as + # terminating a node is an atomic operation; better to make sure it is terminated than not. + logger.info('Terminating expired build executor for job %s with execution id %s', + build_job.build_uuid, build_info.execution_id) + yield From(self.kill_builder_executor(build_job.build_uuid)) + else: + logger.warning('Unexpected KeyEvent (%s) on job key: %s', key_change.event, key_change.key) - def _handle_realm_change(self, etcd_result): - if etcd_result is None: - return - - if etcd_result.action == EtcdAction.CREATE: - # We must listen on the realm created by ourselves or another worker - realm_spec = json.loads(etcd_result.value) + @coroutine + def _realm_callback(self, key_change): + logger.debug('realm callback for key: %s', key_change.key) + if key_change.event == KeyEvent.CREATE: + # Listen on the realm created by ourselves or another worker. + realm_spec = json.loads(key_change.value) self._register_realm(realm_spec) - elif etcd_result.action == EtcdAction.DELETE or etcd_result.action == EtcdAction.EXPIRE: - # We must stop listening for new connections on the specified realm, if we did not get the - # connection - realm_spec = json.loads(etcd_result._prev_node.value) + elif key_change.event in (KeyEvent.DELETE, KeyEvent.EXPIRE): + # Stop listening for new connections on the realm, if we did not get the connection. + realm_spec = json.loads(key_change.value) + realm_id = realm_spec['realm'] + build_job = BuildJob(AttrDict(realm_spec['job_queue_item'])) - component = self._job_uuid_to_component.pop(build_job.job_details['build_uuid'], None) - if component is not None: - # We were not the manager which the worker connected to, remove the bookkeeping for it - logger.debug('Unregistering unused component on realm: %s', realm_spec['realm']) - del self._component_to_job[component] - del self._component_to_builder[component] - self.unregister_component(component) + build_uuid = build_job.build_uuid + + logger.debug('Realm key %s for build %s was %s', realm_id, build_uuid, key_change.event) + build_info = self._build_uuid_to_info.get(build_uuid, None) + if build_info is not None: + # Pop off the component and if we find one, then the build has not connected to this + # manager, so we can safely unregister its component. + component = self._component_to_job.pop(build_info.component, None) + if component is not None: + # We were not the manager which the worker connected to, remove the bookkeeping for it + logger.debug('Unregistering unused component for build %s', build_uuid) + self.unregister_component(build_info.component) + + # If the realm has expired, then perform cleanup of the executor. + if key_change.event == KeyEvent.EXPIRE: + execution_id = realm_spec.get('execution_id', None) + executor_name = realm_spec.get('executor_name', 'EC2Executor') + + # Cleanup the job, since it never started. + logger.debug('Job %s for incomplete marking: %s', build_uuid, build_info) + if build_info is not None: + yield From(self._mark_job_incomplete(build_job, build_info)) + + # Cleanup the executor. 
+ logger.info('Realm %s expired for job %s, terminating executor %s with execution id %s', + realm_id, build_uuid, executor_name, execution_id) + yield From(self.terminate_executor(executor_name, execution_id)) else: - logger.warning('Unexpected action (%s) on realm key: %s', etcd_result.action, etcd_result.key) + logger.warning('Unexpected action (%s) on realm key: %s', key_change.event, key_change.key) + def _register_realm(self, realm_spec): - logger.debug('Registering realm with manager: %s', realm_spec['realm']) + logger.debug('Got call to register realm %s with manager', realm_spec['realm']) + + # Create the build information block for the registered realm. + build_job = BuildJob(AttrDict(realm_spec['job_queue_item'])) + execution_id = realm_spec.get('execution_id', None) + executor_name = realm_spec.get('executor_name', 'EC2Executor') + + logger.debug('Registering realm %s with manager: %s', realm_spec['realm'], realm_spec) component = self.register_component(realm_spec['realm'], BuildComponent, token=realm_spec['token']) - if component in self._component_to_job: - logger.debug('Realm already registered with manager: %s', realm_spec['realm']) - return component + build_info = BuildInfo(component=component, build_job=build_job, execution_id=execution_id, + executor_name=executor_name) - build_job = BuildJob(AttrDict(realm_spec['job_queue_item'])) self._component_to_job[component] = build_job - self._component_to_builder[component] = realm_spec['builder_id'] - self._job_uuid_to_component[build_job.job_details['build_uuid']] = component + self._build_uuid_to_info[build_job.build_uuid] = build_info + + logger.debug('Registered realm %s with manager', realm_spec['realm']) return component + @property + def registered_executors(self): + return self._ordered_executors + @coroutine def _register_existing_realms(self): try: - all_realms = yield From(self._etcd_client.read(self._etcd_realm_prefix, recursive=True)) + all_realms = yield From(self._orchestrator.get_prefixed_keys(self._realm_prefix)) # Register all existing realms found. - encountered = set() - for realm in all_realms.children: - if not realm.dir: - component = self._register_realm(json.loads(realm.value)) - encountered.add(component) + encountered = {self._register_realm(json.loads(realm_data)) + for _realm, realm_data in all_realms} # Remove any components not encountered so we can clean up. 
- for found in list(self._component_to_job.keys()): - if not found in encountered: - self._component_to_job.pop(component) - self._component_to_builder.pop(component) + for component, job in iteritems(self._component_to_job): + if not component in encountered: + self._component_to_job.pop(component, None) + self._build_uuid_to_info.pop(job.build_uuid, None) - except (KeyError, etcd.EtcdKeyError): - # no realms have been registered yet + except KeyError: pass + def _load_executor(self, executor_kind_name, executor_config): + executor_klass = EphemeralBuilderManager.EXECUTORS.get(executor_kind_name) + if executor_klass is None: + logger.error('Unknown executor %s; skipping install', executor_kind_name) + return + + executor = executor_klass(executor_config, self.manager_hostname) + if executor.name in self._executor_name_to_executor: + raise Exception('Executor with name %s already registered' % executor.name) + + self._ordered_executors.append(executor) + self._executor_name_to_executor[executor.name] = executor + + def _config_prefix(self, key): + if self._manager_config.get('ORCHESTRATOR') is None: + return key + + prefix = self._manager_config.get('ORCHESTRATOR_PREFIX', '') + return slash_join(prefix, key).lstrip('/') + '/' + + @property + def _job_prefix(self): + return self._config_prefix(JOB_PREFIX) + + @property + def _realm_prefix(self): + return self._config_prefix(REALM_PREFIX) + + @property + def _cancel_prefix(self): + return self._config_prefix(CANCEL_PREFIX) + + @property + def _metric_prefix(self): + return self._config_prefix(METRIC_PREFIX) + + @property + def _expired_lock_prefix(self): + return self._config_prefix(EXPIRED_LOCK_PREFIX) + + @property + def _canceled_lock_prefix(self): + return self._config_prefix(CANCELED_LOCK_PREFIX) + + def _metric_key(self, realm): + """ + Create a key which is used to track a job in the Orchestrator. + + :param realm: realm for the build + :type realm: str + :returns: key used to track jobs + :rtype: str + """ + return slash_join(self._metric_prefix, realm) + + def _job_key(self, build_job): + """ + Creates a key which is used to track a job in the Orchestrator. + + :param build_job: unique job identifier for a build + :type build_job: str + :returns: key used to track the job + :rtype: str + """ + return slash_join(self._job_prefix, build_job.job_details['build_uuid']) + + def _realm_key(self, realm): + """ + Create a key which is used to track an incoming connection on a realm. + + :param realm: realm for the build + :type realm: str + :returns: key used to track the connection to the realm + :rtype: str + """ + return slash_join(self._realm_prefix, realm) + + def initialize(self, manager_config): logger.debug('Calling initialize') self._manager_config = manager_config - executor_klass = self._executors.get(manager_config.get('EXECUTOR', ''), PopenExecutor) - self._executor = executor_klass(manager_config.get('EXECUTOR_CONFIG', {}), - self.manager_hostname) + # Note: Executor config can be defined either as a single block of EXECUTOR_CONFIG (old style) + # or as a new set of executor configurations, with the order determining how we fallback. We + # check for both here to ensure backwards compatibility. 
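Note: the new `_config_prefix`/`_job_key`/`_realm_key` helpers build all orchestrator keys from an optional `ORCHESTRATOR_PREFIX` plus the fixed per-type prefixes. A rough sketch of how the keys compose, under the assumption that `util.slash_join` simply joins segments with single slashes (its real implementation is not shown in this diff):

def slash_join(*args):
    # Assumed behaviour of util.slash_join: join segments with single '/'
    # separators, trimming duplicate slashes at the joins.
    return '/'.join(part.strip('/') for part in args if part)

JOB_PREFIX = 'building/'

def config_prefix(orchestrator_prefix, key):
    # Mirrors _config_prefix above: prepend the configured prefix and
    # normalize to a no-leading-slash, trailing-slash form.
    return slash_join(orchestrator_prefix, key).lstrip('/') + '/'

def job_key(orchestrator_prefix, build_uuid):
    return slash_join(config_prefix(orchestrator_prefix, JOB_PREFIX), build_uuid)

# Under the assumed slash_join behaviour:
assert job_key('quay/builds', 'abc-123') == 'quay/builds/building/abc-123'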
+ if manager_config.get('EXECUTORS'): + for executor_config in manager_config['EXECUTORS']: + self._load_executor(executor_config.get('EXECUTOR'), executor_config) + else: + self._load_executor(manager_config.get('EXECUTOR'), manager_config.get('EXECUTOR_CONFIG')) - etcd_host = self._manager_config.get('ETCD_HOST', '127.0.0.1') - etcd_port = self._manager_config.get('ETCD_PORT', 2379) - etcd_ca_cert = self._manager_config.get('ETCD_CA_CERT', None) + logger.debug('calling orchestrator_from_config') + self._orchestrator = orchestrator_from_config(manager_config) - etcd_auth = self._manager_config.get('ETCD_CERT_AND_KEY', None) - if etcd_auth is not None: - etcd_auth = tuple(etcd_auth) # Convert YAML list to a tuple - - etcd_protocol = 'http' if etcd_auth is None else 'https' - logger.debug('Connecting to etcd on %s:%s', etcd_host, etcd_port) - - worker_threads = self._manager_config.get('ETCD_WORKER_THREADS', 5) - self._async_thread_executor = ThreadPoolExecutor(worker_threads) - self._etcd_client = AsyncWrapper(self._etcd_client_klass(host=etcd_host, port=etcd_port, - cert=etcd_auth, ca_cert=etcd_ca_cert, - protocol=etcd_protocol, - read_timeout=5), - executor=self._async_thread_executor) - - self._etcd_builder_prefix = self._manager_config.get('ETCD_BUILDER_PREFIX', 'building/') - self._watch_etcd(self._etcd_builder_prefix, self._handle_builder_expiration) - - self._etcd_realm_prefix = self._manager_config.get('ETCD_REALM_PREFIX', 'realm/') - self._watch_etcd(self._etcd_realm_prefix, self._handle_realm_change, - restarter=self._register_existing_realms) - - self._etcd_lock_prefix = self._manager_config.get('ETCD_LOCK_PREFIX', 'locks/') + logger.debug('setting on_key_change callbacks for job, cancel, realm') + self._orchestrator.on_key_change(self._job_prefix, self._job_callback) + self._orchestrator.on_key_change(self._cancel_prefix, self._cancel_callback) + self._orchestrator.on_key_change(self._realm_prefix, self._realm_callback, + restarter=self._register_existing_realms) # Load components for all realms currently known to the cluster async(self._register_existing_realms()) - def setup_time(self): - setup_time = self._manager_config.get('MACHINE_SETUP_TIME', 300) - return setup_time - def shutdown(self): logger.debug('Shutting down worker.') - self._shutting_down = True - - for (etcd_key, _), task in self._watch_tasks.items(): - if not task.done(): - logger.debug('Canceling watch task for %s', etcd_key) - task.cancel() - - if self._async_thread_executor is not None: - logger.debug('Shutting down thread pool executor.') - self._async_thread_executor.shutdown() + if self._orchestrator is not None: + self._orchestrator.shutdown() @coroutine def schedule(self, build_job): build_uuid = build_job.job_details['build_uuid'] logger.debug('Calling schedule with job: %s', build_uuid) - # Check if there are worker slots avialable by checking the number of jobs in etcd + # Check if there are worker slots available by checking the number of jobs in the orchestrator allowed_worker_count = self._manager_config.get('ALLOWED_WORKER_COUNT', 1) try: - building = yield From(self._etcd_client.read(self._etcd_builder_prefix, recursive=True)) - workers_alive = sum(1 for child in building.children if not child.dir) - except (KeyError, etcd.EtcdKeyError): + active_jobs = yield From(self._orchestrator.get_prefixed_keys(self._job_prefix)) + workers_alive = len(active_jobs) + except KeyError: workers_alive = 0 - except etcd.EtcdException: - logger.exception('Exception when reading job count from etcd for job: %s', 
build_uuid) - raise Return(False, RETRY_IMMEDIATELY_TIMEOUT) + except OrchestratorConnectionError: + logger.exception('Could not read job count from orchestrator for job due to orchestrator being down') + raise Return(False, ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION) + except OrchestratorError: + logger.exception('Exception when reading job count from orchestrator for job: %s', build_uuid) + raise Return(False, RETRY_IMMEDIATELY_SLEEP_DURATION) - logger.debug('Total jobs: %s', workers_alive) + logger.debug('Total jobs (scheduling job %s): %s', build_uuid, workers_alive) if workers_alive >= allowed_worker_count: logger.info('Too many workers alive, unable to start new worker for build job: %s. %s >= %s', build_uuid, workers_alive, allowed_worker_count) - raise Return(False, RETRY_IMMEDIATELY_TIMEOUT) + raise Return(False, TOO_MANY_WORKERS_SLEEP_DURATION) - job_key = self._etcd_job_key(build_job) + job_key = self._job_key(build_job) # First try to take a lock for this job, meaning we will be responsible for its lifeline realm = str(uuid.uuid4()) token = str(uuid.uuid4()) nonce = str(uuid.uuid4()) - setup_time = self.setup_time() - expiration = datetime.utcnow() + timedelta(seconds=setup_time) machine_max_expiration = self._manager_config.get('MACHINE_MAX_TIME', 7200) max_expiration = datetime.utcnow() + timedelta(seconds=machine_max_expiration) payload = { - 'expiration': calendar.timegm(expiration.timetuple()), - 'max_expiration': calendar.timegm(max_expiration.timetuple()), - 'nonce': nonce, - 'had_heartbeat': False, - 'job_queue_item': build_job.job_item, + 'max_expiration': calendar.timegm(max_expiration.timetuple()), + 'nonce': nonce, + 'had_heartbeat': False, + 'job_queue_item': build_job.job_item, } + lock_payload = json.dumps(payload) + logger.debug('Writing key for job %s with expiration in %s seconds', build_uuid, + EPHEMERAL_SETUP_TIMEOUT) try: - yield From(self._etcd_client.write(job_key, lock_payload, prevExist=False, - ttl=EC2_API_TIMEOUT)) - except (KeyError, etcd.EtcdKeyError): - # The job was already taken by someone else, we are probably a retry - logger.error('Job: %s already exists in etcd, timeout may be misconfigured', build_uuid) - raise Return(False, EC2_API_TIMEOUT) - except etcd.EtcdException: - logger.exception('Exception when writing job %s to etcd', build_uuid) - raise Return(False, RETRY_IMMEDIATELY_TIMEOUT) + yield From(self._orchestrator.set_key(job_key, lock_payload, overwrite=False, + expiration=EPHEMERAL_SETUP_TIMEOUT)) + except KeyError: + logger.warning('Job: %s already exists in orchestrator, timeout may be misconfigured', + build_uuid) + raise Return(False, EPHEMERAL_API_TIMEOUT) + except OrchestratorConnectionError: + logger.exception('Exception when writing job %s to orchestrator; could not connect', + build_uuid) + raise Return(False, ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION) + except OrchestratorError: + logger.exception('Exception when writing job %s to orchestrator', build_uuid) + raise Return(False, RETRY_IMMEDIATELY_SLEEP_DURATION) - executor_type = self._executor.__class__.__name__ - logger.debug('Starting builder for job: %s with executor: %s', build_uuid, executor_type) + # Got a lock, now lets boot the job via one of the registered executors. 
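Note: the job-key write above doubles as a lock: `set_key(..., overwrite=False, expiration=EPHEMERAL_SETUP_TIMEOUT)` only succeeds if no other manager has already claimed the build, and the TTL ensures an abandoned claim eventually expires. A toy in-memory illustration of that create-if-absent contract (the real orchestrator implementations are selected via `orchestrator_from_config`; this class is a stand-in for illustration only):

import time

class InMemoryOrchestrator(object):
    # Toy stand-in for the orchestrator interface used above.
    def __init__(self):
        self._data = {}

    def set_key(self, key, value, overwrite=False, expiration=None):
        if not overwrite and key in self._data:
            raise KeyError('key already exists: %s' % key)
        expires_at = time.time() + expiration if expiration else None
        self._data[key] = (value, expires_at)

orch = InMemoryOrchestrator()
orch.set_key('building/abc-123', '{"had_heartbeat": false}', expiration=500)
try:
    # A second manager attempting the same job loses the race and requeues.
    orch.set_key('building/abc-123', '{}', expiration=500)
except KeyError:
    pass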
+ started_with_executor = None + execution_id = None - try: - builder_id = yield From(self._executor.start_builder(realm, token, build_uuid)) - metric_queue.put('EC2BuilderStarted', 1, unit='Count') - except: - logger.exception('Exception when starting builder for job: %s', build_uuid) - raise Return(False, EC2_API_TIMEOUT) + logger.debug("Registered executors are: %s", [ex.name for ex in self._ordered_executors]) + for executor in self._ordered_executors: + # Check if we can use this executor based on its whitelist, by namespace. + namespace = build_job.namespace + if not executor.allowed_for_namespace(namespace): + logger.debug('Job %s (namespace: %s) cannot use executor %s', build_uuid, namespace, + executor.name) + continue - # Store the builder in etcd associated with the job id - try: - payload['builder_id'] = builder_id - yield From(self._etcd_client.write(job_key, json.dumps(payload), prevValue=lock_payload, - ttl=setup_time)) - except etcd.EtcdException: - logger.exception('Exception when writing job %s to etcd', build_uuid) - raise Return(False, EC2_API_TIMEOUT) + # Check if we can use this executor based on the retries remaining. + if executor.minimum_retry_threshold > build_job.retries_remaining: + metric_queue.builder_fallback.Inc() + logger.debug('Job %s cannot use executor %s as it is below retry threshold %s (retry #%s)', + build_uuid, executor.name, executor.minimum_retry_threshold, + build_job.retries_remaining) + continue - # Store the realm spec which will allow any manager to accept this builder when it connects - realm_spec = json.dumps({ - 'realm': realm, - 'token': token, - 'builder_id': builder_id, - 'job_queue_item': build_job.job_item, + logger.debug('Starting builder for job %s with selected executor: %s', build_uuid, + executor.name) + + try: + execution_id = yield From(executor.start_builder(realm, token, build_uuid)) + except: + try: + metric_queue.build_start_failure.Inc(labelvalues=[executor.name]) + metric_queue.put_deprecated(('ExecutorFailure-%s' % executor.name), 1, unit='Count') + except: + logger.exception('Exception when writing failure metric for execution %s for job %s', + execution_id, build_uuid) + + logger.exception('Exception when starting builder for job: %s', build_uuid) + continue + + try: + metric_queue.build_start_success.Inc(labelvalues=[executor.name]) + except: + logger.exception('Exception when writing success metric for execution %s for job %s', + execution_id, build_uuid) + + try: + metric_queue.ephemeral_build_workers.Inc() + except: + logger.exception('Exception when writing start metrics for execution %s for job %s', + execution_id, build_uuid) + + started_with_executor = executor + + # Break out of the loop now that we've started a builder successfully. + break + + # If we didn't start the job, cleanup and return it to the queue. + if started_with_executor is None: + logger.error('Could not start ephemeral worker for build %s', build_uuid) + + # Delete the associated build job record. + yield From(self._orchestrator.delete_key(job_key)) + raise Return(False, EPHEMERAL_API_TIMEOUT) + + # Job was started! 
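Note: the loop above walks the ordered executor list and falls back to the next executor whenever the namespace whitelist or the retry threshold rules one out, which is what the "order determining how we fallback" comment in `initialize` refers to. A condensed standalone sketch of that selection policy (the `Executor` tuple and its `namespaces` field are simplified stand-ins for the real executor objects and their `allowed_for_namespace` check):

from collections import namedtuple

Executor = namedtuple('Executor', ['name', 'namespaces', 'minimum_retry_threshold'])

def select_executor(ordered_executors, namespace, retries_remaining):
    # Take the first executor whose namespace whitelist admits the build and
    # whose minimum retry threshold has not been passed.
    for executor in ordered_executors:
        if executor.namespaces is not None and namespace not in executor.namespaces:
            continue
        if executor.minimum_retry_threshold > retries_remaining:
            continue
        return executor
    return None

executors = [
    Executor('ec2-primary', None, 2),        # only used while retries remain
    Executor('kubernetes-fallback', None, 0),
]
assert select_executor(executors, 'someorg', 3).name == 'ec2-primary'
assert select_executor(executors, 'someorg', 1).name == 'kubernetes-fallback'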
+ logger.debug('Started execution with ID %s for job: %s with executor: %s', + execution_id, build_uuid, started_with_executor.name) + + # Store metric data + metric_spec = json.dumps({ + 'executor_name': started_with_executor.name, + 'start_time': time.time(), }) try: - yield From(self._etcd_client.write(self._etcd_realm_key(realm), realm_spec, prevExist=False, - ttl=setup_time)) - except (KeyError, etcd.EtcdKeyError): - logger.error('Realm %s already exists in etcd for job %s ' + + yield From(self._orchestrator.set_key(self._metric_key(realm), metric_spec, overwrite=False, + expiration=machine_max_expiration + 10)) + except KeyError: + logger.error('Realm %s already exists in orchestrator for job %s ' + 'UUID collision or something is very very wrong.', realm, build_uuid) - raise Return(False, setup_time) - except etcd.EtcdException: - logger.exception('Exception when writing realm %s to etcd for job %s', realm, build_uuid) + except OrchestratorError: + logger.exception('Exception when writing realm %s to orchestrator for job %s', + realm, build_uuid) + + # Store the realm spec which will allow any manager to accept this builder when it connects + realm_spec = json.dumps({ + 'realm': realm, + 'token': token, + 'execution_id': execution_id, + 'executor_name': started_with_executor.name, + 'job_queue_item': build_job.job_item, + }) + + try: + setup_time = started_with_executor.setup_time or self.overall_setup_time() + logger.debug('Writing job key for job %s using executor %s with ID %s and ttl %s', build_uuid, + started_with_executor.name, execution_id, setup_time) + yield From(self._orchestrator.set_key(self._realm_key(realm), realm_spec, + expiration=setup_time)) + except OrchestratorConnectionError: + logger.exception('Exception when writing realm %s to orchestrator for job %s', + realm, build_uuid) + raise Return(False, ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION) + except OrchestratorError: + logger.exception('Exception when writing realm %s to orchestrator for job %s', + realm, build_uuid) raise Return(False, setup_time) + logger.debug('Builder spawn complete for job %s using executor %s with ID %s ', + build_uuid, started_with_executor.name, execution_id) raise Return(True, None) @coroutine def build_component_ready(self, build_component): - try: - # Clean up the bookkeeping for allowing any manager to take the job - job = self._component_to_job.pop(build_component) - del self._job_uuid_to_component[job.job_details['build_uuid']] - yield From(self._etcd_client.delete(self._etcd_realm_key(build_component.builder_realm))) + logger.debug('Got component ready for component with realm %s', build_component.builder_realm) - logger.debug('Sending build %s to newly ready component on realm %s', - job.job_details['build_uuid'], build_component.builder_realm) - yield From(build_component.start_build(job)) - except (KeyError, etcd.EtcdKeyError): - logger.warning('Builder is asking for more work, but work already completed') + # Pop off the job for the component. + # We do so before we send out the watch below, as it will also remove this mapping. + job = self._component_to_job.pop(build_component, None) + if job is None: + # This will occur once the build finishes, so no need to worry about it. + # We log in case it happens outside of the expected flow. + logger.debug('Could not find job for the build component on realm %s; component is ready', + build_component.builder_realm) + raise Return() + + # Start the build job. 
+ logger.debug('Sending build %s to newly ready component on realm %s', + job.build_uuid, build_component.builder_realm) + yield From(build_component.start_build(job)) + + yield From(self._write_duration_metric(metric_queue.builder_time_to_build, + build_component.builder_realm)) + + # Clean up the bookkeeping for allowing any manager to take the job. + try: + yield From(self._orchestrator.delete_key(self._realm_key(build_component.builder_realm))) + except KeyError: + logger.warning('Could not delete realm key %s', build_component.builder_realm) def build_component_disposed(self, build_component, timed_out): logger.debug('Calling build_component_disposed.') @@ -395,68 +557,154 @@ class EphemeralBuilderManager(BaseManager): @coroutine def job_completed(self, build_job, job_status, build_component): - logger.debug('Calling job_completed with status: %s', job_status) + logger.debug('Calling job_completed for job %s with status: %s', + build_job.build_uuid, job_status) - # Kill the ephmeral builder - yield From(self._executor.stop_builder(self._component_to_builder.pop(build_component))) + yield From(self._write_duration_metric(metric_queue.build_time, build_component.builder_realm)) - # Release the lock in etcd - job_key = self._etcd_job_key(build_job) + # Mark the job as completed. Since this is being invoked from the component, we don't need + # to ask for the phase to be updated as well. + build_info = self._build_uuid_to_info.get(build_job.build_uuid, None) + executor_name = build_info.executor_name if build_info else None + yield From(self.job_complete_callback(build_job, job_status, executor_name, update_phase=False)) + + # Kill the ephemeral builder. + yield From(self.kill_builder_executor(build_job.build_uuid)) + + # Delete the build job from the orchestrator. try: - yield From(self._etcd_client.delete(job_key)) - except (KeyError, etcd.EtcdKeyError): + job_key = self._job_key(build_job) + yield From(self._orchestrator.delete_key(job_key)) + except KeyError: logger.debug('Builder is asking for job to be removed, but work already completed') + except OrchestratorConnectionError: + logger.exception('Could not remove job key as orchestrator is not available') + yield From(sleep(ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)) + raise Return() - self.job_complete_callback(build_job, job_status) + # Delete the metric from the orchestrator. + try: + metric_key = self._metric_key(build_component.builder_realm) + yield From(self._orchestrator.delete_key(metric_key)) + except KeyError: + logger.debug('Builder is asking for metric to be removed, but key not found') + except OrchestratorConnectionError: + logger.exception('Could not remove metric key as orchestrator is not available') + yield From(sleep(ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)) + raise Return() + + logger.debug('job_completed for job %s with status: %s', build_job.build_uuid, job_status) + + @coroutine + def kill_builder_executor(self, build_uuid): + logger.info('Starting termination of executor for job %s', build_uuid) + build_info = self._build_uuid_to_info.pop(build_uuid, None) + if build_info is None: + logger.debug('Build information not found for build %s; skipping termination', build_uuid) + raise Return() + + # Remove the build's component. + self._component_to_job.pop(build_info.component, None) + + # Stop the build node/executor itself. 
+ yield From(self.terminate_executor(build_info.executor_name, build_info.execution_id)) + + @coroutine + def terminate_executor(self, executor_name, execution_id): + executor = self._executor_name_to_executor.get(executor_name) + if executor is None: + logger.error('Could not find registered executor %s', executor_name) + raise Return() + + # Terminate the executor's execution. + logger.info('Terminating executor %s with execution id %s', executor_name, execution_id) + yield From(executor.stop_builder(execution_id)) @coroutine def job_heartbeat(self, build_job): - # Extend the deadline in etcd - job_key = self._etcd_job_key(build_job) + """ + :param build_job: the identifier for the build + :type build_job: str + """ + self.job_heartbeat_callback(build_job) + self._extend_job_in_orchestrator(build_job) + @coroutine + def _extend_job_in_orchestrator(self, build_job): try: - build_job_metadata_response = yield From(self._etcd_client.read(job_key)) - except (KeyError, etcd.EtcdKeyError): - logger.info('Job %s no longer exists in etcd', build_job.job_details['build_uuid']) - return + job_data = yield From(self._orchestrator.get_key(self._job_key(build_job))) + except KeyError: + logger.info('Job %s no longer exists in the orchestrator', build_job.build_uuid) + raise Return() + except OrchestratorConnectionError: + logger.exception('failed to connect when attempted to extend job') - build_job_metadata = json.loads(build_job_metadata_response.value) + build_job_metadata = json.loads(job_data) max_expiration = datetime.utcfromtimestamp(build_job_metadata['max_expiration']) max_expiration_remaining = max_expiration - datetime.utcnow() max_expiration_sec = max(0, int(max_expiration_remaining.total_seconds())) ttl = min(self.heartbeat_period_sec * 2, max_expiration_sec) - new_expiration = datetime.utcnow() + timedelta(seconds=ttl) - payload = { - 'expiration': calendar.timegm(new_expiration.timetuple()), - 'builder_id': build_job_metadata['builder_id'], - 'job_queue_item': build_job.job_item, - 'max_expiration': build_job_metadata['max_expiration'], - 'had_heartbeat': True, + 'job_queue_item': build_job.job_item, + 'max_expiration': build_job_metadata['max_expiration'], + 'had_heartbeat': True, } - yield From(self._etcd_client.write(job_key, json.dumps(payload), ttl=ttl)) + try: + yield From(self._orchestrator.set_key(self._job_key(build_job), json.dumps(payload), + expiration=ttl)) + except OrchestratorConnectionError: + logger.exception('Could not update heartbeat for job as the orchestrator is not available') + yield From(sleep(ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION)) - self.job_heartbeat_callback(build_job) - - def _etcd_job_key(self, build_job): - """ Create a key which is used to track a job in etcd. + @coroutine + def _write_duration_metric(self, metric, realm): """ - return os.path.join(self._etcd_builder_prefix, build_job.job_details['build_uuid']) - - def _etcd_lock_key(self, unique_lock_id): - """ Create a key which is used to create a temporary lock in etcd. + :returns: True if the metric was written, otherwise False + :rtype: bool """ - return os.path.join(self._etcd_lock_prefix, unique_lock_id) - - def _etcd_realm_key(self, realm): - """ Create a key which is used to track an incoming connection on a realm. 
- """ - return os.path.join(self._etcd_realm_prefix, realm) + try: + metric_data = yield From(self._orchestrator.get_key(self._metric_key(realm))) + parsed_metric_data = json.loads(metric_data) + start_time = parsed_metric_data['start_time'] + metric.Observe(time.time() - start_time, + labelvalues=[parsed_metric_data.get('executor_name', + 'unknown')]) + except Exception: + logger.exception("Could not write metric for realm %s", realm) def num_workers(self): - """ Return the number of workers we're managing locally. """ - return len(self._component_to_builder) + The number of workers we're managing locally. + + :returns: the number of the workers locally managed + :rtype: int + """ + return len(self._component_to_job) + + + @coroutine + def _cancel_callback(self, key_change): + if key_change.event not in (KeyEvent.CREATE, KeyEvent.SET): + raise Return() + + build_uuid = key_change.value + build_info = self._build_uuid_to_info.get(build_uuid, None) + if build_info is None: + logger.debug('No build info for "%s" job %s', key_change.event, build_uuid) + raise Return(False) + + lock_key = slash_join(self._canceled_lock_prefix, + build_uuid, build_info.execution_id) + lock_acquired = yield From(self._orchestrator.lock(lock_key)) + if lock_acquired: + builder_realm = build_info.component.builder_realm + yield From(self.kill_builder_executor(build_uuid)) + yield From(self._orchestrator.delete_key(self._realm_key(builder_realm))) + yield From(self._orchestrator.delete_key(self._metric_key(builder_realm))) + yield From(self._orchestrator.delete_key(slash_join(self._job_prefix, build_uuid))) + + # This is outside the lock so we can un-register the component wherever it is registered to. + yield From(build_info.component.cancel_build()) diff --git a/buildman/manager/etcd_canceller.py b/buildman/manager/etcd_canceller.py new file mode 100644 index 000000000..ce92a1bbc --- /dev/null +++ b/buildman/manager/etcd_canceller.py @@ -0,0 +1,37 @@ +import logging +import etcd + +logger = logging.getLogger(__name__) + + +class EtcdCanceller(object): + """ A class that sends a message to etcd to cancel a build """ + + def __init__(self, config): + etcd_host = config.get('ETCD_HOST', '127.0.0.1') + etcd_port = config.get('ETCD_PORT', 2379) + etcd_ca_cert = config.get('ETCD_CA_CERT', None) + etcd_auth = config.get('ETCD_CERT_AND_KEY', None) + if etcd_auth is not None: + etcd_auth = tuple(etcd_auth) + + etcd_protocol = 'http' if etcd_auth is None else 'https' + logger.debug('Connecting to etcd on %s:%s', etcd_host, etcd_port) + self._cancel_prefix = config.get('ETCD_CANCEL_PREFIX', 'cancel/') + self._etcd_client = etcd.Client( + host=etcd_host, + port=etcd_port, + cert=etcd_auth, + ca_cert=etcd_ca_cert, + protocol=etcd_protocol, + read_timeout=5) + + def try_cancel_build(self, build_uuid): + """ Writes etcd message to cancel build_uuid. 
""" + logger.info("Cancelling build %s".format(build_uuid)) + try: + self._etcd_client.write("{}{}".format(self._cancel_prefix, build_uuid), build_uuid, ttl=60) + return True + except etcd.EtcdException: + logger.exception("Failed to write to etcd client %s", build_uuid) + return False diff --git a/buildman/manager/executor.py b/buildman/manager/executor.py index f6966debd..e82d7a316 100644 --- a/buildman/manager/executor.py +++ b/buildman/manager/executor.py @@ -1,19 +1,29 @@ +import datetime +import hashlib import logging import os -import uuid +import socket +import subprocess import threading -import boto.ec2 -import requests -import cachetools -import trollius +import uuid - -from jinja2 import FileSystemLoader, Environment -from trollius import coroutine, From, Return, get_event_loop from functools import partial -from buildman.asyncutil import AsyncWrapper +import boto.ec2 +import cachetools.func +import requests +import trollius + from container_cloud_config import CloudConfigContext +from jinja2 import FileSystemLoader, Environment +from trollius import coroutine, From, Return, get_event_loop + +import release + +from buildman.asyncutil import AsyncWrapper +from app import metric_queue, app +from util.metrics.metricqueue import duration_collector_async +from _init import ROOT_DIR logger = logging.getLogger(__name__) @@ -24,7 +34,7 @@ ONE_HOUR = 60*60 _TAG_RETRY_COUNT = 3 # Number of times to retry adding tags. _TAG_RETRY_SLEEP = 2 # Number of seconds to wait between tag retries. -ENV = Environment(loader=FileSystemLoader('buildman/templates')) +ENV = Environment(loader=FileSystemLoader(os.path.join(ROOT_DIR, "buildman/templates"))) TEMPLATE = ENV.get_template('cloudconfig.yaml') CloudConfigContext().populate_jinja_environment(ENV) @@ -36,12 +46,27 @@ class ExecutorException(Exception): class BuilderExecutor(object): def __init__(self, executor_config, manager_hostname): + """ Interface which can be plugged into the EphemeralNodeManager to provide a strategy for + starting and stopping builders. + """ self.executor_config = executor_config self.manager_hostname = manager_hostname - """ Interface which can be plugged into the EphemeralNodeManager to provide a strategy for - starting and stopping builders. - """ + default_websocket_scheme = 'wss' if app.config['PREFERRED_URL_SCHEME'] == 'https' else 'ws' + self.websocket_scheme = executor_config.get("WEBSOCKET_SCHEME", default_websocket_scheme) + + @property + def name(self): + """ Name returns the unique name for this executor. """ + return self.executor_config.get('NAME') or self.__class__.__name__ + + @property + def setup_time(self): + """ Returns the amount of time (in seconds) to wait for the execution to start for the build. + If None, the manager's default will be used. + """ + return self.executor_config.get('SETUP_TIME') + @coroutine def start_builder(self, realm, token, build_uuid): """ Create a builder with the specified config. Returns a unique id which can be used to manage @@ -55,11 +80,33 @@ class BuilderExecutor(object): """ raise NotImplementedError - def get_manager_websocket_url(self): - return 'ws://{0}:' + def allowed_for_namespace(self, namespace): + """ Returns true if this executor can be used for builds in the given namespace. """ - def generate_cloud_config(self, realm, token, coreos_channel, manager_hostname, - quay_username=None, quay_password=None): + # Check for an explicit namespace whitelist. 
+ namespace_whitelist = self.executor_config.get('NAMESPACE_WHITELIST') + if namespace_whitelist is not None and namespace in namespace_whitelist: + return True + + # Check for a staged rollout percentage. If found, we hash the namespace and, if it is found + # in the first X% of the character space, we allow this executor to be used. + staged_rollout = self.executor_config.get('STAGED_ROLLOUT') + if staged_rollout is not None: + bucket = int(hashlib.sha256(namespace).hexdigest()[-2:], 16) + return bucket < (256 * staged_rollout) + + # If there are no restrictions in place, we are free to use this executor. + return staged_rollout is None and namespace_whitelist is None + + @property + def minimum_retry_threshold(self): + """ Returns the minimum number of retries required for this executor to be used or 0 if + none. """ + return self.executor_config.get('MINIMUM_RETRY_THRESHOLD', 0) + + def generate_cloud_config(self, realm, token, build_uuid, coreos_channel, + manager_hostname, quay_username=None, + quay_password=None): if quay_username is None: quay_username = self.executor_config['QUAY_USERNAME'] @@ -67,14 +114,20 @@ class BuilderExecutor(object): quay_password = self.executor_config['QUAY_PASSWORD'] return TEMPLATE.render( - realm=realm, - token=token, - quay_username=quay_username, - quay_password=quay_password, - manager_hostname=manager_hostname, - coreos_channel=coreos_channel, - worker_tag=self.executor_config['WORKER_TAG'], - logentries_token=self.executor_config.get('LOGENTRIES_TOKEN', None), + realm=realm, + token=token, + build_uuid=build_uuid, + quay_username=quay_username, + quay_password=quay_password, + manager_hostname=manager_hostname, + websocket_scheme=self.websocket_scheme, + coreos_channel=coreos_channel, + worker_image=self.executor_config.get('WORKER_IMAGE', 'quay.io/coreos/registry-build-worker'), + worker_tag=self.executor_config['WORKER_TAG'], + logentries_token=self.executor_config.get('LOGENTRIES_TOKEN', None), + volume_size=self.executor_config.get('VOLUME_SIZE', '42G'), + max_lifetime_s=self.executor_config.get('MAX_LIFETIME_S', 10800), + ssh_authorized_keys=self.executor_config.get('SSH_AUTHORIZED_KEYS', []), ) @@ -92,13 +145,13 @@ class EC2Executor(BuilderExecutor): """ Creates an ec2 connection which can be used to manage instances. """ return AsyncWrapper(boto.ec2.connect_to_region( - self.executor_config['EC2_REGION'], - aws_access_key_id=self.executor_config['AWS_ACCESS_KEY'], - aws_secret_access_key=self.executor_config['AWS_SECRET_KEY'], + self.executor_config['EC2_REGION'], + aws_access_key_id=self.executor_config['AWS_ACCESS_KEY'], + aws_secret_access_key=self.executor_config['AWS_SECRET_KEY'], )) @classmethod - @cachetools.ttl_cache(ttl=ONE_HOUR) + @cachetools.func.ttl_cache(ttl=ONE_HOUR) def _get_coreos_ami(cls, ec2_region, coreos_channel): """ Retrieve the CoreOS AMI id from the canonical listing. 
""" @@ -107,21 +160,25 @@ class EC2Executor(BuilderExecutor): return stack_amis[ec2_region] @coroutine + @duration_collector_async(metric_queue.builder_time_to_start, ['ec2']) def start_builder(self, realm, token, build_uuid): region = self.executor_config['EC2_REGION'] channel = self.executor_config.get('COREOS_CHANNEL', 'stable') - get_ami_callable = partial(self._get_coreos_ami, region, channel) - coreos_ami = yield From(self._loop.run_in_executor(None, get_ami_callable)) - user_data = self.generate_cloud_config(realm, token, channel, self.manager_hostname) - logger.debug('Generated cloud config: %s', user_data) + coreos_ami = self.executor_config.get('COREOS_AMI', None) + if coreos_ami is None: + get_ami_callable = partial(self._get_coreos_ami, region, channel) + coreos_ami = yield From(self._loop.run_in_executor(None, get_ami_callable)) + + user_data = self.generate_cloud_config(realm, token, build_uuid, channel, self.manager_hostname) + logger.debug('Generated cloud config for build %s: %s', build_uuid, user_data) ec2_conn = self._get_conn() ssd_root_ebs = boto.ec2.blockdevicemapping.BlockDeviceType( - size=48, - volume_type='gp2', - delete_on_termination=True, + size=int(self.executor_config.get('BLOCK_DEVICE_SIZE', 48)), + volume_type='gp2', + delete_on_termination=True, ) block_devices = boto.ec2.blockdevicemapping.BlockDeviceMapping() block_devices['/dev/xvda'] = ssd_root_ebs @@ -129,13 +186,14 @@ class EC2Executor(BuilderExecutor): interfaces = None if self.executor_config.get('EC2_VPC_SUBNET_ID', None) is not None: interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( - subnet_id=self.executor_config['EC2_VPC_SUBNET_ID'], - groups=self.executor_config['EC2_SECURITY_GROUP_IDS'], - associate_public_ip_address=True, + subnet_id=self.executor_config['EC2_VPC_SUBNET_ID'], + groups=self.executor_config['EC2_SECURITY_GROUP_IDS'], + associate_public_ip_address=True, ) interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface) - reservation = yield From(ec2_conn.run_instances( + try: + reservation = yield From(ec2_conn.run_instances( coreos_ami, instance_type=self.executor_config['EC2_INSTANCE_TYPE'], key_name=self.executor_config.get('EC2_KEY_NAME', None), @@ -143,7 +201,11 @@ class EC2Executor(BuilderExecutor): instance_initiated_shutdown_behavior='terminate', block_device_map=block_devices, network_interfaces=interfaces, - )) + )) + except boto.exception.EC2ResponseError as ec2e: + logger.exception('Unable to spawn builder instance') + metric_queue.ephemeral_build_worker_failure.Inc() + raise ec2e if not reservation.instances: raise ExecutorException('Unable to spawn builder instance.') @@ -152,18 +214,23 @@ class EC2Executor(BuilderExecutor): launched = AsyncWrapper(reservation.instances[0]) + # Sleep a few seconds to wait for AWS to spawn the instance. + yield From(trollius.sleep(_TAG_RETRY_SLEEP)) + + # Tag the instance with its metadata. 
for i in range(0, _TAG_RETRY_COUNT): try: yield From(launched.add_tags({ - 'Name': 'Quay Ephemeral Builder', - 'Realm': realm, - 'Token': token, - 'BuildUUID': build_uuid, + 'Name': 'Quay Ephemeral Builder', + 'Realm': realm, + 'Token': token, + 'BuildUUID': build_uuid, })) except boto.exception.EC2ResponseError as ec2e: if ec2e.error_code == 'InvalidInstanceID.NotFound': if i < _TAG_RETRY_COUNT - 1: - logger.warning('Failed to write EC2 tags (attempt #%s)', i) + logger.warning('Failed to write EC2 tags for instance %s for build %s (attempt #%s)', + launched.id, build_uuid, i) yield From(trollius.sleep(_TAG_RETRY_SLEEP)) continue @@ -171,6 +238,7 @@ class EC2Executor(BuilderExecutor): logger.exception('Failed to write EC2 tags (attempt #%s)', i) + logger.debug('Machine with ID %s started for build %s', launched.id, build_uuid) raise Return(launched.id) @coroutine @@ -201,21 +269,28 @@ class PopenExecutor(BuilderExecutor): """ Executor which uses Popen to fork a quay-builder process. """ @coroutine + @duration_collector_async(metric_queue.builder_time_to_start, ['fork']) def start_builder(self, realm, token, build_uuid): # Now start a machine for this job, adding the machine id to the etcd information logger.debug('Forking process for build') - import subprocess + + ws_host = os.environ.get("BUILDMAN_WS_HOST", "localhost") + ws_port = os.environ.get("BUILDMAN_WS_PORT", "8787") builder_env = { - 'TOKEN': token, - 'REALM': realm, - 'ENDPOINT': 'ws://localhost:8787', - 'DOCKER_TLS_VERIFY': os.environ.get('DOCKER_TLS_VERIFY', ''), - 'DOCKER_CERT_PATH': os.environ.get('DOCKER_CERT_PATH', ''), - 'DOCKER_HOST': os.environ.get('DOCKER_HOST', ''), + 'TOKEN': token, + 'REALM': realm, + 'ENDPOINT': 'ws://%s:%s' % (ws_host, ws_port), + 'DOCKER_TLS_VERIFY': os.environ.get('DOCKER_TLS_VERIFY', ''), + 'DOCKER_CERT_PATH': os.environ.get('DOCKER_CERT_PATH', ''), + 'DOCKER_HOST': os.environ.get('DOCKER_HOST', ''), + 'PATH': "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" } logpipe = LogPipe(logging.INFO) - spawned = subprocess.Popen('/Users/jake/bin/quay-builder', stdout=logpipe, stderr=logpipe, + spawned = subprocess.Popen(os.environ.get('BUILDER_BINARY_LOCATION', + '/usr/local/bin/quay-builder'), + stdout=logpipe, + stderr=logpipe, env=builder_env) builder_id = str(uuid.uuid4()) @@ -236,6 +311,222 @@ class PopenExecutor(BuilderExecutor): logpipe.close() +class KubernetesExecutor(BuilderExecutor): + """ Executes build jobs by creating Kubernetes jobs which run a qemu-kvm virtual + machine in a pod """ + def __init__(self, *args, **kwargs): + super(KubernetesExecutor, self).__init__(*args, **kwargs) + self._loop = get_event_loop() + self.namespace = self.executor_config.get('BUILDER_NAMESPACE', 'builder') + self.image = self.executor_config.get('BUILDER_VM_CONTAINER_IMAGE', + 'quay.io/quay/quay-builder-qemu-coreos:stable') + + @coroutine + def _request(self, method, path, **kwargs): + request_options = dict(kwargs) + + tls_cert = self.executor_config.get('K8S_API_TLS_CERT') + tls_key = self.executor_config.get('K8S_API_TLS_KEY') + tls_ca = self.executor_config.get('K8S_API_TLS_CA') + service_account_token = self.executor_config.get('SERVICE_ACCOUNT_TOKEN') + + if 'timeout' not in request_options: + request_options['timeout'] = self.executor_config.get("K8S_API_TIMEOUT", 20) + + if service_account_token: + scheme = 'https' + request_options['headers'] = {'Authorization': 'Bearer ' + service_account_token} + logger.debug('Using service account token for Kubernetes authentication') + elif 
tls_cert and tls_key: + scheme = 'https' + request_options['cert'] = (tls_cert, tls_key) + logger.debug('Using tls certificate and key for Kubernetes authentication') + if tls_ca: + request_options['verify'] = tls_ca + else: + scheme = 'http' + + server = self.executor_config.get('K8S_API_SERVER', 'localhost:8080') + url = '%s://%s%s' % (scheme, server, path) + + logger.debug('Executor config: %s', self.executor_config) + logger.debug('Kubernetes request: %s %s: %s', method, url, request_options) + res = requests.request(method, url, **request_options) + logger.debug('Kubernetes response: %s: %s', res.status_code, res.text) + raise Return(res) + + def _jobs_path(self): + return '/apis/batch/v1/namespaces/%s/jobs' % self.namespace + + def _job_path(self, build_uuid): + return '%s/%s' % (self._jobs_path(), build_uuid) + + def _kubernetes_distribution(self): + return self.executor_config.get('KUBERNETES_DISTRIBUTION', 'basic').lower() + + def _is_basic_kubernetes_distribution(self): + return self._kubernetes_distribution() == 'basic' + + def _is_openshift_kubernetes_distribution(self): + return self._kubernetes_distribution() == 'openshift' + + def _build_job_container_resources(self): + # Minimum acceptable free resources for this container to "fit" in a quota + # These may be lower than the absolute limits if the cluster is knowingly + # oversubscribed by some amount. + container_requests = { + 'memory' : self.executor_config.get('CONTAINER_MEMORY_REQUEST', '3968Mi'), + } + + container_limits = { + 'memory' : self.executor_config.get('CONTAINER_MEMORY_LIMITS', '5120Mi'), + 'cpu' : self.executor_config.get('CONTAINER_CPU_LIMITS', '1000m'), + } + + resources = { + 'requests': container_requests, + } + + if self._is_openshift_kubernetes_distribution(): + resources['requests']['cpu'] = self.executor_config.get('CONTAINER_CPU_REQUEST', '500m') + resources['limits'] = container_limits + + return resources + + def _build_job_containers(self, user_data): + vm_memory_limit = self.executor_config.get('VM_MEMORY_LIMIT', '4G') + vm_volume_size = self.executor_config.get('VOLUME_SIZE', '32G') + + container = { + 'name': 'builder', + 'imagePullPolicy': 'IfNotPresent', + 'image': self.image, + 'securityContext': {'privileged': True}, + 'env': [ + {'name': 'USERDATA', 'value': user_data}, + {'name': 'VM_MEMORY', 'value': vm_memory_limit}, + {'name': 'VM_VOLUME_SIZE', 'value': vm_volume_size}, + ], + 'resources': self._build_job_container_resources(), + } + + if self._is_basic_kubernetes_distribution(): + container['volumeMounts'] = [{'name': 'secrets-mask','mountPath': '/var/run/secrets/kubernetes.io/serviceaccount'}] + + return container + + def _job_resource(self, build_uuid, user_data, coreos_channel='stable'): + image_pull_secret_name = self.executor_config.get('IMAGE_PULL_SECRET_NAME', 'builder') + service_account = self.executor_config.get('SERVICE_ACCOUNT_NAME', 'quay-builder-sa') + node_selector_label_key = self.executor_config.get('NODE_SELECTOR_LABEL_KEY', 'beta.kubernetes.io/instance-type') + node_selector_label_value = self.executor_config.get('NODE_SELECTOR_LABEL_VALUE', '') + + node_selector = { + node_selector_label_key : node_selector_label_value + } + + release_sha = release.GIT_HEAD or 'none' + if ' ' in release_sha: + release_sha = 'HEAD' + + job_resource = { + 'apiVersion': 'batch/v1', + 'kind': 'Job', + 'metadata': { + 'namespace': self.namespace, + 'generateName': build_uuid + '-', + 'labels': { + 'build': build_uuid, + 'time': datetime.datetime.now().strftime('%Y-%m-%d-%H'), + 
'manager': socket.gethostname(), + 'quay-sha': release_sha, + }, + }, + 'spec' : { + 'activeDeadlineSeconds': self.executor_config.get('MAXIMUM_JOB_TIME', 7200), + 'template': { + 'metadata': { + 'labels': { + 'build': build_uuid, + 'time': datetime.datetime.now().strftime('%Y-%m-%d-%H'), + 'manager': socket.gethostname(), + 'quay-sha': release_sha, + }, + }, + 'spec': { + 'imagePullSecrets': [{ 'name': image_pull_secret_name }], + 'restartPolicy': 'Never', + 'dnsPolicy': 'Default', + 'containers': [self._build_job_containers(user_data)], + }, + }, + }, + } + + if self._is_openshift_kubernetes_distribution(): + # Setting `automountServiceAccountToken` to false will prevent automounting API credentials for a service account. + job_resource['spec']['template']['spec']['automountServiceAccountToken'] = False + + # Use dedicated service account that has no authorization to any resources. + job_resource['spec']['template']['spec']['serviceAccount'] = service_account + + # Setting `enableServiceLinks` to false prevents information about other services from being injected into pod's + # environment variables. Pod has no visibility into other services on the cluster. + job_resource['spec']['template']['spec']['enableServiceLinks'] = False + + if node_selector_label_value.strip() != '': + job_resource['spec']['template']['spec']['nodeSelector'] = node_selector + + if self._is_basic_kubernetes_distribution(): + # This volume is a hack to mask the token for the namespace's + # default service account, which is placed in a file mounted under + # `/var/run/secrets/kubernetes.io/serviceaccount` in all pods. + # There's currently no other way to just disable the service + # account at either the pod or namespace level. + # + # https://github.com/kubernetes/kubernetes/issues/16779 + # + job_resource['spec']['template']['spec']['volumes'] = [{'name': 'secrets-mask','emptyDir': {'medium': 'Memory'}}] + + return job_resource + + @coroutine + @duration_collector_async(metric_queue.builder_time_to_start, ['k8s']) + def start_builder(self, realm, token, build_uuid): + # generate resource + channel = self.executor_config.get('COREOS_CHANNEL', 'stable') + user_data = self.generate_cloud_config(realm, token, build_uuid, channel, self.manager_hostname) + resource = self._job_resource(build_uuid, user_data, channel) + logger.debug('Using Kubernetes Distribution: %s', self._kubernetes_distribution()) + logger.debug('Generated kubernetes resource:\n%s', resource) + + # schedule + create_job = yield From(self._request('POST', self._jobs_path(), json=resource)) + if int(create_job.status_code / 100) != 2: + raise ExecutorException('Failed to create job: %s: %s: %s' % + (build_uuid, create_job.status_code, create_job.text)) + + job = create_job.json() + raise Return(job['metadata']['name']) + + @coroutine + def stop_builder(self, builder_id): + pods_path = '/api/v1/namespaces/%s/pods' % self.namespace + + # Delete the job itself. + try: + yield From(self._request('DELETE', self._job_path(builder_id))) + except: + logger.exception('Failed to send delete job call for job %s', builder_id) + + # Delete the pod(s) for the job. 
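+    # Deleting the job may leave its pods behind, so they are removed explicitly using the
+    # job-name label that Kubernetes sets on pods created by a job.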
+ selectorString = "job-name=%s" % builder_id + try: + yield From(self._request('DELETE', pods_path, params=dict(labelSelector=selectorString))) + except: + logger.exception("Failed to send delete pod call for job %s", builder_id) + + class LogPipe(threading.Thread): """ Adapted from http://codereview.stackexchange.com/a/17959 """ diff --git a/buildman/manager/noop_canceller.py b/buildman/manager/noop_canceller.py new file mode 100644 index 000000000..2adf17ad7 --- /dev/null +++ b/buildman/manager/noop_canceller.py @@ -0,0 +1,8 @@ +class NoopCanceller(object): + """ A class that can not cancel a build """ + def __init__(self, config=None): + pass + + def try_cancel_build(self, uuid): + """ Does nothing and fails to cancel build. """ + return False diff --git a/buildman/manager/orchestrator_canceller.py b/buildman/manager/orchestrator_canceller.py new file mode 100644 index 000000000..f3f821d5e --- /dev/null +++ b/buildman/manager/orchestrator_canceller.py @@ -0,0 +1,26 @@ +import logging + +from buildman.orchestrator import orchestrator_from_config, OrchestratorError +from util import slash_join + + +logger = logging.getLogger(__name__) + + +CANCEL_PREFIX = 'cancel/' + + +class OrchestratorCanceller(object): + """ An asynchronous way to cancel a build with any Orchestrator. """ + def __init__(self, config): + self._orchestrator = orchestrator_from_config(config, canceller_only=True) + + def try_cancel_build(self, build_uuid): + logger.info('Cancelling build %s', build_uuid) + cancel_key = slash_join(CANCEL_PREFIX, build_uuid) + try: + self._orchestrator.set_key_sync(cancel_key, build_uuid, expiration=60) + return True + except OrchestratorError: + logger.exception('Failed to write cancel action to redis with uuid %s', build_uuid) + return False diff --git a/buildman/orchestrator.py b/buildman/orchestrator.py new file mode 100644 index 000000000..54580589c --- /dev/null +++ b/buildman/orchestrator.py @@ -0,0 +1,753 @@ +from abc import ABCMeta, abstractmethod +from collections import namedtuple + +import datetime +import json +import logging +import re +import time + +from enum import IntEnum, unique +from six import add_metaclass, iteritems +from trollius import async, coroutine, From, Return +from urllib3.exceptions import ReadTimeoutError, ProtocolError + +import etcd +import redis + +from buildman.asyncutil import wrap_with_threadpool +from util import slash_join +from util.expiresdict import ExpiresDict + + +logger = logging.getLogger(__name__) + +ONE_DAY = 60 * 60 * 24 +ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION = 5 +DEFAULT_LOCK_EXPIRATION = 10000 + +ETCD_READ_TIMEOUT = 5 +ETCD_MAX_WATCH_TIMEOUT = 30 + +REDIS_EXPIRING_SUFFIX = '/expiring' +REDIS_DEFAULT_PUBSUB_KEY = 'orchestrator_events' +REDIS_EVENT_KIND_MESSAGE = 'message' +REDIS_EVENT_KIND_PMESSAGE = 'pmessage' +REDIS_NONEXPIRING_KEY = -1 + +# This constant defines the Redis configuration flags used to watch [K]eyspace and e[x]pired +# events on keys. For more info, see https://redis.io/topics/notifications#configuration +REDIS_KEYSPACE_EVENT_CONFIG_VALUE = 'Kx' +REDIS_KEYSPACE_EVENT_CONFIG_KEY = 'notify-keyspace-events' +REDIS_KEYSPACE_KEY_PATTERN = '__keyspace@%s__:%s' +REDIS_EXPIRED_KEYSPACE_PATTERN = slash_join(REDIS_KEYSPACE_KEY_PATTERN, REDIS_EXPIRING_SUFFIX) +REDIS_EXPIRED_KEYSPACE_REGEX = re.compile(REDIS_EXPIRED_KEYSPACE_PATTERN % (r'(\S+)', r'(\S+)')) + + +def orchestrator_from_config(manager_config, canceller_only=False): + """ + Allocates a new Orchestrator from the 'ORCHESTRATOR' block from provided manager config. 
+ Checks for legacy configuration prefixed with 'ETCD_' when the 'ORCHESTRATOR' is not present. + + :param manager_config: the configuration for the orchestrator + :type manager_config: dict + :rtype: :class: Orchestrator + """ + # Legacy codepath only knows how to configure etcd. + if manager_config.get('ORCHESTRATOR') is None: + manager_config['ORCHESTRATOR'] = {key: value + for (key, value) in iteritems(manager_config) + if key.startswith('ETCD_') and not key.endswith('_PREFIX')} + + # Sanity check that legacy prefixes are no longer being used. + for key in manager_config['ORCHESTRATOR'].keys(): + words = key.split('_') + if len(words) > 1 and words[-1].lower() == 'prefix': + raise AssertionError('legacy prefix used, use ORCHESTRATOR_PREFIX instead') + + def _dict_key_prefix(d): + """ + :param d: the dict that has keys prefixed with underscore + :type d: {str: any} + :rtype: str + """ + return d.keys()[0].split('_', 1)[0].lower() + + orchestrator_name = _dict_key_prefix(manager_config['ORCHESTRATOR']) + + def format_key(key): + return key.lower().split('_', 1)[1] + + orchestrator_kwargs = {format_key(key): value + for (key, value) in iteritems(manager_config['ORCHESTRATOR'])} + + if manager_config.get('ORCHESTRATOR_PREFIX') is not None: + orchestrator_kwargs['orchestrator_prefix'] = manager_config['ORCHESTRATOR_PREFIX'] + + orchestrator_kwargs['canceller_only'] = canceller_only + + logger.debug('attempting to create orchestrator %s with kwargs %s', + orchestrator_name, orchestrator_kwargs) + return orchestrator_by_name(orchestrator_name, **orchestrator_kwargs) + + +def orchestrator_by_name(name, **kwargs): + _ORCHESTRATORS = { + 'etcd': Etcd2Orchestrator, + 'mem': MemoryOrchestrator, + 'redis': RedisOrchestrator, + } + return _ORCHESTRATORS.get(name, MemoryOrchestrator)(**kwargs) + + +class OrchestratorError(Exception): + pass + + +# TODO: replace with ConnectionError when this codebase is Python 3. +class OrchestratorConnectionError(OrchestratorError): + pass + + +@unique +class KeyEvent(IntEnum): + CREATE = 1 + SET = 2 + DELETE = 3 + EXPIRE = 4 + + +class KeyChange(namedtuple('KeyChange', ['event', 'key', 'value'])): + pass + + +@add_metaclass(ABCMeta) +class Orchestrator(object): + """ + Orchestrator is the interface that is used to synchronize the build states + across build managers. + + This interface assumes that storage is being done by a key-value store + that supports watching for events on keys. + + Missing keys should return KeyError; otherwise, errors should raise an + OrchestratorError. + + :param key_prefix: the prefix of keys being watched + :type key_prefix: str + """ + + @abstractmethod + def on_key_change(self, key, callback, restarter=None): + """ + + The callback parameter takes in a KeyChange object as a parameter. 
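+
+    For example (illustrative only), a build manager might register a coroutine callback with
+    `on_key_change(job_prefix, self._job_callback)`, where `_job_callback` receives the KeyChange
+    and reacts to CREATE/SET/DELETE/EXPIRE events.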
+ """ + pass + + @abstractmethod + def get_prefixed_keys(self, prefix): + """ + + :returns: a dict of key value pairs beginning with prefix + :rtype: {str: str} + """ + pass + + @abstractmethod + def get_key(self, key): + """ + + :returns: the value stored at the provided key + :rtype: str + """ + pass + + @abstractmethod + def set_key(self, key, value, overwrite=False, expiration=None): + """ + + :param key: the identifier for the value + :type key: str + :param value: the value being stored + :type value: str + :param overwrite: whether or not a KeyError is thrown if the key already exists + :type overwrite: bool + :param expiration: the duration in seconds that a key should be available + :type expiration: int + """ + pass + + @abstractmethod + def set_key_sync(self, key, value, overwrite=False, expiration=None): + """ + set_key, but without trollius coroutines. + """ + pass + + @abstractmethod + def delete_key(self, key): + """ + Deletes a key that has been set in the orchestrator. + + :param key: the identifier for the key + :type key: str + """ + pass + + @abstractmethod + def lock(self, key, expiration=DEFAULT_LOCK_EXPIRATION): + """ + Takes a lock for synchronizing exclusive operations cluster-wide. + + :param key: the identifier for the lock + :type key: str + :param expiration: the duration until the lock expires + :type expiration: :class:`datetime.timedelta` or int (seconds) + :returns: whether or not the lock was acquired + :rtype: bool + """ + pass + + @abstractmethod + def shutdown(): + """ + This function should shutdown any final resources allocated by the Orchestrator. + """ + pass + + +def _sleep_orchestrator(): + """ + This function blocks the trollius event loop by sleeping in order to backoff if a failure + such as a ConnectionError has occurred. + """ + logger.exception('Connecting to etcd failed; sleeping for %s and then trying again', + ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION) + time.sleep(ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION) + logger.exception('Connecting to etcd failed; slept for %s and now trying again', + ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION) + + +class EtcdAction(object): + """ Enumeration of the various kinds of etcd actions we can observe via a watch. """ + GET = 'get' + SET = 'set' + EXPIRE = 'expire' + UPDATE = 'update' + DELETE = 'delete' + CREATE = 'create' + COMPARE_AND_SWAP = 'compareAndSwap' + COMPARE_AND_DELETE = 'compareAndDelete' + + +class Etcd2Orchestrator(Orchestrator): + def __init__(self, host='127.0.0.1', port=2379, cert_and_key=None, ca_cert=None, + client_threads=5, canceller_only=False, **kwargs): + self.is_canceller_only = canceller_only + + logger.debug('initializing async etcd client') + self._sync_etcd_client = etcd.Client( + host=host, + port=port, + cert=tuple(cert_and_key) if cert_and_key is not None else None, + ca_cert=ca_cert, + protocol='http' if cert_and_key is None else 'https', + read_timeout=ETCD_READ_TIMEOUT, + ) + + if not self.is_canceller_only: + (self._etcd_client, self._async_executor) = wrap_with_threadpool(self._sync_etcd_client, + client_threads) + + logger.debug('creating initial orchestrator state') + self._shutting_down = False + self._watch_tasks = {} + + @staticmethod + def _sanity_check_ttl(ttl): + """ + A TTL of < 0 in etcd results in the key *never being expired*. + We use a max here to ensure that if the TTL is < 0, the key will expire immediately. 
+ """ + return max(ttl, 0) + + def _watch_etcd(self, key, callback, restarter=None, start_index=None): + def callback_wrapper(changed_key_future): + new_index = start_index + etcd_result = None + + if not changed_key_future.cancelled(): + try: + etcd_result = changed_key_future.result() + existing_index = getattr(etcd_result, 'etcd_index', None) + new_index = etcd_result.modifiedIndex + 1 + + logger.debug('Got watch of key: %s at #%s with result: %s', + key, existing_index, etcd_result) + + except ReadTimeoutError: + logger.debug('Read-timeout on etcd watch %s, rescheduling', key) + + except etcd.EtcdEventIndexCleared: + # This happens if etcd2 has moved forward too fast for us to start watching at the index + # we retrieved. We therefore start a new watch at HEAD and (if specified) call the + # restarter method which should conduct a read and reset the state of the manager. + logger.debug('Etcd moved forward too quickly. Restarting watch cycle.') + new_index = None + if restarter is not None: + async(restarter()) + + except (KeyError, etcd.EtcdKeyError): + logger.debug('Etcd key already cleared: %s', key) + return + + except etcd.EtcdConnectionFailed: + _sleep_orchestrator() + + except etcd.EtcdException as eex: + # TODO: This is a quick and dirty hack and should be replaced with a proper + # exception check. + if str(eex.message).find('Read timed out') >= 0: + logger.debug('Read-timeout on etcd watch %s, rescheduling', key) + else: + logger.exception('Exception on etcd watch: %s', key) + + except ProtocolError: + logger.exception('Exception on etcd watch: %s', key) + + if key not in self._watch_tasks or self._watch_tasks[key].done(): + self._watch_etcd(key, callback, start_index=new_index, restarter=restarter) + + if etcd_result and etcd_result.value is not None: + async(callback(self._etcd_result_to_keychange(etcd_result))) + + if not self._shutting_down: + logger.debug('Scheduling watch of key: %s at start index %s', key, start_index) + watch_future = self._etcd_client.watch(key, recursive=True, index=start_index, + timeout=ETCD_MAX_WATCH_TIMEOUT) + watch_future.add_done_callback(callback_wrapper) + + self._watch_tasks[key] = async(watch_future) + + @staticmethod + def _etcd_result_to_keychange(etcd_result): + event = Etcd2Orchestrator._etcd_result_to_keyevent(etcd_result) + return KeyChange(event, etcd_result.key, etcd_result.value) + + @staticmethod + def _etcd_result_to_keyevent(etcd_result): + if etcd_result.action == EtcdAction.CREATE: + return KeyEvent.CREATE + if etcd_result.action == EtcdAction.SET: + return KeyEvent.CREATE if etcd_result.createdIndex == etcd_result.modifiedIndex else KeyEvent.SET + if etcd_result.action == EtcdAction.DELETE: + return KeyEvent.DELETE + if etcd_result.action == EtcdAction.EXPIRE: + return KeyEvent.EXPIRE + raise AssertionError('etcd action must have equivalant keyevent') + + def on_key_change(self, key, callback, restarter=None): + assert not self.is_canceller_only + + logger.debug('creating watch on %s', key) + self._watch_etcd(key, callback, restarter=restarter) + + @coroutine + def get_prefixed_keys(self, prefix): + assert not self.is_canceller_only + + try: + etcd_result = yield From(self._etcd_client.read(prefix, recursive=True)) + raise Return({leaf.key: leaf.value for leaf in etcd_result.leaves}) + except etcd.EtcdKeyError: + raise KeyError + except etcd.EtcdConnectionFailed as ex: + raise OrchestratorConnectionError(ex) + except etcd.EtcdException as ex: + raise OrchestratorError(ex) + + @coroutine + def get_key(self, key): + assert not 
self.is_canceller_only + + try: + # Ignore pylint: the value property on EtcdResult is added dynamically using setattr. + etcd_result = yield From(self._etcd_client.read(key)) + raise Return(etcd_result.value) + except etcd.EtcdKeyError: + raise KeyError + except etcd.EtcdConnectionFailed as ex: + raise OrchestratorConnectionError(ex) + except etcd.EtcdException as ex: + raise OrchestratorError(ex) + + @coroutine + def set_key(self, key, value, overwrite=False, expiration=None): + assert not self.is_canceller_only + + yield From(self._etcd_client.write(key, value, prevExists=overwrite, + ttl=self._sanity_check_ttl(expiration))) + + def set_key_sync(self, key, value, overwrite=False, expiration=None): + self._sync_etcd_client.write(key, value, prevExists=overwrite, + ttl=self._sanity_check_ttl(expiration)) + + @coroutine + def delete_key(self, key): + assert not self.is_canceller_only + + try: + yield From(self._etcd_client.delete(key)) + except etcd.EtcdKeyError: + raise KeyError + except etcd.EtcdConnectionFailed as ex: + raise OrchestratorConnectionError(ex) + except etcd.EtcdException as ex: + raise OrchestratorError(ex) + + @coroutine + def lock(self, key, expiration=DEFAULT_LOCK_EXPIRATION): + assert not self.is_canceller_only + + try: + yield From(self._etcd_client.write(key, {}, prevExist=False, + ttl=self._sanity_check_ttl(expiration))) + raise Return(True) + except (KeyError, etcd.EtcdKeyError): + raise Return(False) + except etcd.EtcdConnectionFailed: + logger.exception('Could not get etcd atomic lock as etcd is down') + raise Return(False) + except etcd.EtcdException as ex: + raise OrchestratorError(ex) + + def shutdown(self): + logger.debug('Shutting down etcd client.') + self._shutting_down = True + + if self.is_canceller_only: + return + + for (key, _), task in self._watch_tasks.items(): + if not task.done(): + logger.debug('Canceling watch task for %s', key) + task.cancel() + + if self._async_executor is not None: + self._async_executor.shutdown() + + +class MemoryOrchestrator(Orchestrator): + def __init__(self, **kwargs): + self.state = ExpiresDict() + self.callbacks = {} + + def _callbacks_prefixed(self, prefix): + return (callback for (key, callback) in iteritems(self.callbacks) + if key.startswith(prefix)) + + def on_key_change(self, key, callback, restarter=None): + self.callbacks[key] = callback + + @coroutine + def get_prefixed_keys(self, prefix): + raise Return({k: value for (k, value) in self.state.items() + if k.startswith(prefix)}) + + @coroutine + def get_key(self, key): + raise Return(self.state[key]) + + @coroutine + def set_key(self, key, value, overwrite=False, expiration=None): + preexisting_key = 'key' in self.state + if preexisting_key and not overwrite: + raise KeyError + + absolute_expiration = None + if expiration is not None: + absolute_expiration = datetime.datetime.now() + datetime.timedelta(seconds=expiration) + + self.state.set(key, value, expires=absolute_expiration) + + event = KeyEvent.CREATE if not preexisting_key else KeyEvent.SET + for callback in self._callbacks_prefixed(key): + yield From(callback(KeyChange(event, key, value))) + + def set_key_sync(self, key, value, overwrite=False, expiration=None): + """ + set_key, but without trollius coroutines. 
+ """ + preexisting_key = 'key' in self.state + if preexisting_key and not overwrite: + raise KeyError + + absolute_expiration = None + if expiration is not None: + absolute_expiration = datetime.datetime.now() + datetime.timedelta(seconds=expiration) + + self.state.set(key, value, expires=absolute_expiration) + + event = KeyEvent.CREATE if not preexisting_key else KeyEvent.SET + for callback in self._callbacks_prefixed(key): + callback(KeyChange(event, key, value)) + + @coroutine + def delete_key(self, key): + value = self.state[key] + del self.state[key] + + for callback in self._callbacks_prefixed(key): + yield From(callback(KeyChange(KeyEvent.DELETE, key, value))) + + @coroutine + def lock(self, key, expiration=DEFAULT_LOCK_EXPIRATION): + if key in self.state: + raise Return(False) + self.state.set(key, None, expires=expiration) + raise Return(True) + + def shutdown(self): + self.state = None + self.callbacks = None + + +class RedisOrchestrator(Orchestrator): + def __init__(self, host='127.0.0.1', port=6379, password=None, db=0, cert_and_key=None, + ca_cert=None, client_threads=5, ssl=False, skip_keyspace_event_setup=False, + canceller_only=False, **kwargs): + self.is_canceller_only = canceller_only + (cert, key) = tuple(cert_and_key) if cert_and_key is not None else (None, None) + self._sync_client = redis.StrictRedis( + host=host, + port=port, + password=password, + db=db, + ssl_certfile=cert, + ssl_keyfile=key, + ssl_ca_certs=ca_cert, + ssl=ssl, + ) + + self._shutting_down = False + self._tasks = {} + self._watched_keys = {} + self._pubsub_key = slash_join(kwargs.get('orchestrator_prefix', ''), + REDIS_DEFAULT_PUBSUB_KEY).lstrip('/') + + if not self.is_canceller_only: + (self._client, self._async_executor) = wrap_with_threadpool(self._sync_client, client_threads) + + # Configure a subscription to watch events that the orchestrator manually publishes. + logger.debug('creating pubsub with key %s', self._pubsub_key) + published_pubsub = self._sync_client.pubsub() + published_pubsub.subscribe(self._pubsub_key) + (self._pubsub, self._async_executor_pub) = wrap_with_threadpool(published_pubsub) + self._watch_published_key() + + # Configure a subscription to watch expired keyspace events. + if not skip_keyspace_event_setup: + self._sync_client.config_set(REDIS_KEYSPACE_EVENT_CONFIG_KEY, + REDIS_KEYSPACE_EVENT_CONFIG_VALUE) + + expiring_pubsub = self._sync_client.pubsub() + expiring_pubsub.psubscribe(REDIS_EXPIRED_KEYSPACE_PATTERN % (db, '*')) + (self._pubsub_expiring, self._async_executor_ex) = wrap_with_threadpool(expiring_pubsub) + self._watch_expiring_key() + + def _watch_published_key(self): + def published_callback_wrapper(event_future): + logger.debug('published callback called') + event_result = None + + if not event_future.cancelled(): + try: + event_result = event_future.result() + (redis_event, event_key, event_value) = event_result + logger.debug('Got watch of key: (%s, %s, %s)', redis_event, event_key, event_value) + except redis.ConnectionError: + _sleep_orchestrator() + except redis.RedisError: + logger.exception('Exception watching redis publish: %s', event_key) + + # Schedule creating a new future if this one has been consumed. 
+ if 'pub' not in self._tasks or self._tasks['pub'].done(): + self._watch_published_key() + + if event_result is not None and redis_event == REDIS_EVENT_KIND_MESSAGE: + keychange = self._publish_to_keychange(event_value) + for watched_key, callback in iteritems(self._watched_keys): + if keychange.key.startswith(watched_key): + async(callback(keychange)) + + if not self._shutting_down: + logger.debug('Scheduling watch of publish stream') + watch_future = self._pubsub.parse_response() + watch_future.add_done_callback(published_callback_wrapper) + self._tasks['pub'] = async(watch_future) + + def _watch_expiring_key(self): + def expiring_callback_wrapper(event_future): + logger.debug('expiring callback called') + event_result = None + + if not event_future.cancelled(): + try: + event_result = event_future.result() + if self._is_expired_keyspace_event(event_result): + # Get the value of the original key before the expiration happened. + key = self._key_from_expiration(event_future) + expired_value = yield From(self._client.get(key)) + + # $KEY/expiring is gone, but the original key still remains, set an expiration for it + # so that other managers have time to get the event and still read the expired value. + yield From(self._client.expire(key, ONE_DAY)) + except redis.ConnectionError: + _sleep_orchestrator() + except redis.RedisError: + logger.exception('Exception watching redis expirations: %s', key) + + # Schedule creating a new future if this one has been consumed. + if 'expire' not in self._tasks or self._tasks['expire'].done(): + self._watch_expiring_key() + + if self._is_expired_keyspace_event(event_result) and expired_value is not None: + for watched_key, callback in iteritems(self._watched_keys): + if key.startswith(watched_key): + async(callback(KeyChange(KeyEvent.EXPIRE, key, expired_value))) + + if not self._shutting_down: + logger.debug('Scheduling watch of expiration') + watch_future = self._pubsub_expiring.parse_response() + watch_future.add_done_callback(expiring_callback_wrapper) + self._tasks['expire'] = async(watch_future) + + def on_key_change(self, key, callback, restarter=None): + assert not self.is_canceller_only + + logger.debug('watching key: %s', key) + self._watched_keys[key] = callback + + @staticmethod + def _is_expired_keyspace_event(event_result): + """ + Sanity check that this isn't an unrelated keyspace event. + There could be a more efficient keyspace event config to avoid this client-side filter. + """ + if event_result is None: + return False + + (redis_event, _pattern, matched_key, expired) = event_result + return (redis_event == REDIS_EVENT_KIND_PMESSAGE and + expired == 'expired' and + REDIS_EXPIRED_KEYSPACE_REGEX.match(matched_key) is not None) + + @staticmethod + def _key_from_expiration(event_result): + (_redis_event, _pattern, matched_key, _expired) = event_result + return REDIS_EXPIRED_KEYSPACE_REGEX.match(matched_key).groups()[1] + + @staticmethod + def _publish_to_keychange(event_value): + e = json.loads(event_value) + return KeyChange(KeyEvent(e['event']), e['key'], e['value']) + + @coroutine + def get_prefixed_keys(self, prefix): + assert not self.is_canceller_only + + # TODO: This can probably be done with redis pipelines to make it transactional. + keys = yield From(self._client.keys(prefix + '*')) + + # Yielding to the event loop is required, thus this cannot be written as a dict comprehension. 
+ results = {} + for key in keys: + if key.endswith(REDIS_EXPIRING_SUFFIX): + continue + ttl = yield From(self._client.ttl(key)) + if ttl != REDIS_NONEXPIRING_KEY: + # Only redis keys without expirations are live build manager keys. + value = yield From(self._client.get(key)) + results.update({key: value}) + + raise Return(results) + + @coroutine + def get_key(self, key): + assert not self.is_canceller_only + + value = yield From(self._client.get(key)) + raise Return(value) + + @coroutine + def set_key(self, key, value, overwrite=False, expiration=None): + assert not self.is_canceller_only + + already_exists = yield From(self._client.exists(key)) + + yield From(self._client.set(key, value, xx=overwrite)) + if expiration is not None: + yield From(self._client.set(slash_join(key, REDIS_EXPIRING_SUFFIX), value, + xx=overwrite, ex=expiration)) + + key_event = KeyEvent.SET if already_exists else KeyEvent.CREATE + yield From(self._publish(event=key_event, key=key, value=value)) + + def set_key_sync(self, key, value, overwrite=False, expiration=None): + already_exists = self._sync_client.exists(key) + + self._sync_client.set(key, value, xx=overwrite) + if expiration is not None: + self._sync_client.set(slash_join(key, REDIS_EXPIRING_SUFFIX), value, + xx=overwrite, ex=expiration) + + self._sync_client.publish(self._pubsub_key, json.dumps({ + 'event': int(KeyEvent.SET if already_exists else KeyEvent.CREATE), + 'key': key, + 'value': value, + })) + + @coroutine + def _publish(self, **kwargs): + kwargs['event'] = int(kwargs['event']) + event_json = json.dumps(kwargs) + logger.debug('publishing event: %s', event_json) + yield From(self._client.publish(self._pubsub_key, event_json)) + + @coroutine + def delete_key(self, key): + assert not self.is_canceller_only + + value = yield From(self._client.get(key)) + yield From(self._client.delete(key)) + yield From(self._client.delete(slash_join(key, REDIS_EXPIRING_SUFFIX))) + yield From(self._publish(event=KeyEvent.DELETE, key=key, value=value)) + + @coroutine + def lock(self, key, expiration=DEFAULT_LOCK_EXPIRATION): + assert not self.is_canceller_only + + yield From(self.set_key(key, '', ex=expiration)) + raise Return(True) + + @coroutine + def shutdown(self): + logger.debug('Shutting down redis client.') + + self._shutting_down = True + + if self.is_canceller_only: + return + + for key, task in iteritems(self._tasks): + if not task.done(): + logger.debug('Canceling watch task for %s', key) + task.cancel() + + if self._async_executor is not None: + self._async_executor.shutdown() + if self._async_executor_ex is not None: + self._async_executor_ex.shutdown() + if self._async_executor_pub is not None: + self._async_executor_pub.shutdown() diff --git a/buildman/qemu-coreos/Dockerfile b/buildman/qemu-coreos/Dockerfile new file mode 100644 index 000000000..cdad6f0fc --- /dev/null +++ b/buildman/qemu-coreos/Dockerfile @@ -0,0 +1,26 @@ +FROM debian + +RUN apt-get clean && apt-get update && apt-get upgrade -y # 03APR2017 +RUN apt-get install -y \ + bzip2 \ + curl \ + openssh-client \ + qemu-kvm + +ARG channel=stable +ARG version=current + +RUN echo "Downloading http://${channel}.release.core-os.net/amd64-usr/${version}/coreos_production_qemu_image.img.bz2" +RUN curl -s -O http://${channel}.release.core-os.net/amd64-usr/${version}/coreos_production_qemu_image.img.bz2 && \ + bzip2 -d coreos_production_qemu_image.img.bz2 + +RUN apt-get remove -y curl bzip2 && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +COPY start.sh /start.sh + +LABEL 
com.coreos.channel ${channel} +LABEL com.coreos.version ${version} + +ENTRYPOINT ["/bin/bash", "/start.sh"] diff --git a/buildman/qemu-coreos/README.md b/buildman/qemu-coreos/README.md new file mode 100644 index 000000000..2d53e8f55 --- /dev/null +++ b/buildman/qemu-coreos/README.md @@ -0,0 +1,5 @@ +# Builder Image + +``` +docker build --build-arg channel=stable --build-arg version=current -t quay.io/quay/quay-builder-qemu-coreos:staging . +``` diff --git a/buildman/qemu-coreos/start.sh b/buildman/qemu-coreos/start.sh new file mode 100644 index 000000000..ccb1f63e1 --- /dev/null +++ b/buildman/qemu-coreos/start.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +VM_VOLUME_SIZE="${VM_VOLUME_SIZE:-32G}" +VM_MEMORY="${VM_MEMORY:-4G}" + +set -e +set -x +set -o nounset + +mkdir -p /userdata/openstack/latest +echo "${USERDATA}" > /userdata/openstack/latest/user_data + +time qemu-img resize ./coreos_production_qemu_image.img "${VM_VOLUME_SIZE}" + +qemu-system-x86_64 \ + -enable-kvm \ + -cpu host \ + -device virtio-9p-pci,fsdev=conf,mount_tag=config-2 \ + -nographic \ + -drive if=virtio,file=./coreos_production_qemu_image.img \ + -fsdev local,id=conf,security_model=none,readonly,path=/userdata \ + -m "${VM_MEMORY}" \ + -machine accel=kvm \ + -net nic,model=virtio \ + -net user,hostfwd=tcp::2222-:22 \ + -smp 2 diff --git a/buildman/server.py b/buildman/server.py index 80776f6e2..7aaf3b66b 100644 --- a/buildman/server.py +++ b/buildman/server.py @@ -1,32 +1,34 @@ import logging -import trollius import json +import trollius +from threading import Event +from datetime import timedelta +from trollius.coroutines import From from autobahn.asyncio.wamp import RouterFactory, RouterSessionFactory from autobahn.asyncio.websocket import WampWebSocketServerFactory from autobahn.wamp import types - from aiowsgi import create_server as create_wsgi_server from flask import Flask -from threading import Event -from trollius.coroutines import From -from datetime import timedelta -from buildman.enums import BuildJobResult, BuildServerStatus +from buildman.enums import BuildJobResult, BuildServerStatus, RESULT_PHASES from buildman.jobutil.buildstatus import StatusHandler from buildman.jobutil.buildjob import BuildJob, BuildJobLoadException -from data import database +from data import database, model from app import app, metric_queue + logger = logging.getLogger(__name__) WORK_CHECK_TIMEOUT = 10 TIMEOUT_PERIOD_MINUTES = 20 JOB_TIMEOUT_SECONDS = 300 -MINIMUM_JOB_EXTENSION = timedelta(minutes=2) +SETUP_LEEWAY_SECONDS = 30 +MINIMUM_JOB_EXTENSION = timedelta(minutes=1) HEARTBEAT_PERIOD_SEC = 30 + class BuilderServer(object): """ Server which handles both HTTP and WAMP requests, managing the full state of the build controller. 
@@ -45,12 +47,12 @@ class BuilderServer(object): self._build_logs = build_logs self._user_files = user_files self._lifecycle_manager = lifecycle_manager_klass( - self._register_component, - self._unregister_component, - self._job_heartbeat, - self._job_complete, - manager_hostname, - HEARTBEAT_PERIOD_SEC, + self._register_component, + self._unregister_component, + self._job_heartbeat, + self._job_complete, + manager_hostname, + HEARTBEAT_PERIOD_SEC, ) self._lifecycle_manager_config = lifecycle_manager_config @@ -130,28 +132,42 @@ class BuilderServer(object): def _unregister_component(self, component): logger.debug('Unregistering component with realm %s and token %s', - component.builder_realm, component.expected_token) + component.builder_realm, component.expected_token) - self._realm_map.pop(component.builder_realm) - self._current_components.remove(component) - self._session_factory.remove(component) + self._realm_map.pop(component.builder_realm, None) + + if component in self._current_components: + self._current_components.remove(component) + self._session_factory.remove(component) def _job_heartbeat(self, build_job): self._queue.extend_processing(build_job.job_item, seconds_from_now=JOB_TIMEOUT_SECONDS, minimum_extension=MINIMUM_JOB_EXTENSION) - def _job_complete(self, build_job, job_status): + @trollius.coroutine + def _job_complete(self, build_job, job_status, executor_name=None, update_phase=False): if job_status == BuildJobResult.INCOMPLETE: + logger.warning('[BUILD INCOMPLETE: job complete] Build ID: %s. No retry restore.', + build_job.repo_build.uuid) self._queue.incomplete(build_job.job_item, restore_retry=False, retry_after=30) else: self._queue.complete(build_job.job_item) + # Update the trigger failure tracking (if applicable). + if build_job.repo_build.trigger is not None: + model.build.update_trigger_disable_status(build_job.repo_build.trigger, + RESULT_PHASES[job_status]) + + if update_phase: + status_handler = StatusHandler(self._build_logs, build_job.repo_build.uuid) + yield From(status_handler.set_phase(RESULT_PHASES[job_status])) + self._job_count = self._job_count - 1 if self._current_status == BuildServerStatus.SHUTDOWN and not self._job_count: self._shutdown_event.set() - report_completion_status(job_status) + _report_completion_status(build_job, job_status, executor_name) @trollius.coroutine def _work_checker(self): @@ -163,7 +179,8 @@ class BuilderServer(object): logger.debug('Checking for more work for %d active workers', self._lifecycle_manager.num_workers()) - job_item = self._queue.get(processing_time=self._lifecycle_manager.setup_time()) + processing_time = self._lifecycle_manager.overall_setup_time() + SETUP_LEEWAY_SECONDS + job_item = self._queue.get(processing_time=processing_time, ordering_required=True) if job_item is None: logger.debug('No additional work found. Going to sleep for %s seconds', WORK_CHECK_TIMEOUT) continue @@ -171,6 +188,8 @@ class BuilderServer(object): try: build_job = BuildJob(job_item) except BuildJobLoadException as irbe: + logger.warning('[BUILD INCOMPLETE: job load exception] Job data: %s. No retry restore.', + job_item.body) logger.exception(irbe) self._queue.incomplete(job_item, restore_retry=False) continue @@ -181,19 +200,24 @@ class BuilderServer(object): try: schedule_success, retry_timeout = yield From(self._lifecycle_manager.schedule(build_job)) except: + logger.warning('[BUILD INCOMPLETE: scheduling] Build ID: %s. 
Retry restored.', + build_job.repo_build.uuid) logger.exception('Exception when scheduling job: %s', build_job.repo_build.uuid) self._current_status = BuildServerStatus.EXCEPTION + self._queue.incomplete(job_item, restore_retry=True, retry_after=WORK_CHECK_TIMEOUT) return if schedule_success: logger.debug('Marking build %s as scheduled', build_job.repo_build.uuid) status_handler = StatusHandler(self._build_logs, build_job.repo_build.uuid) - status_handler.set_phase(database.BUILD_PHASE.BUILD_SCHEDULED) + yield From(status_handler.set_phase(database.BUILD_PHASE.BUILD_SCHEDULED)) self._job_count = self._job_count + 1 logger.debug('Build job %s scheduled. Running: %s', build_job.repo_build.uuid, self._job_count) else: + logger.warning('[BUILD INCOMPLETE: no schedule] Build ID: %s. Retry restored.', + build_job.repo_build.uuid) logger.debug('All workers are busy for job %s Requeuing after %s seconds.', build_job.repo_build.uuid, retry_timeout) self._queue.incomplete(job_item, restore_retry=True, retry_after=retry_timeout) @@ -226,7 +250,10 @@ class BuilderServer(object): # Initialize the work queue checker. yield From(self._work_checker()) -def report_completion_status(status): +def _report_completion_status(build_job, status, executor_name): + metric_queue.build_counter.Inc(labelvalues=[status]) + metric_queue.repository_build_completed.Inc(labelvalues=[build_job.namespace, build_job.repo_name, + status, executor_name or 'executor']) if status == BuildJobResult.COMPLETE: status_name = 'CompleteBuilds' elif status == BuildJobResult.ERROR: @@ -236,4 +263,4 @@ def report_completion_status(status): else: return - metric_queue.put(status_name, 1, unit='Count') + metric_queue.put_deprecated(status_name, 1, unit='Count') diff --git a/buildman/templates/cloudconfig.yaml b/buildman/templates/cloudconfig.yaml index 03487a038..07b6e960d 100644 --- a/buildman/templates/cloudconfig.yaml +++ b/buildman/templates/cloudconfig.yaml @@ -1,20 +1,38 @@ #cloud-config +hostname: {{ build_uuid | default('quay-builder', True) }} + +users: + groups: + - sudo + - docker + +{% if ssh_authorized_keys -%} ssh_authorized_keys: -- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCC0m+hVmyR3vn/xoxJe9+atRWBxSK+YXgyufNVDMcb7H00Jfnc341QH3kDVYZamUbhVh/nyc2RP7YbnZR5zORFtgOaNSdkMYrPozzBvxjnvSUokkCCWbLqXDHvIKiR12r+UTSijPJE/Yk702Mb2ejAFuae1C3Ec+qKAoOCagDjpQ3THyb5oaKE7VPHdwCWjWIQLRhC+plu77ObhoXIFJLD13gCi01L/rp4mYVCxIc2lX5A8rkK+bZHnIZwWUQ4t8SIjWxIaUo0FE7oZ83nKuNkYj5ngmLHQLY23Nx2WhE9H6NBthUpik9SmqQPtVYbhIG+bISPoH9Xs8CLrFb0VRjz Joey's Mac -- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCo6FhAP7mFFOAzM91gtaKW7saahtaN4lur42FMMztz6aqUycIltCmvxo+3FmrXgCG30maMNU36Vm1+9QRtVQEd+eRuoIWP28t+8MT01Fh4zPuE2Wca3pOHSNo3X81FfWJLzmwEHiQKs9HPQqUhezR9PcVWVkbMyAzw85c0UycGmHGFNb0UiRd9HFY6XbgbxhZv/mvKLZ99xE3xkOzS1PNsdSNvjUKwZR7pSUPqNS5S/1NXyR4GhFTU24VPH/bTATOv2ATH+PSzsZ7Qyz9UHj38tKC+ALJHEDJ4HXGzobyOUP78cHGZOfCB5FYubq0zmOudAjKIAhwI8XTFvJ2DX1P3 jimmyzelinskie -- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDNvw8qo9m8np7yQ/Smv/oklM8bo8VyNRZriGYBDuolWDL/mZpYCQnZJXphQo7RFdNABYistikjJlBuuwUohLf2uSq0iKoFa2TgwI43wViWzvuzU4nA02/ITD5BZdmWAFNyIoqeB50Ol4qUgDwLAZ+7Kv7uCi6chcgr9gTi99jY3GHyZjrMiXMHGVGi+FExFuzhVC2drKjbz5q6oRfQeLtNfG4psl5GU3MQU6FkX4fgoCx0r9R48/b7l4+TT7pWblJQiRfeldixu6308vyoTUEHasdkU3/X0OTaGz/h5XqTKnGQc6stvvoED3w+L3QFp0H5Z8sZ9stSsitmCBrmbcKZ jakemoshenko -- ssh-rsa 
AAAAB3NzaC1yc2EAAAABIwAAAgEAo/JkbGO6R7g1ZxARi0xWVM7FOfN02snRAcIO6vT9M7xMUkWVLgD+hM/o91lk+UFiYdql0CATobpFWncRL36KaUqsbw9/1BlI40wg296XHXSSnxhxZ4L7ytf6G1tyN319HXlI2kh9vAf/fy++yDvkH8dI3k1oLoW+mZPET6Pff04/6AXXrRlS5mhmGv9irGwiDHtVKpj6lU8DN/UtOrv1tiQ0pgwEJq05fLGoQfgPNaBCnW2z4Ubpn2gyMcMBMpSwo4hCqJePd349e4bLmFcT+gXYg7Mnup1DoTDlowFFN56wpxQbdp96IxWzU+jYPaIAuRo+BJzCyOS8qBv0Z4RZrgop0qp2JYiVwmViO6TZhIDz6loQJXUOIleQmNgTbiZx8Bwv5GY2jMYoVwlBp7yy5bRjxfbFsJ0vU7TVzNAG7oEJy/74HmHmWzRQlSlQjesr8gRbm9zgR8wqc/L107UOWFg7Cgh8ZNjKuADbXqYuda1Y9m2upcfS26UPz5l5PW5uFRMHZSi8pb1XV6/0Z8H8vwsh37Ur6aLi/5jruRmKhdlsNrB1IiDicBsPW3yg7HHSIdPU4oBNPC77yDCT3l4CKr4el81RrZt7FbJPfY+Ig9Q5O+05f6I8+ZOlJGyZ/Qfyl2aVm1HnlJKuBqPxeic8tMng/9B5N7uZL6Y3k5jFU8c= quentin -- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDI7LtxLItapmUbt3Gs+4Oxa1i22fkx1+aJDkAjiRWPSX3+cxOzuPfHX9uFzr+qj5hy4J7ErrPp8q9alu+il9lE26GQuUxOZiaUrXu4dRCXXdCqTHARWBxGUXjkxdMp2HIzFpBxmVqcRubrgM36LBzKapdDOqQdz7XnNm5Jmf0tH/N0+TgV60P0WVY1CxmTya+JHNFVgazhd+oIGEhTyW/eszMGcFUgZet7DQFytYIQXYSwwGpGdJ+0InKAJ2SzCt/yuUlSrhrVM8vSGeami1XYmgQiyth1zjteMd8uTrc9NREH7bZTNcMFBqVYE3BYQWGRrv8pMMgP9gxgLbxtVsUl barakmich-titania -- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDiNawWSZL2MF99zwG9cFjGmML6agsKwaacQEoTsjcjHGixyUnqHXaLdrGma5i/uphZPkI5XRBKiuIROACY/aRoIxJUpV7AQ1Zx87cILx6fDVePvU5lW2DdhlCDUdwjuzDb/WO/c/qMWjOPqRG4q8XvB7nhuORMMgdpDXWVH4LXPmFez1iIBCKNk04l6Se7wiEOQjaBnTDiBDYlWD78r6RdiAU5eIxpq+lKBDTcET0vegwcA/WE4YOlYBbOrgtHrgwWqG/pXxUu77aapDOmfjtDrgim6XP5kEnytg5gCaN9iLvIpT8b1wD/1Z+LoNSZg6m9gkcC2yTRI0apOBa2G8lz silas@pro.local +{% for ssh_key in ssh_authorized_keys -%} +- {{ ssh_key }} +{%- endfor %} +{%- endif %} write_files: +- path: /root/disable-aws-metadata.sh + permission: '0755' + content: | + iptables -t nat -I PREROUTING -p tcp -d 169.254.169.254 --dport 80 -j DNAT --to-destination 1.1.1.1 + +- path: /etc/docker/daemon.json + permission: '0644' + content: | + { + "storage-driver": "overlay2" + } + - path: /root/overrides.list permission: '0644' content: | REALM={{ realm }} TOKEN={{ token }} - SERVER=wss://{{ manager_hostname }} + SERVER={{ websocket_scheme }}://{{ manager_hostname }} {% if logentries_token -%} LOGENTRIES_TOKEN={{ logentries_token }} {%- endif %} @@ -25,6 +43,10 @@ coreos: group: {{ coreos_channel }} units: + - name: update-engine.service + command: stop + - name: locksmithd.service + command: stop - name: systemd-journal-gatewayd.socket command: start enable: yes @@ -37,7 +59,7 @@ coreos: [Install] WantedBy=sockets.target {{ dockersystemd('quay-builder', - 'quay.io/coreos/registry-build-worker', + worker_image, quay_username, quay_password, worker_tag, @@ -47,34 +69,34 @@ coreos: restart_policy='no' ) | indent(4) }} {% if logentries_token -%} + # https://github.com/kelseyhightower/journal-2-logentries/pull/11 so moved journal-2-logentries to coreos {{ dockersystemd('builder-logs', - 'quay.io/kelseyhightower/journal-2-logentries', + 'quay.io/coreos/journal-2-logentries', extra_args='--env-file /root/overrides.list -v /run/journald.sock:/run/journald.sock', flattened=True, after_units=['quay-builder.service'] ) | indent(4) }} {%- endif %} - - name: format-var-lib-docker.service + - name: disable-aws-metadata.service command: start + enable: yes content: | [Unit] - Before=docker.service var-lib-docker.mount - ConditionPathExists=!/var/lib/docker.btrfs + Description=Disable AWS metadata service + Before=network-pre.target + Wants=network-pre.target [Service] Type=oneshot - ExecStart=/usr/bin/truncate --size=42G /var/lib/docker.btrfs - ExecStart=/usr/sbin/mkfs.btrfs /var/lib/docker.btrfs - - name: 
var-lib-docker.mount - enable: true + ExecStart=/root/disable-aws-metadata.sh + RemainAfterExit=yes + [Install] + WantedBy=multi-user.target + - name: machine-lifetime.service + command: start + enable: yes content: | [Unit] - Before=docker.service - After=format-var-lib-docker.service - Requires=format-var-lib-docker.service - [Install] - RequiredBy=docker.service - [Mount] - What=/var/lib/docker.btrfs - Where=/var/lib/docker - Type=btrfs - Options=loop,discard + Description=Machine Lifetime Service + [Service] + Type=oneshot + ExecStart=/bin/sh -xc "/bin/sleep {{ max_lifetime_s }}; /usr/bin/systemctl --no-block poweroff" diff --git a/buildman/test/test_buildman.py b/buildman/test/test_buildman.py new file mode 100644 index 000000000..49b9a20fc --- /dev/null +++ b/buildman/test/test_buildman.py @@ -0,0 +1,679 @@ +import unittest +import json +import uuid + +from mock import Mock, ANY +from six import iteritems +from trollius import coroutine, get_event_loop, From, Future, Return + +from app import metric_queue +from buildman.asyncutil import AsyncWrapper +from buildman.component.buildcomponent import BuildComponent +from buildman.manager.ephemeral import (EphemeralBuilderManager, REALM_PREFIX, + JOB_PREFIX) +from buildman.manager.executor import BuilderExecutor, ExecutorException +from buildman.orchestrator import KeyEvent, KeyChange +from buildman.server import BuildJobResult +from util import slash_join +from util.metrics.metricqueue import duration_collector_async + + +BUILD_UUID = 'deadbeef-dead-beef-dead-deadbeefdead' +REALM_ID = '1234-realm' + + +def async_test(f): + def wrapper(*args, **kwargs): + coro = coroutine(f) + future = coro(*args, **kwargs) + loop = get_event_loop() + loop.run_until_complete(future) + return wrapper + + +class TestExecutor(BuilderExecutor): + job_started = None + job_stopped = None + + @coroutine + @duration_collector_async(metric_queue.builder_time_to_start, labelvalues=["testlabel"]) + def start_builder(self, realm, token, build_uuid): + self.job_started = str(uuid.uuid4()) + raise Return(self.job_started) + + @coroutine + def stop_builder(self, execution_id): + self.job_stopped = execution_id + + +class BadExecutor(BuilderExecutor): + @coroutine + @duration_collector_async(metric_queue.builder_time_to_start, labelvalues=["testlabel"]) + def start_builder(self, realm, token, build_uuid): + raise ExecutorException('raised on purpose!') + + +class EphemeralBuilderTestCase(unittest.TestCase): + def __init__(self, *args, **kwargs): + self.etcd_client_mock = None + super(EphemeralBuilderTestCase, self).__init__(*args, **kwargs) + + @staticmethod + def _create_completed_future(result=None): + def inner(*args, **kwargs): + new_future = Future() + new_future.set_result(result) + return new_future + return inner + + def setUp(self): + self._existing_executors = dict(EphemeralBuilderManager.EXECUTORS) + + def tearDown(self): + EphemeralBuilderManager.EXECUTORS = self._existing_executors + + @coroutine + def _register_component(self, realm_spec, build_component, token): + raise Return('hello') + + def _create_build_job(self, namespace='namespace', retries=3): + mock_job = Mock() + mock_job.job_details = {'build_uuid': BUILD_UUID} + mock_job.job_item = { + 'body': json.dumps(mock_job.job_details), + 'id': 1, + } + + mock_job.namespace = namespace + mock_job.retries_remaining = retries + mock_job.build_uuid = BUILD_UUID + return mock_job + + +class TestEphemeralLifecycle(EphemeralBuilderTestCase): + """ Tests the various lifecycles of the ephemeral builder and its 
interaction with etcd. """ + + def __init__(self, *args, **kwargs): + super(TestEphemeralLifecycle, self).__init__(*args, **kwargs) + self.etcd_client_mock = None + self.test_executor = None + + def _create_completed_future(self, result=None): + def inner(*args, **kwargs): + new_future = Future() + new_future.set_result(result) + return new_future + return inner + + def _create_mock_executor(self, *args, **kwargs): + self.test_executor = Mock(spec=BuilderExecutor) + self.test_executor.start_builder = Mock(side_effect=self._create_completed_future('123')) + self.test_executor.stop_builder = Mock(side_effect=self._create_completed_future()) + self.test_executor.setup_time = 60 + self.test_executor.name = 'MockExecutor' + self.test_executor.minimum_retry_threshold = 0 + return self.test_executor + + def setUp(self): + super(TestEphemeralLifecycle, self).setUp() + + EphemeralBuilderManager.EXECUTORS['test'] = self._create_mock_executor + + self.register_component_callback = Mock() + self.unregister_component_callback = Mock() + self.job_heartbeat_callback = Mock() + self.job_complete_callback = AsyncWrapper(Mock()) + + self.manager = EphemeralBuilderManager( + self.register_component_callback, + self.unregister_component_callback, + self.job_heartbeat_callback, + self.job_complete_callback, + '127.0.0.1', + 30, + ) + + self.manager.initialize({ + 'EXECUTOR': 'test', + 'ORCHESTRATOR': {'MEM_CONFIG': None}, + }) + + # Ensure that that the realm and building callbacks have been registered + callback_keys = [key for key in self.manager._orchestrator.callbacks] + self.assertIn(REALM_PREFIX, callback_keys) + self.assertIn(JOB_PREFIX, callback_keys) + + self.mock_job = self._create_build_job() + self.mock_job_key = slash_join('building', BUILD_UUID) + + def tearDown(self): + super(TestEphemeralLifecycle, self).tearDown() + self.manager.shutdown() + + + @coroutine + def _setup_job_for_managers(self): + test_component = Mock(spec=BuildComponent) + test_component.builder_realm = REALM_ID + test_component.start_build = Mock(side_effect=self._create_completed_future()) + self.register_component_callback.return_value = test_component + + is_scheduled = yield From(self.manager.schedule(self.mock_job)) + self.assertTrue(is_scheduled) + self.assertEqual(self.test_executor.start_builder.call_count, 1) + + # Ensure that that the job, realm, and metric callbacks have been registered + callback_keys = [key for key in self.manager._orchestrator.callbacks] + self.assertIn(self.mock_job_key, self.manager._orchestrator.state) + self.assertIn(REALM_PREFIX, callback_keys) + # TODO: assert metric key has been set + + realm_for_build = self._find_realm_key(self.manager._orchestrator, BUILD_UUID) + + raw_realm_data = yield From(self.manager._orchestrator.get_key(slash_join('realm', + realm_for_build))) + realm_data = json.loads(raw_realm_data) + realm_data['realm'] = REALM_ID + + # Right now the job is not registered with any managers because etcd has not accepted the job + self.assertEqual(self.register_component_callback.call_count, 0) + + # Fire off a realm changed with the same data. + yield From(self.manager._realm_callback( + KeyChange(KeyEvent.CREATE, + slash_join(REALM_PREFIX, REALM_ID), + json.dumps(realm_data)))) + + # Ensure that we have at least one component node. + self.assertEqual(self.register_component_callback.call_count, 1) + self.assertEqual(1, self.manager.num_workers()) + + # Ensure that the build info exists. 
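As an editorial aside: these lifecycle tests drive the manager by synthesizing `KeyChange` objects rather than going through a live orchestrator. For context, the sketch below shows the JSON round-trip the orchestrator itself performs between `_publish()`/`set_key_sync()` and `_publish_to_keychange()`; the numeric event values are assumed for illustration, since the real `KeyEvent` enum lives in `buildman.orchestrator`. The test then resumes by asserting that the build info was recorded.

```
# Editorial sketch only -- the numeric event values are assumed; the real
# KeyEvent enum is defined in buildman.orchestrator.
import json
from collections import namedtuple

KeyChange = namedtuple('KeyChange', ['event', 'key', 'value'])

ASSUMED_KEY_EVENTS = {1: 'CREATE', 2: 'SET', 3: 'DELETE', 4: 'EXPIRE'}


def publish_payload(event, key, value):
  # Mirrors _publish()/set_key_sync(): the event is serialized as an integer.
  return json.dumps({'event': event, 'key': key, 'value': value})


def to_keychange(message):
  # Mirrors _publish_to_keychange(): rebuild the KeyChange from the payload.
  e = json.loads(message)
  return KeyChange(ASSUMED_KEY_EVENTS[e['event']], e['key'], e['value'])


print(to_keychange(publish_payload(1, 'realm/1234-realm', '{"token": "beef"}')))
```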
+ self.assertIsNotNone(self.manager._build_uuid_to_info.get(BUILD_UUID)) + + raise Return(test_component) + + @staticmethod + def _find_realm_key(orchestrator, build_uuid): + for key, value in iteritems(orchestrator.state): + if key.startswith(REALM_PREFIX): + parsed_value = json.loads(value) + body = json.loads(parsed_value['job_queue_item']['body']) + if body['build_uuid'] == build_uuid: + return parsed_value['realm'] + continue + raise KeyError + + + @async_test + def test_schedule_and_complete(self): + # Test that a job is properly registered with all of the managers + test_component = yield From(self._setup_job_for_managers()) + + # Take the job ourselves + yield From(self.manager.build_component_ready(test_component)) + + self.assertIsNotNone(self.manager._build_uuid_to_info.get(BUILD_UUID)) + + # Finish the job + yield From(self.manager.job_completed(self.mock_job, BuildJobResult.COMPLETE, test_component)) + + # Ensure that the executor kills the job. + self.assertEqual(self.test_executor.stop_builder.call_count, 1) + + # Ensure the build information is cleaned up. + self.assertIsNone(self.manager._build_uuid_to_info.get(BUILD_UUID)) + self.assertEqual(0, self.manager.num_workers()) + + @async_test + def test_another_manager_takes_job(self): + # Prepare a job to be taken by another manager + test_component = yield From(self._setup_job_for_managers()) + + yield From(self.manager._realm_callback( + KeyChange(KeyEvent.DELETE, + slash_join(REALM_PREFIX, REALM_ID), + json.dumps({'realm': REALM_ID, + 'token': 'beef', + 'execution_id': '123', + 'job_queue_item': self.mock_job.job_item})))) + + self.unregister_component_callback.assert_called_once_with(test_component) + + # Ensure that the executor does not kill the job. + self.assertEqual(self.test_executor.stop_builder.call_count, 0) + + # Ensure that we still have the build info, but not the component. + self.assertEqual(0, self.manager.num_workers()) + self.assertIsNotNone(self.manager._build_uuid_to_info.get(BUILD_UUID)) + + # Delete the job once it has "completed". + yield From(self.manager._job_callback( + KeyChange(KeyEvent.DELETE, + self.mock_job_key, + json.dumps({'had_heartbeat': False, + 'job_queue_item': self.mock_job.job_item})))) + + # Ensure the job was removed from the info, but stop was not called. + self.assertIsNone(self.manager._build_uuid_to_info.get(BUILD_UUID)) + self.assertEqual(self.test_executor.stop_builder.call_count, 0) + + @async_test + def test_job_started_by_other_manager(self): + # Ensure that that the building callbacks have been registered + callback_keys = [key for key in self.manager._orchestrator.callbacks] + self.assertIn(JOB_PREFIX, callback_keys) + + # Send a signal to the callback that the job has been created. + yield From(self.manager._job_callback( + KeyChange(KeyEvent.CREATE, + self.mock_job_key, + json.dumps({'had_heartbeat': False, + 'job_queue_item': self.mock_job.job_item})))) + + # Ensure the create does nothing. 
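As an editorial aside: several of these tests assert that callbacks were registered under `REALM_PREFIX` and `JOB_PREFIX`. The sketch below shows the prefix dispatch the orchestrator applies when a key changes: every registered callback whose prefix matches the changed key receives the `KeyChange`. The prefix strings here are assumed from the keys used in these tests ('realm/...', 'building/...'); the assertion that follows simply confirms the create event is a no-op for the executor.

```
# Editorial sketch only -- prefix values are assumed from the keys these
# tests use; the real constants live in the buildman package.
from collections import namedtuple

KeyChange = namedtuple('KeyChange', ['event', 'key', 'value'])

ASSUMED_REALM_PREFIX = 'realm/'
ASSUMED_JOB_PREFIX = 'building/'

received = []
watched_keys = {
  ASSUMED_REALM_PREFIX: lambda change: received.append(('realm', change.key)),
  ASSUMED_JOB_PREFIX: lambda change: received.append(('job', change.key)),
}


def dispatch(change):
  # Mirrors the pub/sub callback: any watcher whose prefix matches gets the event.
  for watched_key, callback in watched_keys.items():
    if change.key.startswith(watched_key):
      callback(change)


dispatch(KeyChange('CREATE', 'realm/1234-realm', '{}'))
dispatch(KeyChange('EXPIRE', 'building/deadbeef-dead-beef-dead-deadbeefdead', '{}'))
print(received)  # [('realm', 'realm/...'), ('job', 'building/...')]
```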
+ self.assertEqual(self.test_executor.stop_builder.call_count, 0) + + @async_test + def test_expiring_worker_not_started(self): + # Ensure that that the building callbacks have been registered + callback_keys = [key for key in self.manager._orchestrator.callbacks] + self.assertIn(JOB_PREFIX, callback_keys) + + # Send a signal to the callback that a worker has expired + yield From(self.manager._job_callback( + KeyChange(KeyEvent.EXPIRE, + self.mock_job_key, + json.dumps({'had_heartbeat': True, + 'job_queue_item': self.mock_job.job_item})))) + + # Since the realm was never registered, expiration should do nothing. + self.assertEqual(self.test_executor.stop_builder.call_count, 0) + + @async_test + def test_expiring_worker_started(self): + test_component = yield From(self._setup_job_for_managers()) + + # Ensure that that the building callbacks have been registered + callback_keys = [key for key in self.manager._orchestrator.callbacks] + self.assertIn(JOB_PREFIX, callback_keys) + + yield From(self.manager._job_callback( + KeyChange(KeyEvent.EXPIRE, + self.mock_job_key, + json.dumps({'had_heartbeat': True, + 'job_queue_item': self.mock_job.job_item})))) + + self.test_executor.stop_builder.assert_called_once_with('123') + self.assertEqual(self.test_executor.stop_builder.call_count, 1) + + @async_test + def test_buildjob_deleted(self): + test_component = yield From(self._setup_job_for_managers()) + + # Ensure that that the building callbacks have been registered + callback_keys = [key for key in self.manager._orchestrator.callbacks] + self.assertIn(JOB_PREFIX, callback_keys) + + # Send a signal to the callback that a worker has expired + yield From(self.manager._job_callback( + KeyChange(KeyEvent.DELETE, + self.mock_job_key, + json.dumps({'had_heartbeat': False, + 'job_queue_item': self.mock_job.job_item})))) + + self.assertEqual(self.test_executor.stop_builder.call_count, 0) + self.assertEqual(self.job_complete_callback.call_count, 0) + self.assertIsNone(self.manager._build_uuid_to_info.get(BUILD_UUID)) + + @async_test + def test_builder_never_starts(self): + test_component = yield From(self._setup_job_for_managers()) + + # Ensure that that the building callbacks have been registered + callback_keys = [key for key in self.manager._orchestrator.callbacks] + self.assertIn(JOB_PREFIX, callback_keys) + + # Send a signal to the callback that a worker has expired + yield From(self.manager._job_callback( + KeyChange(KeyEvent.EXPIRE, + self.mock_job_key, + json.dumps({'had_heartbeat': False, + 'job_queue_item': self.mock_job.job_item})))) + + self.test_executor.stop_builder.assert_called_once_with('123') + self.assertEqual(self.test_executor.stop_builder.call_count, 1) + + # Ensure the job was marked as incomplete, with an update_phase to True (so the DB record and + # logs are updated as well) + yield From(self.job_complete_callback.assert_called_once_with(ANY, BuildJobResult.INCOMPLETE, + 'MockExecutor', + update_phase=True)) + + @async_test + def test_change_worker(self): + # Send a signal to the callback that a worker key has been changed + self.manager._job_callback(KeyChange(KeyEvent.SET, self.mock_job_key, 'value')) + self.assertEqual(self.test_executor.stop_builder.call_count, 0) + + @async_test + def test_realm_expired(self): + test_component = yield From(self._setup_job_for_managers()) + + # Send a signal to the callback that a realm has expired + yield From(self.manager._realm_callback(KeyChange( + KeyEvent.EXPIRE, + self.mock_job_key, + json.dumps({ + 'realm': REALM_ID, + 'execution_id': 
'foobar', + 'executor_name': 'MockExecutor', + 'job_queue_item': {'body': '{"build_uuid": "fakeid"}'}, + })))) + + # Ensure that the cleanup code for the executor was called. + self.test_executor.stop_builder.assert_called_once_with('foobar') + self.assertEqual(self.test_executor.stop_builder.call_count, 1) + + +class TestEphemeral(EphemeralBuilderTestCase): + """ Simple unit tests for the ephemeral builder around config management, starting and stopping + jobs. + """ + + def setUp(self): + super(TestEphemeral, self).setUp() + + unregister_component_callback = Mock() + job_heartbeat_callback = Mock() + + @coroutine + def job_complete_callback(*args, **kwargs): + raise Return() + + self.manager = EphemeralBuilderManager( + self._register_component, + unregister_component_callback, + job_heartbeat_callback, + job_complete_callback, + '127.0.0.1', + 30, + ) + + def tearDown(self): + super(TestEphemeral, self).tearDown() + self.manager.shutdown() + + def test_verify_executor_oldconfig(self): + EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor + self.manager.initialize({ + 'EXECUTOR': 'test', + 'EXECUTOR_CONFIG': dict(MINIMUM_RETRY_THRESHOLD=42), + 'ORCHESTRATOR': {'MEM_CONFIG': None}, + }) + + # Ensure that we have a single test executor. + self.assertEqual(1, len(self.manager.registered_executors)) + self.assertEqual(42, self.manager.registered_executors[0].minimum_retry_threshold) + self.assertEqual('TestExecutor', self.manager.registered_executors[0].name) + + def test_verify_executor_newconfig(self): + EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor + self.manager.initialize({ + 'EXECUTORS': [{ + 'EXECUTOR': 'test', + 'MINIMUM_RETRY_THRESHOLD': 42 + }], + 'ORCHESTRATOR': {'MEM_CONFIG': None}, + }) + + # Ensure that we have a single test executor. + self.assertEqual(1, len(self.manager.registered_executors)) + self.assertEqual(42, self.manager.registered_executors[0].minimum_retry_threshold) + + + def test_multiple_executors_samename(self): + EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor + EphemeralBuilderManager.EXECUTORS['anotherexecutor'] = TestExecutor + + with self.assertRaises(Exception): + self.manager.initialize({ + 'EXECUTORS': [ + { + 'NAME': 'primary', + 'EXECUTOR': 'test', + 'MINIMUM_RETRY_THRESHOLD': 42 + }, + { + 'NAME': 'primary', + 'EXECUTOR': 'anotherexecutor', + 'MINIMUM_RETRY_THRESHOLD': 24 + }, + ], + 'ORCHESTRATOR': {'MEM_CONFIG': None}, + }) + + + def test_verify_multiple_executors(self): + EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor + EphemeralBuilderManager.EXECUTORS['anotherexecutor'] = TestExecutor + + self.manager.initialize({ + 'EXECUTORS': [ + { + 'NAME': 'primary', + 'EXECUTOR': 'test', + 'MINIMUM_RETRY_THRESHOLD': 42 + }, + { + 'NAME': 'secondary', + 'EXECUTOR': 'anotherexecutor', + 'MINIMUM_RETRY_THRESHOLD': 24 + }, + ], + 'ORCHESTRATOR': {'MEM_CONFIG': None}, + }) + + # Ensure that we have a two test executors. 
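As an editorial aside: `test_verify_executor_oldconfig` and `test_verify_executor_newconfig` above exercise two config shapes, the legacy single `EXECUTOR`/`EXECUTOR_CONFIG` pair and the newer `EXECUTORS` list. The sketch below shows one plausible way such a config could be normalized into a uniform list; this is an assumption for illustration, not the actual `EphemeralBuilderManager.initialize()` code. The assertions that follow check the two-executor case.

```
# Editorial sketch only -- a plausible normalization of the two config shapes
# exercised above, not the actual initialize() implementation.
def normalize_executor_config(manager_config):
  if 'EXECUTORS' in manager_config:
    return manager_config['EXECUTORS']

  # Legacy form: a single executor plus its flat EXECUTOR_CONFIG dict.
  legacy = dict(manager_config.get('EXECUTOR_CONFIG', {}))
  legacy['EXECUTOR'] = manager_config['EXECUTOR']
  return [legacy]


old_style = {'EXECUTOR': 'test', 'EXECUTOR_CONFIG': {'MINIMUM_RETRY_THRESHOLD': 42}}
new_style = {'EXECUTORS': [
  {'NAME': 'primary', 'EXECUTOR': 'test', 'MINIMUM_RETRY_THRESHOLD': 42},
  {'NAME': 'secondary', 'EXECUTOR': 'anotherexecutor', 'MINIMUM_RETRY_THRESHOLD': 24},
]}

print(normalize_executor_config(old_style))
print(normalize_executor_config(new_style))
```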
+ self.assertEqual(2, len(self.manager.registered_executors)) + self.assertEqual(42, self.manager.registered_executors[0].minimum_retry_threshold) + self.assertEqual(24, self.manager.registered_executors[1].minimum_retry_threshold) + + def test_skip_invalid_executor(self): + self.manager.initialize({ + 'EXECUTORS': [ + { + 'EXECUTOR': 'unknown', + 'MINIMUM_RETRY_THRESHOLD': 42 + }, + ], + 'ORCHESTRATOR': {'MEM_CONFIG': None}, + }) + + self.assertEqual(0, len(self.manager.registered_executors)) + + @async_test + def test_schedule_job_namespace_filter(self): + EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor + self.manager.initialize({ + 'EXECUTORS': [{ + 'EXECUTOR': 'test', + 'NAMESPACE_WHITELIST': ['something'], + }], + 'ORCHESTRATOR': {'MEM_CONFIG': None}, + }) + + # Try with a build job in an invalid namespace. + build_job = self._create_build_job(namespace='somethingelse') + result = yield From(self.manager.schedule(build_job)) + self.assertFalse(result[0]) + + # Try with a valid namespace. + build_job = self._create_build_job(namespace='something') + result = yield From(self.manager.schedule(build_job)) + self.assertTrue(result[0]) + + @async_test + def test_schedule_job_retries_filter(self): + EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor + self.manager.initialize({ + 'EXECUTORS': [{ + 'EXECUTOR': 'test', + 'MINIMUM_RETRY_THRESHOLD': 2, + }], + 'ORCHESTRATOR': {'MEM_CONFIG': None}, + }) + + # Try with a build job that has too few retries. + build_job = self._create_build_job(retries=1) + result = yield From(self.manager.schedule(build_job)) + self.assertFalse(result[0]) + + # Try with a valid job. + build_job = self._create_build_job(retries=2) + result = yield From(self.manager.schedule(build_job)) + self.assertTrue(result[0]) + + @async_test + def test_schedule_job_executor_fallback(self): + EphemeralBuilderManager.EXECUTORS['primary'] = TestExecutor + EphemeralBuilderManager.EXECUTORS['secondary'] = TestExecutor + + self.manager.initialize({ + 'EXECUTORS': [ + { + 'NAME': 'primary', + 'EXECUTOR': 'primary', + 'NAMESPACE_WHITELIST': ['something'], + 'MINIMUM_RETRY_THRESHOLD': 3, + }, + { + 'NAME': 'secondary', + 'EXECUTOR': 'secondary', + 'MINIMUM_RETRY_THRESHOLD': 2, + }, + ], + 'ALLOWED_WORKER_COUNT': 5, + 'ORCHESTRATOR': {'MEM_CONFIG': None}, + }) + + # Try a job not matching the primary's namespace filter. Should schedule on secondary. + build_job = self._create_build_job(namespace='somethingelse') + result = yield From(self.manager.schedule(build_job)) + self.assertTrue(result[0]) + + self.assertIsNone(self.manager.registered_executors[0].job_started) + self.assertIsNotNone(self.manager.registered_executors[1].job_started) + + self.manager.registered_executors[0].job_started = None + self.manager.registered_executors[1].job_started = None + + # Try a job not matching the primary's retry minimum. Should schedule on secondary. + build_job = self._create_build_job(namespace='something', retries=2) + result = yield From(self.manager.schedule(build_job)) + self.assertTrue(result[0]) + + self.assertIsNone(self.manager.registered_executors[0].job_started) + self.assertIsNotNone(self.manager.registered_executors[1].job_started) + + self.manager.registered_executors[0].job_started = None + self.manager.registered_executors[1].job_started = None + + # Try a job matching the primary. Should schedule on the primary. 
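As an editorial aside: `test_schedule_job_executor_fallback` encodes the selection rules exercised here, namely that an executor is skipped when the job's namespace is not in its `NAMESPACE_WHITELIST` or when the job has fewer retries remaining than the executor's `MINIMUM_RETRY_THRESHOLD`, with the manager falling through to the next executor in order. The sketch below reduces that filtering to simple dicts rather than the real executor classes; the next case in the test then verifies the primary is chosen when both rules pass.

```
# Editorial sketch only -- simplified stand-ins for the real executor objects;
# mirrors the fallback rules exercised by test_schedule_job_executor_fallback.
def pick_executor(executors, namespace, retries_remaining):
  for executor in executors:
    whitelist = executor.get('NAMESPACE_WHITELIST')
    if whitelist is not None and namespace not in whitelist:
      continue
    if retries_remaining < executor.get('MINIMUM_RETRY_THRESHOLD', 0):
      continue
    return executor['NAME']
  return None


executors = [
  {'NAME': 'primary', 'NAMESPACE_WHITELIST': ['something'], 'MINIMUM_RETRY_THRESHOLD': 3},
  {'NAME': 'secondary', 'MINIMUM_RETRY_THRESHOLD': 2},
]

print(pick_executor(executors, 'somethingelse', 3))  # secondary (namespace filter)
print(pick_executor(executors, 'something', 2))      # secondary (retry threshold)
print(pick_executor(executors, 'something', 3))      # primary
print(pick_executor(executors, 'somethingelse', 1))  # None (no executor matches)
```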
+ build_job = self._create_build_job(namespace='something', retries=3) + result = yield From(self.manager.schedule(build_job)) + self.assertTrue(result[0]) + + self.assertIsNotNone(self.manager.registered_executors[0].job_started) + self.assertIsNone(self.manager.registered_executors[1].job_started) + + self.manager.registered_executors[0].job_started = None + self.manager.registered_executors[1].job_started = None + + # Try a job not matching either's restrictions. + build_job = self._create_build_job(namespace='somethingelse', retries=1) + result = yield From(self.manager.schedule(build_job)) + self.assertFalse(result[0]) + + self.assertIsNone(self.manager.registered_executors[0].job_started) + self.assertIsNone(self.manager.registered_executors[1].job_started) + + self.manager.registered_executors[0].job_started = None + self.manager.registered_executors[1].job_started = None + + + @async_test + def test_schedule_job_single_executor(self): + EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor + + self.manager.initialize({ + 'EXECUTOR': 'test', + 'EXECUTOR_CONFIG': {}, + 'ALLOWED_WORKER_COUNT': 5, + 'ORCHESTRATOR': {'MEM_CONFIG': None}, + }) + + build_job = self._create_build_job(namespace='something', retries=3) + result = yield From(self.manager.schedule(build_job)) + self.assertTrue(result[0]) + + self.assertIsNotNone(self.manager.registered_executors[0].job_started) + self.manager.registered_executors[0].job_started = None + + + build_job = self._create_build_job(namespace='something', retries=0) + result = yield From(self.manager.schedule(build_job)) + self.assertTrue(result[0]) + + self.assertIsNotNone(self.manager.registered_executors[0].job_started) + self.manager.registered_executors[0].job_started = None + + @async_test + def test_executor_exception(self): + EphemeralBuilderManager.EXECUTORS['bad'] = BadExecutor + + self.manager.initialize({ + 'EXECUTOR': 'bad', + 'EXECUTOR_CONFIG': {}, + 'ORCHESTRATOR': {'MEM_CONFIG': None}, + }) + + build_job = self._create_build_job(namespace='something', retries=3) + result = yield From(self.manager.schedule(build_job)) + self.assertFalse(result[0]) + + @async_test + def test_schedule_and_stop(self): + EphemeralBuilderManager.EXECUTORS['test'] = TestExecutor + + self.manager.initialize({ + 'EXECUTOR': 'test', + 'EXECUTOR_CONFIG': {}, + 'ORCHESTRATOR': {'MEM_CONFIG': None}, + }) + + # Start the build job. + build_job = self._create_build_job(namespace='something', retries=3) + result = yield From(self.manager.schedule(build_job)) + self.assertTrue(result[0]) + + executor = self.manager.registered_executors[0] + self.assertIsNotNone(executor.job_started) + + # Register the realm so the build information is added. + yield From(self.manager._register_realm({ + 'realm': str(uuid.uuid4()), + 'token': str(uuid.uuid4()), + 'execution_id': executor.job_started, + 'executor_name': 'TestExecutor', + 'build_uuid': build_job.build_uuid, + 'job_queue_item': build_job.job_item, + })) + + # Stop the build job. 
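As an editorial aside: `test_schedule_and_stop` registers a realm so the manager records which executor and execution ID belong to the build, letting `kill_builder_executor` stop the right machine from the build UUID alone. The bookkeeping is sketched below with plain dictionaries; the field names follow the realm record used in the test, while the internal data structure of the real manager is assumed. The test's final step, which follows, performs exactly this kill by UUID.

```
# Editorial sketch only -- plain-dict bookkeeping; the real manager keeps this
# state in _build_uuid_to_info and its registered executor objects.
class SketchExecutor(object):
  def __init__(self, name):
    self.name = name
    self.stopped = None

  def stop_builder(self, execution_id):
    self.stopped = execution_id


executors_by_name = {'TestExecutor': SketchExecutor('TestExecutor')}
build_uuid_to_info = {}


def register_realm(realm_record):
  # Field names follow the realm record used in the test above.
  build_uuid_to_info[realm_record['build_uuid']] = {
    'executor_name': realm_record['executor_name'],
    'execution_id': realm_record['execution_id'],
  }


def kill_builder_executor(build_uuid):
  info = build_uuid_to_info.pop(build_uuid, None)
  if info is not None:
    executors_by_name[info['executor_name']].stop_builder(info['execution_id'])


register_realm({'build_uuid': 'deadbeef', 'executor_name': 'TestExecutor',
                'execution_id': 'i-12345', 'realm': 'r', 'token': 't'})
kill_builder_executor('deadbeef')
print(executors_by_name['TestExecutor'].stopped)  # i-12345
```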
+ yield From(self.manager.kill_builder_executor(build_job.build_uuid)) + self.assertEqual(executor.job_stopped, executor.job_started) + + +if __name__ == '__main__': + unittest.main() diff --git a/buildstatus/cancelled.svg b/buildstatus/cancelled.svg new file mode 100644 index 000000000..0e565cf97 --- /dev/null +++ b/buildstatus/cancelled.svg @@ -0,0 +1 @@ +containercontainercancelledcancelled diff --git a/buildtrigger/basehandler.py b/buildtrigger/basehandler.py index 2555b09ed..8d9b0f753 100644 --- a/buildtrigger/basehandler.py +++ b/buildtrigger/basehandler.py @@ -1,7 +1,85 @@ +import os +from abc import ABCMeta, abstractmethod +from jsonschema import validate +from six import add_metaclass + +from active_migration import ActiveDataMigration, ERTMigrationFlags from endpoints.building import PreparedBuild from data import model from buildtrigger.triggerutil import get_trigger_config, InvalidServiceException -from jsonschema import validate + +NAMESPACES_SCHEMA = { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'personal': { + 'type': 'boolean', + 'description': 'True if the namespace is the user\'s personal namespace', + }, + 'score': { + 'type': 'number', + 'description': 'Score of the relevance of the namespace', + }, + 'avatar_url': { + 'type': ['string', 'null'], + 'description': 'URL of the avatar for this namespace', + }, + 'url': { + 'type': 'string', + 'description': 'URL of the website to view the namespace', + }, + 'id': { + 'type': 'string', + 'description': 'Trigger-internal ID of the namespace', + }, + 'title': { + 'type': 'string', + 'description': 'Human-readable title of the namespace', + }, + }, + 'required': ['personal', 'score', 'avatar_url', 'id', 'title'], + }, +} + +BUILD_SOURCES_SCHEMA = { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': { + 'type': 'string', + 'description': 'The name of the repository, without its namespace', + }, + 'full_name': { + 'type': 'string', + 'description': 'The name of the repository, with its namespace', + }, + 'description': { + 'type': 'string', + 'description': 'The description of the repository. May be an empty string', + }, + 'last_updated': { + 'type': 'number', + 'description': 'The date/time when the repository was last updated, since epoch in UTC', + }, + 'url': { + 'type': 'string', + 'description': 'The URL at which to view the repository in the browser', + }, + 'has_admin_permissions': { + 'type': 'boolean', + 'description': 'True if the current user has admin permissions on the repository', + }, + 'private': { + 'type': 'boolean', + 'description': 'True if the repository is private', + }, + }, + 'required': ['name', 'full_name', 'description', 'last_updated', + 'has_admin_permissions', 'private'], + }, +} METADATA_SCHEMA = { 'type': 'object', @@ -18,7 +96,7 @@ METADATA_SCHEMA = { 'ref': { 'type': 'string', 'description': 'git reference for a git commit', - 'pattern': '^refs\/(heads|tags|remotes)\/(.+)$', + 'pattern': r'^refs\/(heads|tags|remotes)\/(.+)$', }, 'default_branch': { 'type': 'string', @@ -79,13 +157,14 @@ METADATA_SCHEMA = { 'required': ['username'], }, }, - 'required': ['url', 'message', 'date'], + 'required': ['message'], }, }, 'required': ['commit', 'git_url'], } +@add_metaclass(ABCMeta) class BuildTriggerHandler(object): def __init__(self, trigger, override_config=None): self.trigger = trigger @@ -94,74 +173,108 @@ class BuildTriggerHandler(object): @property def auth_token(self): """ Returns the auth token for the trigger. 
""" - return self.trigger.auth_token + # NOTE: This check is for testing. + if isinstance(self.trigger.auth_token, str): + return self.trigger.auth_token + # TODO(remove-unenc): Remove legacy field. + if self.trigger.secure_auth_token is not None: + return self.trigger.secure_auth_token.decrypt() + + if ActiveDataMigration.has_flag(ERTMigrationFlags.READ_OLD_FIELDS): + return self.trigger.auth_token + + return None + + @abstractmethod def load_dockerfile_contents(self): """ Loads the Dockerfile found for the trigger's config and returns them or None if none could be found/loaded. """ - raise NotImplementedError + pass - def list_build_sources(self): + @abstractmethod + def list_build_source_namespaces(self): """ Take the auth information for the specific trigger type and load the - list of build sources(repositories). + list of namespaces that can contain build sources. """ - raise NotImplementedError + pass + @abstractmethod + def list_build_sources_for_namespace(self, namespace): + """ + Take the auth information for the specific trigger type and load the + list of repositories under the given namespace. + """ + pass + + @abstractmethod def list_build_subdirs(self): """ Take the auth information and the specified config so far and list all of the possible subdirs containing dockerfiles. """ - raise NotImplementedError + pass - def handle_trigger_request(self): + @abstractmethod + def handle_trigger_request(self, request): """ Transform the incoming request data into a set of actions. Returns a PreparedBuild. """ - raise NotImplementedError + pass + @abstractmethod def is_active(self): """ Returns True if the current build trigger is active. Inactive means further setup is needed. """ - raise NotImplementedError + pass + @abstractmethod def activate(self, standard_webhook_url): """ Activates the trigger for the service, with the given new configuration. Returns new public and private config that should be stored if successful. """ - raise NotImplementedError + pass + @abstractmethod def deactivate(self): """ Deactivates the trigger for the service, removing any hooks installed in the remote service. Returns the new config that should be stored if this trigger is going to be re-activated. """ - raise NotImplementedError + pass + @abstractmethod def manual_start(self, run_parameters=None): """ Manually creates a repository build for this trigger. Returns a PreparedBuild. """ - raise NotImplementedError + pass + @abstractmethod def list_field_values(self, field_name, limit=None): """ Lists all values for the given custom trigger field. For example, a trigger might have a field named "branches", and this method would return all branches. """ - raise NotImplementedError + pass + @abstractmethod def get_repository_url(self): """ Returns the URL of the current trigger's repository. Note that this operation can be called in a loop, so it should be as fast as possible. """ - raise NotImplementedError + pass + + @classmethod + def filename_is_dockerfile(cls, file_name): + """ Returns whether the file is named Dockerfile or follows the convention .Dockerfile""" + return file_name.endswith(".Dockerfile") or u"Dockerfile" == file_name @classmethod def service_name(cls): @@ -190,14 +303,10 @@ class BuildTriggerHandler(object): def get_dockerfile_path(self): """ Returns the normalized path to the Dockerfile found in the subdirectory in the config. 
""" - subdirectory = self.config.get('subdir', '') - if subdirectory == '/': - subdirectory = '' - else: - if not subdirectory.endswith('/'): - subdirectory = subdirectory + '/' - - return subdirectory + 'Dockerfile' + dockerfile_path = self.config.get('dockerfile_path') or 'Dockerfile' + if dockerfile_path[0] == '/': + dockerfile_path = dockerfile_path[1:] + return dockerfile_path def prepare_build(self, metadata, is_manual=False): # Ensure that the metadata meets the scheme. @@ -207,10 +316,10 @@ class BuildTriggerHandler(object): ref = metadata.get('ref', None) commit_sha = metadata['commit'] default_branch = metadata.get('default_branch', None) - prepared = PreparedBuild(self.trigger) prepared.name_from_sha(commit_sha) - prepared.subdirectory = config.get('subdir', None) + prepared.subdirectory = config.get('dockerfile_path', None) + prepared.context = config.get('context', None) prepared.is_manual = is_manual prepared.metadata = metadata @@ -220,3 +329,39 @@ class BuildTriggerHandler(object): prepared.tags = [commit_sha[:7]] return prepared + + @classmethod + def build_sources_response(cls, sources): + validate(sources, BUILD_SOURCES_SCHEMA) + return sources + + @classmethod + def build_namespaces_response(cls, namespaces_dict): + namespaces = list(namespaces_dict.values()) + validate(namespaces, NAMESPACES_SCHEMA) + return namespaces + + @classmethod + def get_parent_directory_mappings(cls, dockerfile_path, current_paths=None): + """ Returns a map of dockerfile_paths to it's possible contexts. """ + if dockerfile_path == "": + return {} + + if dockerfile_path[0] != os.path.sep: + dockerfile_path = os.path.sep + dockerfile_path + + dockerfile_path = os.path.normpath(dockerfile_path) + all_paths = set() + path, _ = os.path.split(dockerfile_path) + if path == "": + path = os.path.sep + + all_paths.add(path) + for i in range(1, len(path.split(os.path.sep))): + path, _ = os.path.split(path) + all_paths.add(path) + + if current_paths: + return dict({dockerfile_path: list(all_paths)}, **current_paths) + + return {dockerfile_path: list(all_paths)} diff --git a/buildtrigger/bitbuckethandler.py b/buildtrigger/bitbuckethandler.py index 529b635cc..9573f5c60 100644 --- a/buildtrigger/bitbuckethandler.py +++ b/buildtrigger/bitbuckethandler.py @@ -1,19 +1,22 @@ import logging +import os import re +from calendar import timegm +import dateutil.parser +from bitbucket import BitBucket from jsonschema import validate + +from app import app, get_app_url +from buildtrigger.basehandler import BuildTriggerHandler from buildtrigger.triggerutil import (RepositoryReadException, TriggerActivationException, TriggerDeactivationException, TriggerStartException, InvalidPayloadException, TriggerProviderException, + SkipRequestException, determine_build_ref, raise_if_skipped_build, find_matching_branches) - -from buildtrigger.basehandler import BuildTriggerHandler - -from app import app, get_app_url -from bitbucket import BitBucket -from util.security.ssh import generate_ssh_keypair from util.dict_wrappers import JSONPathDict, SafeDictSetter +from util.security.ssh import generate_ssh_keypair logger = logging.getLogger(__name__) @@ -31,7 +34,7 @@ BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = { }, }, 'required': ['full_name'], - }, + }, # /Repository 'push': { 'type': 'object', 'properties': { @@ -61,21 +64,15 @@ BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = { 'user': { 'type': 'object', 'properties': { - 'username': { + 'display_name': { + 'type': 'string', + }, + 'account_id': { 'type': 'string', }, 'links': { 'type': 'object', 
'properties': { - 'html': { - 'type': 'object', - 'properties': { - 'href': { - 'type': 'string', - }, - }, - 'required': ['href'], - }, 'avatar': { 'type': 'object', 'properties': { @@ -86,59 +83,37 @@ BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = { 'required': ['href'], }, }, - 'required': ['html', 'avatar'], - }, + 'required': ['avatar'], + }, # /User }, - 'required': ['username'], - }, + }, # /Author }, - }, - 'links': { - 'type': 'object', - 'properties': { - 'html': { - 'type': 'object', - 'properties': { - 'href': { - 'type': 'string', - }, - }, - 'required': ['href'], - }, - }, - 'required': ['html'], - }, + }, }, 'required': ['hash', 'message', 'date'], - }, + }, # /Target }, - 'required': ['target'], - }, + 'required': ['name', 'target'], + }, # /New }, - }, - }, + }, # /Changes item + }, # /Changes }, 'required': ['changes'], - }, + }, # / Push }, 'actor': { 'type': 'object', 'properties': { - 'username': { + 'account_id': { + 'type': 'string', + }, + 'display_name': { 'type': 'string', }, 'links': { 'type': 'object', 'properties': { - 'html': { - 'type': 'object', - 'properties': { - 'href': { - 'type': 'string', - }, - }, - 'required': ['href'], - }, 'avatar': { 'type': 'object', 'properties': { @@ -149,13 +124,12 @@ BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA = { 'required': ['href'], }, }, - 'required': ['html', 'avatar'], + 'required': ['avatar'], }, }, - 'required': ['username'], - }, + }, # /Actor 'required': ['push', 'repository'], -} +} # /Root BITBUCKET_COMMIT_INFO_SCHEMA = { 'type': 'object', @@ -203,8 +177,7 @@ def get_transformed_commit_info(bb_commit, ref, default_branch, repository_name, author = lookup_author(match.group(1)) author_info = JSONPathDict(author) if author is not None else None if author_info: - config['commit_info.author.username'] = author_info['user.username'] - config['commit_info.author.url'] = 'https://bitbucket.org/%s/' % author_info['user.username'] + config['commit_info.author.username'] = author_info['user.display_name'] config['commit_info.author.avatar_url'] = author_info['user.avatar'] return config.dict_value() @@ -217,16 +190,17 @@ def get_transformed_webhook_payload(bb_payload, default_branch=None): try: validate(bb_payload, BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA) except Exception as exc: - logger.exception('Exception when validating Bitbucket webhook payload: %s from %s', exc.message, bb_payload) + logger.exception('Exception when validating Bitbucket webhook payload: %s from %s', exc.message, + bb_payload) raise InvalidPayloadException(exc.message) payload = JSONPathDict(bb_payload) change = payload['push.changes[-1].new'] if not change: - return None + raise SkipRequestException - ref = ('refs/heads/' + change['name'] if change['type'] == 'branch' - else 'refs/tags/' + change['name']) + is_branch = change['type'] == 'branch' + ref = 'refs/heads/' + change['name'] if is_branch else 'refs/tags/' + change['name'] repository_name = payload['repository.full_name'] target = change['target'] @@ -237,16 +211,14 @@ def get_transformed_webhook_payload(bb_payload, default_branch=None): config['default_branch'] = default_branch config['git_url'] = 'git@bitbucket.org:%s.git' % repository_name - config['commit_info.url'] = target['links.html.href'] + config['commit_info.url'] = target['links.html.href'] or '' config['commit_info.message'] = target['message'] config['commit_info.date'] = target['date'] - config['commit_info.author.username'] = target['author.user.username'] - config['commit_info.author.url'] = target['author.user.links.html.href'] + 
config['commit_info.author.username'] = target['author.user.display_name'] config['commit_info.author.avatar_url'] = target['author.user.links.avatar.href'] - config['commit_info.committer.username'] = payload['actor.username'] - config['commit_info.committer.url'] = payload['actor.links.html.href'] + config['commit_info.committer.username'] = payload['actor.display_name'] config['commit_info.committer.avatar_url'] = payload['actor.links.avatar.href'] return config.dict_value() @@ -324,8 +296,8 @@ class BitbucketBuildTrigger(BuildTriggerHandler): if not result: return False - username = data['user']['username'] - self.put_config_key('username', username) + self.put_config_key('account_id', data['user']['account_id']) + self.put_config_key('nickname', data['user']['nickname']) return True def is_active(self): @@ -390,7 +362,7 @@ class BitbucketBuildTrigger(BuildTriggerHandler): return config - def list_build_sources(self): + def list_build_source_namespaces(self): bitbucket_client = self._get_authorized_client() (result, data, err_msg) = bitbucket_client.get_visible_repositories() if not result: @@ -398,22 +370,43 @@ class BitbucketBuildTrigger(BuildTriggerHandler): namespaces = {} for repo in data: - if not repo['scm'] == 'git': - continue - owner = repo['owner'] - if not owner in namespaces: + + if owner in namespaces: + namespaces[owner]['score'] = namespaces[owner]['score'] + 1 + else: namespaces[owner] = { - 'personal': owner == self.config.get('username'), - 'repos': [], - 'info': { - 'name': owner - } + 'personal': owner == self.config.get('nickname', self.config.get('username')), + 'id': owner, + 'title': owner, + 'avatar_url': repo['logo'], + 'url': 'https://bitbucket.org/%s' % (owner), + 'score': 1, } - namespaces[owner]['repos'].append(owner + '/' + repo['slug']) + return BuildTriggerHandler.build_namespaces_response(namespaces) - return namespaces.values() + def list_build_sources_for_namespace(self, namespace): + def repo_view(repo): + last_modified = dateutil.parser.parse(repo['utc_last_updated']) + + return { + 'name': repo['slug'], + 'full_name': '%s/%s' % (repo['owner'], repo['slug']), + 'description': repo['description'] or '', + 'last_updated': timegm(last_modified.utctimetuple()), + 'url': 'https://bitbucket.org/%s/%s' % (repo['owner'], repo['slug']), + 'has_admin_permissions': repo['read_only'] is False, + 'private': repo['is_private'], + } + + bitbucket_client = self._get_authorized_client() + (result, data, err_msg) = bitbucket_client.get_visible_repositories() + if not result: + raise RepositoryReadException('Could not read repository list: ' + err_msg) + + repos = [repo_view(repo) for repo in data if repo['owner'] == namespace] + return BuildTriggerHandler.build_sources_response(repos) def list_build_subdirs(self): config = self.config @@ -430,10 +423,7 @@ class BitbucketBuildTrigger(BuildTriggerHandler): raise RepositoryReadException(err_msg) files = set([f['path'] for f in data['files']]) - if 'Dockerfile' in files: - return ['/'] - - return [] + return ["/" + file_path for file_path in files if self.filename_is_dockerfile(os.path.basename(file_path))] def load_dockerfile_contents(self): repository = self._get_repository_client() @@ -441,11 +431,14 @@ class BitbucketBuildTrigger(BuildTriggerHandler): (result, data, err_msg) = repository.get_raw_path_contents(path, revision='master') if not result: - raise RepositoryReadException(err_msg) + return None return data def list_field_values(self, field_name, limit=None): + if 'build_source' not in self.config: + return 
None + source = self.config['build_source'] (namespace, name) = source.split('/') @@ -494,6 +487,9 @@ class BitbucketBuildTrigger(BuildTriggerHandler): def handle_trigger_request(self, request): payload = request.get_json() + if payload is None: + raise InvalidPayloadException('Missing payload') + logger.debug('Got BitBucket request: %s', payload) repository = self._get_repository_client() @@ -513,19 +509,19 @@ class BitbucketBuildTrigger(BuildTriggerHandler): def get_branch_sha(branch_name): # Lookup the commit SHA for the branch. - (result, data, _) = repository.get_branches() - if not result or not branch_name in data: - raise TriggerStartException('Could not find branch commit SHA') + (result, data, _) = repository.get_branch(branch_name) + if not result: + raise TriggerStartException('Could not find branch in repository') - return data[branch_name]['node'] + return data['target']['hash'] def get_tag_sha(tag_name): # Lookup the commit SHA for the tag. - (result, data, _) = repository.get_tags() - if not result or not tag_name in data: - raise TriggerStartException('Could not find tag commit SHA') + (result, data, _) = repository.get_tag(tag_name) + if not result: + raise TriggerStartException('Could not find tag in repository') - return data[tag_name]['node'] + return data['target']['hash'] def lookup_author(email_address): (result, data, _) = bitbucket_client.accounts().get_profile(email_address) diff --git a/buildtrigger/customhandler.py b/buildtrigger/customhandler.py index b3b1b01ba..193445ee2 100644 --- a/buildtrigger/customhandler.py +++ b/buildtrigger/customhandler.py @@ -16,9 +16,6 @@ from buildtrigger.bitbuckethandler import (BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA as b from buildtrigger.githubhandler import (GITHUB_WEBHOOK_PAYLOAD_SCHEMA as gh_schema, get_transformed_webhook_payload as gh_payload) -from buildtrigger.bitbuckethandler import (BITBUCKET_WEBHOOK_PAYLOAD_SCHEMA as bb_schema, - get_transformed_webhook_payload as bb_payload) - from buildtrigger.gitlabhandler import (GITLAB_WEBHOOK_PAYLOAD_SCHEMA as gl_schema, get_transformed_webhook_payload as gl_payload) @@ -162,7 +159,7 @@ class CustomBuildTrigger(BuildTriggerHandler): def handle_trigger_request(self, request): payload = request.data if not payload: - raise InvalidPayloadException() + raise InvalidPayloadException('Missing expected payload') logger.debug('Payload %s', payload) @@ -186,7 +183,10 @@ class CustomBuildTrigger(BuildTriggerHandler): 'git_url': config['build_source'], } - return self.prepare_build(metadata, is_manual=True) + try: + return self.prepare_build(metadata, is_manual=True) + except ValidationError as ve: + raise TriggerStartException(ve.message) def activate(self, standard_webhook_url): config = self.config @@ -212,3 +212,18 @@ class CustomBuildTrigger(BuildTriggerHandler): def get_repository_url(self): return None + + def list_build_source_namespaces(self): + raise NotImplementedError + + def list_build_sources_for_namespace(self, namespace): + raise NotImplementedError + + def list_build_subdirs(self): + raise NotImplementedError + + def list_field_values(self, field_name, limit=None): + raise NotImplementedError + + def load_dockerfile_contents(self): + raise NotImplementedError diff --git a/buildtrigger/githubhandler.py b/buildtrigger/githubhandler.py index 12bb3dc7f..bc40f993c 100644 --- a/buildtrigger/githubhandler.py +++ b/buildtrigger/githubhandler.py @@ -1,25 +1,29 @@ import logging import os.path import base64 +import re + +from calendar import timegm +from functools import wraps +from ssl 
import SSLError + +from github import (Github, UnknownObjectException, GithubException, + BadCredentialsException as GitHubBadCredentialsException) -from app import app, github_trigger from jsonschema import validate +from app import app, github_trigger from buildtrigger.triggerutil import (RepositoryReadException, TriggerActivationException, TriggerDeactivationException, TriggerStartException, EmptyRepositoryException, ValidationRequestException, SkipRequestException, InvalidPayloadException, determine_build_ref, raise_if_skipped_build, find_matching_branches) - from buildtrigger.basehandler import BuildTriggerHandler - +from endpoints.exception import ExternalServiceError from util.security.ssh import generate_ssh_keypair from util.dict_wrappers import JSONPathDict, SafeDictSetter -from github import (Github, UnknownObjectException, GithubException, - BadCredentialsException as GitHubBadCredentialsException) - logger = logging.getLogger(__name__) GITHUB_WEBHOOK_PAYLOAD_SCHEMA = { @@ -29,7 +33,7 @@ GITHUB_WEBHOOK_PAYLOAD_SCHEMA = { 'type': 'string', }, 'head_commit': { - 'type': 'object', + 'type': ['object', 'null'], 'properties': { 'id': { 'type': 'string', @@ -98,10 +102,13 @@ def get_transformed_webhook_payload(gh_payload, default_branch=None, lookup_user payload = JSONPathDict(gh_payload) + if payload['head_commit'] is None: + raise SkipRequestException + config = SafeDictSetter() config['commit'] = payload['head_commit.id'] config['ref'] = payload['ref'] - config['default_branch'] = default_branch + config['default_branch'] = payload['repository.default_branch'] or default_branch config['git_url'] = payload['repository.ssh_url'] config['commit_info.url'] = payload['head_commit.url'] @@ -136,6 +143,18 @@ def get_transformed_webhook_payload(gh_payload, default_branch=None, lookup_user return config.dict_value() +def _catch_ssl_errors(func): + @wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except SSLError as se: + msg = 'Request to the GitHub API failed: %s' % se.message + logger.exception(msg) + raise ExternalServiceError(msg) + return wrapper + + class GithubBuildTrigger(BuildTriggerHandler): """ BuildTrigger for GitHub that uses the archive API and buildpacks. @@ -166,6 +185,7 @@ class GithubBuildTrigger(BuildTriggerHandler): return default_msg + @_catch_ssl_errors def activate(self, standard_webhook_url): config = self.config new_build_source = config['build_source'] @@ -213,6 +233,7 @@ class GithubBuildTrigger(BuildTriggerHandler): return config, {'private_key': private_key} + @_catch_ssl_errors def deactivate(self): config = self.config gh_client = self._get_client() @@ -241,68 +262,84 @@ class GithubBuildTrigger(BuildTriggerHandler): raise TriggerDeactivationException(msg) # Remove the webhook. 
- try: - hook = repo.get_hook(config['hook_id']) - hook.delete() - except GithubException as ghe: - default_msg = 'Unable to remove hook: %s' % config['hook_id'] - msg = GithubBuildTrigger._get_error_message(ghe, default_msg) - raise TriggerDeactivationException(msg) + if 'hook_id' in config: + try: + hook = repo.get_hook(config['hook_id']) + hook.delete() + except GithubException as ghe: + default_msg = 'Unable to remove hook: %s' % config['hook_id'] + msg = GithubBuildTrigger._get_error_message(ghe, default_msg) + raise TriggerDeactivationException(msg) config.pop('hook_id', None) self.config = config return config - def list_build_sources(self): + @_catch_ssl_errors + def list_build_source_namespaces(self): gh_client = self._get_client() usr = gh_client.get_user() - try: - repos = usr.get_repos() - except GithubException: - raise RepositoryReadException('Unable to list user repositories') - + # Build the full set of namespaces for the user, starting with their own. namespaces = {} - has_non_personal = False + namespaces[usr.login] = { + 'personal': True, + 'id': usr.login, + 'title': usr.name or usr.login, + 'avatar_url': usr.avatar_url, + 'url': usr.html_url, + 'score': usr.plan.private_repos if usr.plan else 0, + } - for repository in repos: - namespace = repository.owner.login - if not namespace in namespaces: - is_personal_repo = namespace == usr.login - namespaces[namespace] = { - 'personal': is_personal_repo, - 'repos': [], - 'info': { - 'name': namespace, - 'avatar_url': repository.owner.avatar_url - } - } + for org in usr.get_orgs(): + organization = org.login if org.login else org.name - if not is_personal_repo: - has_non_personal = True + # NOTE: We don't load the organization's html_url nor its plan, because doing + # so requires loading *each organization* via its own API call in this tight + # loop, which was massively slowing down the load time for users when setting + # up triggers. + namespaces[organization] = { + 'personal': False, + 'id': organization, + 'title': organization, + 'avatar_url': org.avatar_url, + 'url': '', + 'score': 0, + } - namespaces[namespace]['repos'].append(repository.full_name) + return BuildTriggerHandler.build_namespaces_response(namespaces) - # In older versions of GitHub Enterprise, the get_repos call above does not - # return any non-personal repositories. In that case, we need to lookup the - # repositories manually. 
- # TODO: Remove this once we no longer support GHE versions <= 2.1 - if not has_non_personal: - for org in usr.get_orgs(): - repo_list = [repo.full_name for repo in org.get_repos(type='member')] - namespaces[org.name] = { - 'personal': False, - 'repos': repo_list, - 'info': { - 'name': org.name or org.login, - 'avatar_url': org.avatar_url - } - } + @_catch_ssl_errors + def list_build_sources_for_namespace(self, namespace): + def repo_view(repo): + return { + 'name': repo.name, + 'full_name': repo.full_name, + 'description': repo.description or '', + 'last_updated': timegm(repo.pushed_at.utctimetuple()) if repo.pushed_at else 0, + 'url': repo.html_url, + 'has_admin_permissions': repo.permissions.admin, + 'private': repo.private, + } - entries = list(namespaces.values()) - entries.sort(key=lambda e: e['info']['name']) - return entries + gh_client = self._get_client() + usr = gh_client.get_user() + if namespace == usr.login: + repos = [repo_view(repo) for repo in usr.get_repos(type='owner', sort='updated')] + return BuildTriggerHandler.build_sources_response(repos) + try: + org = gh_client.get_organization(namespace) + if org is None: + return [] + except GithubException: + return [] + + repos = [repo_view(repo) for repo in org.get_repos(type='member')] + return BuildTriggerHandler.build_sources_response(repos) + + + @_catch_ssl_errors def list_build_subdirs(self): config = self.config gh_client = self._get_client() @@ -318,9 +355,8 @@ class GithubBuildTrigger(BuildTriggerHandler): default_commit = repo.get_branch(branches[0]).commit commit_tree = repo.get_git_tree(default_commit.sha, recursive=True) - return [os.path.dirname(elem.path) for elem in commit_tree.tree - if (elem.type == u'blob' and - os.path.basename(elem.path) == u'Dockerfile')] + return [elem.path for elem in commit_tree.tree + if (elem.type == u'blob' and self.filename_is_dockerfile(os.path.basename(elem.path)))] except GithubException as ghe: message = ghe.data.get('message', 'Unable to list contents of repository: %s' % source) if message == 'Branch not found': @@ -328,27 +364,41 @@ class GithubBuildTrigger(BuildTriggerHandler): raise RepositoryReadException(message) + @_catch_ssl_errors def load_dockerfile_contents(self): config = self.config gh_client = self._get_client() - source = config['build_source'] - path = self.get_dockerfile_path() + try: repo = gh_client.get_repo(source) - file_info = repo.get_file_contents(path) - if file_info is None: - return None - - content = file_info.content - if file_info.encoding == 'base64': - content = base64.b64decode(content) - return content - except GithubException as ghe: - message = ghe.data.get('message', 'Unable to read Dockerfile: %s' % source) + message = ghe.data.get('message', 'Unable to list contents of repository: %s' % source) raise RepositoryReadException(message) + path = self.get_dockerfile_path() + if not path: + return None + + try: + file_info = repo.get_contents(path) + # TypeError is needed because directory inputs cause a TypeError + except (GithubException, TypeError) as ghe: + logger.error("got error from trying to find github file %s" % ghe) + return None + + if file_info is None: + return None + + if isinstance(file_info, list): + return None + + content = file_info.content + if file_info.encoding == 'base64': + content = base64.b64decode(content) + return content + + @_catch_ssl_errors def list_field_values(self, field_name, limit=None): if field_name == 'refs': branches = self.list_field_values('branch_name') @@ -358,10 +408,13 @@ class 
GithubBuildTrigger(BuildTriggerHandler): [{'kind': 'tag', 'name': tag} for tag in tags]) config = self.config + source = config.get('build_source') + if source is None: + return [] + if field_name == 'tag_name': try: gh_client = self._get_client() - source = config['build_source'] repo = gh_client.get_repo(source) gh_tags = repo.get_tags() if limit: @@ -378,7 +431,6 @@ class GithubBuildTrigger(BuildTriggerHandler): if field_name == 'branch_name': try: gh_client = self._get_client() - source = config['build_source'] repo = gh_client.get_repo(source) gh_branches = repo.get_branches() if limit: @@ -439,6 +491,7 @@ class GithubBuildTrigger(BuildTriggerHandler): 'commit_info': commit_info } + @_catch_ssl_errors def manual_start(self, run_parameters=None): config = self.config source = config['build_source'] @@ -452,8 +505,11 @@ class GithubBuildTrigger(BuildTriggerHandler): raise TriggerStartException(msg) def get_branch_sha(branch_name): - branch = repo.get_branch(branch_name) - return branch.commit.sha + try: + branch = repo.get_branch(branch_name) + return branch.commit.sha + except GithubException: + raise TriggerStartException('Could not find branch in repository') def get_tag_sha(tag_name): tags = {tag.name: tag for tag in repo.get_tags()} @@ -469,6 +525,7 @@ class GithubBuildTrigger(BuildTriggerHandler): metadata = GithubBuildTrigger._build_metadata_for_commit(commit_sha, ref, repo) return self.prepare_build(metadata, is_manual=True) + @_catch_ssl_errors def lookup_user(self, username): try: gh_client = self._get_client() @@ -480,15 +537,30 @@ class GithubBuildTrigger(BuildTriggerHandler): except GithubException: return None + @_catch_ssl_errors def handle_trigger_request(self, request): # Check the payload to see if we should skip it based on the lack of a head_commit. payload = request.get_json() + if payload is None: + raise InvalidPayloadException('Missing payload') # This is for GitHub's probing/testing. if 'zen' in payload: - raise ValidationRequestException() + raise SkipRequestException() # Lookup the default branch for the repository. + if 'repository' not in payload: + raise InvalidPayloadException("Missing 'repository' on request") + + if 'owner' not in payload['repository']: + raise InvalidPayloadException("Missing 'owner' on repository") + + if 'name' not in payload['repository']['owner']: + raise InvalidPayloadException("Missing owner 'name' on repository") + + if 'name' not in payload['repository']: + raise InvalidPayloadException("Missing 'name' on repository") + default_branch = None lookup_user = None try: @@ -507,7 +579,7 @@ class GithubBuildTrigger(BuildTriggerHandler): logger.debug('GitHub trigger payload %s', payload) metadata = get_transformed_webhook_payload(payload, default_branch=default_branch, - lookup_user=lookup_user) + lookup_user=lookup_user) prepared = self.prepare_build(metadata) # Check if we should skip this build. 
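
The @_catch_ssl_errors decorator added throughout the GitHub handler above follows an ordinary wrap-and-reraise pattern: any method that talks to the remote API is wrapped so that a low-level SSLError surfaces as a service-level error instead of an unhandled exception. A minimal standalone sketch of that pattern follows; note that ExternalServiceError here is only a stand-in for the class imported from endpoints.exception, and list_tags is a hypothetical wrapped call used for illustration, not part of the handler.

import logging
from functools import wraps
from ssl import SSLError

logger = logging.getLogger(__name__)


class ExternalServiceError(Exception):
    """Stand-in for endpoints.exception.ExternalServiceError."""


def catch_ssl_errors(func):
    """Convert low-level SSL failures from an API call into a service-level error."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except SSLError as se:
            msg = 'Request to the external API failed: %s' % se
            logger.exception(msg)
            raise ExternalServiceError(msg)
    return wrapper


@catch_ssl_errors
def list_tags(gh_client, repo_name):
    # Hypothetical example: every API-calling method is wrapped the same way,
    # so callers only ever see ExternalServiceError for transport failures.
    return gh_client.get_repo(repo_name).get_tags()
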
diff --git a/buildtrigger/gitlabhandler.py b/buildtrigger/gitlabhandler.py index 0d45ea2ba..9ed3e91d0 100644 --- a/buildtrigger/gitlabhandler.py +++ b/buildtrigger/gitlabhandler.py @@ -1,25 +1,26 @@ +import os.path import logging +from calendar import timegm from functools import wraps -from urlparse import urljoin -from app import app, gitlab_trigger +import dateutil.parser +import gitlab +import requests from jsonschema import validate + +from app import app, gitlab_trigger from buildtrigger.triggerutil import (RepositoryReadException, TriggerActivationException, TriggerDeactivationException, TriggerStartException, SkipRequestException, InvalidPayloadException, + TriggerAuthException, determine_build_ref, raise_if_skipped_build, find_matching_branches) - from buildtrigger.basehandler import BuildTriggerHandler - +from endpoints.exception import ExternalServiceError from util.security.ssh import generate_ssh_keypair from util.dict_wrappers import JSONPathDict, SafeDictSetter -from endpoints.exception import ExternalServiceTimeout - -import gitlab -import requests logger = logging.getLogger(__name__) @@ -30,7 +31,7 @@ GITLAB_WEBHOOK_PAYLOAD_SCHEMA = { 'type': 'string', }, 'checkout_sha': { - 'type': 'string', + 'type': ['string', 'null'], }, 'repository': { 'type': 'object', @@ -46,9 +47,12 @@ GITLAB_WEBHOOK_PAYLOAD_SCHEMA = { 'items': { 'type': 'object', 'properties': { - 'url': { + 'id': { 'type': 'string', }, + 'url': { + 'type': ['string', 'null'], + }, 'message': { 'type': 'string', }, @@ -65,15 +69,25 @@ GITLAB_WEBHOOK_PAYLOAD_SCHEMA = { 'required': ['email'], }, }, - 'required': ['url', 'message', 'timestamp'], + 'required': ['id', 'message', 'timestamp'], }, - 'minItems': 1, - } + }, }, 'required': ['ref', 'checkout_sha', 'repository'], } -def _catch_timeouts(func): +_ACCESS_LEVEL_MAP = { + 50: ("owner", True), + 40: ("master", True), + 30: ("developer", False), + 20: ("reporter", False), + 10: ("guest", False), +} + +_PER_PAGE_COUNT = 20 + + +def _catch_timeouts_and_errors(func): @wraps(func) def wrapper(*args, **kwargs): try: @@ -81,11 +95,37 @@ def _catch_timeouts(func): except requests.exceptions.Timeout: msg = 'Request to the GitLab API timed out' logger.exception(msg) - raise ExternalServiceTimeout(msg) + raise ExternalServiceError(msg) + except gitlab.GitlabError: + msg = 'GitLab API error. Please contact support.' + logger.exception(msg) + raise ExternalServiceError(msg) return wrapper -def get_transformed_webhook_payload(gl_payload, default_branch=None, lookup_user=None): +def _paginated_iterator(func, exc, **kwargs): + """ Returns an iterator over invocations of the given function, automatically handling + pagination. + """ + page = 1 + while True: + result = func(page=page, per_page=_PER_PAGE_COUNT, **kwargs) + if result is None or result is False: + raise exc + + counter = 0 + for item in result: + yield item + counter = counter + 1 + + if counter < _PER_PAGE_COUNT: + break + + page = page + 1 + + +def get_transformed_webhook_payload(gl_payload, default_branch=None, lookup_user=None, + lookup_commit=None): """ Returns the Gitlab webhook JSON payload transformed into our own payload format. If the gl_payload is not valid, returns None. """ @@ -96,19 +136,52 @@ def get_transformed_webhook_payload(gl_payload, default_branch=None, lookup_user payload = JSONPathDict(gl_payload) + if payload['object_kind'] != 'push' and payload['object_kind'] != 'tag_push': + # Unknown kind of webhook. + raise SkipRequestException + + # Check for empty commits. 
The commits list will be empty if the branch is deleted. + commits = payload['commits'] + if payload['object_kind'] == 'push' and not commits: + raise SkipRequestException + + # Check for missing commit information. + commit_sha = payload['checkout_sha'] or payload['after'] + if commit_sha is None or commit_sha == '0000000000000000000000000000000000000000': + raise SkipRequestException + config = SafeDictSetter() - config['commit'] = payload['checkout_sha'] + config['commit'] = commit_sha config['ref'] = payload['ref'] config['default_branch'] = default_branch config['git_url'] = payload['repository.git_ssh_url'] - config['commit_info.url'] = payload['commits[0].url'] - config['commit_info.message'] = payload['commits[0].message'] - config['commit_info.date'] = payload['commits[0].timestamp'] + found_commit = JSONPathDict({}) + if payload['object_kind'] == 'push' or payload['object_kind'] == 'tag_push': + # Find the commit associated with the checkout_sha. Gitlab doesn't (necessary) send this in + # any order, so we cannot simply index into the commits list. + found_commit = None + if commits is not None: + for commit in commits: + if commit['id'] == payload['checkout_sha']: + found_commit = JSONPathDict(commit) + break + + if found_commit is None and lookup_commit: + checkout_sha = payload['checkout_sha'] or payload['after'] + found_commit_info = lookup_commit(payload['project_id'], checkout_sha) + found_commit = JSONPathDict(dict(found_commit_info) if found_commit_info else {}) + + if found_commit is None: + raise SkipRequestException + + config['commit_info.url'] = found_commit['url'] + config['commit_info.message'] = found_commit['message'] + config['commit_info.date'] = found_commit['timestamp'] # Note: Gitlab does not send full user information with the payload, so we have to # (optionally) look it up. - author_email = payload['commits[0].author.email'] + author_email = found_commit['author.email'] or found_commit['author_email'] if lookup_user and author_email: author_info = lookup_user(author_email) if author_info: @@ -129,20 +202,28 @@ class GitLabBuildTrigger(BuildTriggerHandler): def _get_authorized_client(self): auth_token = self.auth_token or 'invalid' - return gitlab.Gitlab(gitlab_trigger.api_endpoint(), oauth_token=auth_token, timeout=5) + api_version = self.config.get('API_VERSION', '4') + client = gitlab.Gitlab(gitlab_trigger.api_endpoint(), oauth_token=auth_token, timeout=20, + api_version=api_version) + try: + client.auth() + except gitlab.GitlabGetError as ex: + raise TriggerAuthException(ex.message) + + return client def is_active(self): return 'hook_id' in self.config - @_catch_timeouts + @_catch_timeouts_and_errors def activate(self, standard_webhook_url): config = self.config new_build_source = config['build_source'] gl_client = self._get_authorized_client() # Find the GitLab repository. 
- repository = gl_client.getproject(new_build_source) - if repository is False: + gl_project = gl_client.projects.get(new_build_source) + if not gl_project: msg = 'Unable to find GitLab repository for source: %s' % new_build_source raise TriggerActivationException(msg) @@ -154,20 +235,31 @@ class GitLabBuildTrigger(BuildTriggerHandler): 'value': public_key, }, ] - key = gl_client.adddeploykey(repository['id'], '%s Builder' % app.config['REGISTRY_TITLE'], - public_key) - if key is False: + + key = gl_project.keys.create({ + 'title': '%s Builder' % app.config['REGISTRY_TITLE'], + 'key': public_key, + }) + + if not key: msg = 'Unable to add deploy key to repository: %s' % new_build_source raise TriggerActivationException(msg) - config['key_id'] = key['id'] + + config['key_id'] = key.get_id() # Add the webhook to the GitLab repository. - hook = gl_client.addprojecthook(repository['id'], standard_webhook_url, push=True) - if hook is False: + hook = gl_project.hooks.create({ + 'url': standard_webhook_url, + 'push': True, + 'tag_push': True, + 'push_events': True, + 'tag_push_events': True, + }) + if not hook: msg = 'Unable to create webhook on repository: %s' % new_build_source raise TriggerActivationException(msg) - config['hook_id'] = hook['id'] + config['hook_id'] = hook.get_id() self.config = config return config, {'private_key': private_key} @@ -176,94 +268,169 @@ class GitLabBuildTrigger(BuildTriggerHandler): gl_client = self._get_authorized_client() # Find the GitLab repository. - repository = gl_client.getproject(config['build_source']) - if repository is False: - msg = 'Unable to find GitLab repository for source: %s' % config['build_source'] - raise TriggerDeactivationException(msg) + try: + gl_project = gl_client.projects.get(config['build_source']) + if not gl_project: + config.pop('key_id', None) + config.pop('hook_id', None) + self.config = config + return config + except gitlab.GitlabGetError as ex: + if ex.response_code != 404: + raise # Remove the webhook. 
- success = gl_client.deleteprojecthook(repository['id'], config['hook_id']) - if success is False: - msg = 'Unable to remove hook: %s' % config['hook_id'] - raise TriggerDeactivationException(msg) + try: + gl_project.hooks.delete(config['hook_id']) + except gitlab.GitlabDeleteError as ex: + if ex.response_code != 404: + raise + config.pop('hook_id', None) # Remove the key - success = gl_client.deletedeploykey(repository['id'], config['key_id']) - if success is False: - msg = 'Unable to remove deploy key: %s' % config['key_id'] - raise TriggerDeactivationException(msg) + try: + gl_project.keys.delete(config['key_id']) + except gitlab.GitlabDeleteError as ex: + if ex.response_code != 404: + raise + config.pop('key_id', None) self.config = config - return config - @_catch_timeouts - def list_build_sources(self): + @_catch_timeouts_and_errors + def list_build_source_namespaces(self): gl_client = self._get_authorized_client() - current_user = gl_client.currentuser() - if current_user is False: + current_user = gl_client.user + if not current_user: raise RepositoryReadException('Unable to get current user') - repositories = gl_client.getprojects() - if repositories is False: - raise RepositoryReadException('Unable to list user repositories') - namespaces = {} - for repo in repositories: - owner = repo['namespace']['name'] - if not owner in namespaces: - namespaces[owner] = { - 'personal': owner == current_user['username'], - 'repos': [], - 'info': { - 'name': owner, - } + for namespace in _paginated_iterator(gl_client.namespaces.list, RepositoryReadException): + namespace_id = namespace.get_id() + if namespace_id in namespaces: + namespaces[namespace_id]['score'] = namespaces[namespace_id]['score'] + 1 + else: + owner = namespace.attributes['name'] + namespaces[namespace_id] = { + 'personal': namespace.attributes['kind'] == 'user', + 'id': str(namespace_id), + 'title': namespace.attributes['name'], + 'avatar_url': namespace.attributes.get('avatar_url'), + 'score': 1, + 'url': namespace.attributes.get('web_url') or '', } - namespaces[owner]['repos'].append(repo['path_with_namespace']) + return BuildTriggerHandler.build_namespaces_response(namespaces) - return namespaces.values() + def _get_namespace(self, gl_client, gl_namespace, lazy=False): + try: + if gl_namespace.attributes['kind'] == 'group': + return gl_client.groups.get(gl_namespace.attributes['id'], lazy=lazy) - @_catch_timeouts + if gl_namespace.attributes['kind'] == 'user': + return gl_client.users.get(gl_client.user.attributes['id'], lazy=lazy) + + # Note: This doesn't seem to work for IDs retrieved via the namespaces API; the IDs are + # different. + return gl_client.users.get(gl_namespace.attributes['id'], lazy=lazy) + except gitlab.GitlabGetError: + return None + + @_catch_timeouts_and_errors + def list_build_sources_for_namespace(self, namespace_id): + if not namespace_id: + return [] + + def repo_view(repo): + # Because *anything* can be None in GitLab API! 
+ permissions = repo.attributes.get('permissions') or {} + group_access = permissions.get('group_access') or {} + project_access = permissions.get('project_access') or {} + + missing_group_access = permissions.get('group_access') is None + missing_project_access = permissions.get('project_access') is None + + access_level = max(group_access.get('access_level') or 0, + project_access.get('access_level') or 0) + + has_admin_permission = _ACCESS_LEVEL_MAP.get(access_level, ("", False))[1] + if missing_group_access or missing_project_access: + # Default to has permission if we cannot check the permissions. This will allow our users + # to select the repository and then GitLab's own checks will ensure that the webhook is + # added only if allowed. + # TODO: Do we want to display this differently in the UI? + has_admin_permission = True + + view = { + 'name': repo.attributes['path'], + 'full_name': repo.attributes['path_with_namespace'], + 'description': repo.attributes.get('description') or '', + 'url': repo.attributes.get('web_url'), + 'has_admin_permissions': has_admin_permission, + 'private': repo.attributes.get('visibility') == 'private', + } + + if repo.attributes.get('last_activity_at'): + try: + last_modified = dateutil.parser.parse(repo.attributes['last_activity_at']) + view['last_updated'] = timegm(last_modified.utctimetuple()) + except ValueError: + logger.exception('Gitlab gave us an invalid last_activity_at: %s', last_modified) + + return view + + gl_client = self._get_authorized_client() + + try: + gl_namespace = gl_client.namespaces.get(namespace_id) + except gitlab.GitlabGetError: + return [] + + namespace_obj = self._get_namespace(gl_client, gl_namespace, lazy=True) + repositories = _paginated_iterator(namespace_obj.projects.list, RepositoryReadException) + + try: + return BuildTriggerHandler.build_sources_response([repo_view(repo) for repo in repositories]) + except gitlab.GitlabGetError: + return [] + + @_catch_timeouts_and_errors def list_build_subdirs(self): config = self.config gl_client = self._get_authorized_client() new_build_source = config['build_source'] - repository = gl_client.getproject(new_build_source) - if repository is False: + gl_project = gl_client.projects.get(new_build_source) + if not gl_project: msg = 'Unable to find GitLab repository for source: %s' % new_build_source raise RepositoryReadException(msg) - repo_branches = gl_client.getbranches(repository['id']) - if repo_branches is False: + repo_branches = gl_project.branches.list() + if not repo_branches: msg = 'Unable to find GitLab branches for source: %s' % new_build_source raise RepositoryReadException(msg) - branches = [branch['name'] for branch in repo_branches] + branches = [branch.attributes['name'] for branch in repo_branches] branches = find_matching_branches(config, branches) - branches = branches or [repository['default_branch'] or 'master'] + branches = branches or [gl_project.attributes['default_branch'] or 'master'] - repo_tree = gl_client.getrepositorytree(repository['id'], ref_name=branches[0]) - if repo_tree is False: + repo_tree = gl_project.repository_tree(ref=branches[0]) + if not repo_tree: msg = 'Unable to find GitLab repository tree for source: %s' % new_build_source raise RepositoryReadException(msg) - for node in repo_tree: - if node['name'] == 'Dockerfile': - return ['/'] + return [node['name'] for node in repo_tree if self.filename_is_dockerfile(node['name'])] - return [] - - @_catch_timeouts + @_catch_timeouts_and_errors def load_dockerfile_contents(self): gl_client = 
self._get_authorized_client() path = self.get_dockerfile_path() - repository = gl_client.getproject(self.config['build_source']) - if repository is False: + gl_project = gl_client.projects.get(self.config['build_source']) + if not gl_project: return None branches = self.list_field_values('branch_name') @@ -272,16 +439,15 @@ class GitLabBuildTrigger(BuildTriggerHandler): return None branch_name = branches[0] - if repository['default_branch'] in branches: - branch_name = repository['default_branch'] + if gl_project.attributes['default_branch'] in branches: + branch_name = gl_project.attributes['default_branch'] - contents = gl_client.getrawfile(repository['id'], branch_name, path) - if contents is False: + try: + return gl_project.files.get(path, branch_name).decode() + except gitlab.GitlabGetError: return None - return contents - - @_catch_timeouts + @_catch_timeouts_and_errors def list_field_values(self, field_name, limit=None): if field_name == 'refs': branches = self.list_field_values('branch_name') @@ -291,140 +457,163 @@ class GitLabBuildTrigger(BuildTriggerHandler): [{'kind': 'tag', 'name': t} for t in tags]) gl_client = self._get_authorized_client() - repo = gl_client.getproject(self.config['build_source']) - if repo is False: + gl_project = gl_client.projects.get(self.config['build_source']) + if not gl_project: return [] if field_name == 'tag_name': - tags = gl_client.getrepositorytags(repo['id']) - if tags is False: + tags = gl_project.tags.list() + if not tags: return [] if limit: tags = tags[0:limit] - return [tag['name'] for tag in tags] + return [tag.attributes['name'] for tag in tags] if field_name == 'branch_name': - branches = gl_client.getbranches(repo['id']) - if branches is False: + branches = gl_project.branches.list() + if not branches: return [] if limit: branches = branches[0:limit] - return [branch['name'] for branch in branches] + return [branch.attributes['name'] for branch in branches] return None def get_repository_url(self): return gitlab_trigger.get_public_url(self.config['build_source']) - @_catch_timeouts + @_catch_timeouts_and_errors + def lookup_commit(self, repo_id, commit_sha): + if repo_id is None: + return None + + gl_client = self._get_authorized_client() + gl_project = gl_client.projects.get(self.config['build_source'], lazy=True) + commit = gl_project.commits.get(commit_sha) + if not commit: + return None + + return commit + + @_catch_timeouts_and_errors def lookup_user(self, email): gl_client = self._get_authorized_client() try: - [user] = gl_client.getusers(search=email) + result = gl_client.users.list(search=email) + if not result: + return None + [user] = result return { - 'username': user['username'], - 'html_url': gl_client.host + '/' + user['username'], - 'avatar_url': user['avatar_url'] + 'username': user.attributes['username'], + 'html_url': user.attributes['web_url'], + 'avatar_url': user.attributes['avatar_url'] } except ValueError: return None - @_catch_timeouts + @_catch_timeouts_and_errors def get_metadata_for_commit(self, commit_sha, ref, repo): - gl_client = self._get_authorized_client() - commit = gl_client.getrepositorycommit(repo['id'], commit_sha) + commit = self.lookup_commit(repo.get_id(), commit_sha) + if commit is None: + return None metadata = { - 'commit': commit['id'], + 'commit': commit.attributes['id'], 'ref': ref, - 'default_branch': repo['default_branch'], - 'git_url': repo['ssh_url_to_repo'], + 'default_branch': repo.attributes['default_branch'], + 'git_url': repo.attributes['ssh_url_to_repo'], 'commit_info': { - 
'url': gl_client.host + '/' + repo['path_with_namespace'] + '/commit/' + commit['id'], - 'message': commit['message'], - 'date': commit['committed_date'], + 'url': os.path.join(repo.attributes['web_url'], 'commit', commit.attributes['id']), + 'message': commit.attributes['message'], + 'date': commit.attributes['committed_date'], }, } committer = None - if 'committer_email' in commit: - committer = self.lookup_user(commit['committer_email']) + if 'committer_email' in commit.attributes: + committer = self.lookup_user(commit.attributes['committer_email']) author = None - if 'author_email' in commit: - author = self.lookup_user(commit['author_email']) + if 'author_email' in commit.attributes: + author = self.lookup_user(commit.attributes['author_email']) if committer is not None: metadata['commit_info']['committer'] = { 'username': committer['username'], 'avatar_url': committer['avatar_url'], - 'url': gl_client.host + '/' + committer['username'], + 'url': committer.get('http_url', ''), } if author is not None: metadata['commit_info']['author'] = { 'username': author['username'], 'avatar_url': author['avatar_url'], - 'url': gl_client.host + '/' + author['username'] + 'url': author.get('http_url', ''), } return metadata - @_catch_timeouts + @_catch_timeouts_and_errors def manual_start(self, run_parameters=None): gl_client = self._get_authorized_client() - - repo = gl_client.getproject(self.config['build_source']) - if repo is False: + gl_project = gl_client.projects.get(self.config['build_source']) + if not gl_project: raise TriggerStartException('Could not find repository') def get_tag_sha(tag_name): - tags = gl_client.getrepositorytags(repo['id']) - if tags is False: - raise TriggerStartException('Could not find tags') + try: + tag = gl_project.tags.get(tag_name) + except gitlab.GitlabGetError: + raise TriggerStartException('Could not find tag in repository') - for tag in tags: - if tag['name'] == tag_name: - return tag['commit']['id'] - - raise TriggerStartException('Could not find commit') + return tag.attributes['commit']['id'] def get_branch_sha(branch_name): - branch = gl_client.getbranch(repo['id'], branch_name) - if branch is False: - raise TriggerStartException('Could not find branch') + try: + branch = gl_project.branches.get(branch_name) + except gitlab.GitlabGetError: + raise TriggerStartException('Could not find branch in repository') - return branch['commit']['id'] + return branch.attributes['commit']['id'] # Find the branch or tag to build. (commit_sha, ref) = determine_build_ref(run_parameters, get_branch_sha, get_tag_sha, - repo['default_branch']) + gl_project.attributes['default_branch']) - metadata = self.get_metadata_for_commit(commit_sha, ref, repo) + metadata = self.get_metadata_for_commit(commit_sha, ref, gl_project) return self.prepare_build(metadata, is_manual=True) - @_catch_timeouts + @_catch_timeouts_and_errors def handle_trigger_request(self, request): payload = request.get_json() if not payload: - raise SkipRequestException() - - # Lookup the default branch. - default_branch = None - gl_client = self._get_authorized_client() - repo = gl_client.getproject(self.config['build_source']) - if repo is not False: - default_branch = repo['default_branch'] - lookup_user = self.lookup_user + raise InvalidPayloadException() logger.debug('GitLab trigger payload %s', payload) + + # Lookup the default branch. 
+ gl_client = self._get_authorized_client() + gl_project = gl_client.projects.get(self.config['build_source']) + if not gl_project: + logger.debug('Skipping GitLab build; project %s not found', self.config['build_source']) + raise InvalidPayloadException() + + def lookup_commit(repo_id, commit_sha): + commit = self.lookup_commit(repo_id, commit_sha) + if commit is None: + return None + + return dict(commit.attributes) + + default_branch = gl_project.attributes['default_branch'] metadata = get_transformed_webhook_payload(payload, default_branch=default_branch, - lookup_user=lookup_user) + lookup_user=self.lookup_user, + lookup_commit=lookup_commit) prepared = self.prepare_build(metadata) # Check if we should skip this build. diff --git a/formats/__init__.py b/buildtrigger/test/__init__.py similarity index 100% rename from formats/__init__.py rename to buildtrigger/test/__init__.py diff --git a/buildtrigger/test/bitbucketmock.py b/buildtrigger/test/bitbucketmock.py new file mode 100644 index 000000000..0e5cad97f --- /dev/null +++ b/buildtrigger/test/bitbucketmock.py @@ -0,0 +1,159 @@ +from datetime import datetime +from mock import Mock + +from buildtrigger.bitbuckethandler import BitbucketBuildTrigger +from util.morecollections import AttrDict + +def get_bitbucket_trigger(dockerfile_path=''): + trigger_obj = AttrDict(dict(auth_token='foobar', id='sometrigger')) + trigger = BitbucketBuildTrigger(trigger_obj, { + 'build_source': 'foo/bar', + 'dockerfile_path': dockerfile_path, + 'nickname': 'knownuser', + 'account_id': 'foo', + }) + + trigger._get_client = get_mock_bitbucket + return trigger + +def get_repo_path_contents(path, revision): + data = { + 'files': [{'path': 'Dockerfile'}], + } + + return (True, data, None) + +def get_raw_path_contents(path, revision): + if path == 'Dockerfile': + return (True, 'hello world', None) + + if path == 'somesubdir/Dockerfile': + return (True, 'hi universe', None) + + return (False, None, None) + +def get_branches_and_tags(): + data = { + 'branches': [{'name': 'master'}, {'name': 'otherbranch'}], + 'tags': [{'name': 'sometag'}, {'name': 'someothertag'}], + } + return (True, data, None) + +def get_branches(): + return (True, {'master': {}, 'otherbranch': {}}, None) + +def get_tags(): + return (True, {'sometag': {}, 'someothertag': {}}, None) + +def get_branch(branch_name): + if branch_name != 'master': + return (False, None, None) + + data = { + 'target': { + 'hash': 'aaaaaaa', + }, + } + + return (True, data, None) + +def get_tag(tag_name): + if tag_name != 'sometag': + return (False, None, None) + + data = { + 'target': { + 'hash': 'aaaaaaa', + }, + } + + return (True, data, None) + +def get_changeset_mock(commit_sha): + if commit_sha != 'aaaaaaa': + return (False, None, 'Not found') + + data = { + 'node': 'aaaaaaa', + 'message': 'some message', + 'timestamp': 'now', + 'raw_author': 'foo@bar.com', + } + + return (True, data, None) + +def get_changesets(): + changesets_mock = Mock() + changesets_mock.get = Mock(side_effect=get_changeset_mock) + return changesets_mock + +def get_deploykeys(): + deploykeys_mock = Mock() + deploykeys_mock.create = Mock(return_value=(True, {'pk': 'someprivatekey'}, None)) + deploykeys_mock.delete = Mock(return_value=(True, {}, None)) + return deploykeys_mock + +def get_webhooks(): + webhooks_mock = Mock() + webhooks_mock.create = Mock(return_value=(True, {'uuid': 'someuuid'}, None)) + webhooks_mock.delete = Mock(return_value=(True, {}, None)) + return webhooks_mock + +def get_repo_mock(name): + if name != 'bar': + return None 
+ + repo_mock = Mock() + repo_mock.get_main_branch = Mock(return_value=(True, {'name': 'master'}, None)) + repo_mock.get_path_contents = Mock(side_effect=get_repo_path_contents) + repo_mock.get_raw_path_contents = Mock(side_effect=get_raw_path_contents) + repo_mock.get_branches_and_tags = Mock(side_effect=get_branches_and_tags) + repo_mock.get_branches = Mock(side_effect=get_branches) + repo_mock.get_tags = Mock(side_effect=get_tags) + repo_mock.get_branch = Mock(side_effect=get_branch) + repo_mock.get_tag = Mock(side_effect=get_tag) + + repo_mock.changesets = Mock(side_effect=get_changesets) + repo_mock.deploykeys = Mock(side_effect=get_deploykeys) + repo_mock.webhooks = Mock(side_effect=get_webhooks) + return repo_mock + +def get_repositories_mock(): + repos_mock = Mock() + repos_mock.get = Mock(side_effect=get_repo_mock) + return repos_mock + +def get_namespace_mock(namespace): + namespace_mock = Mock() + namespace_mock.repositories = Mock(side_effect=get_repositories_mock) + return namespace_mock + +def get_repo(namespace, name): + return { + 'owner': namespace, + 'logo': 'avatarurl', + 'slug': name, + 'description': 'some %s repo' % (name), + 'utc_last_updated': str(datetime.utcfromtimestamp(0)), + 'read_only': namespace != 'knownuser', + 'is_private': name == 'somerepo', + } + +def get_visible_repos(): + repos = [ + get_repo('knownuser', 'somerepo'), + get_repo('someorg', 'somerepo'), + get_repo('someorg', 'anotherrepo'), + ] + return (True, repos, None) + +def get_authed_mock(token, secret): + authed_mock = Mock() + authed_mock.for_namespace = Mock(side_effect=get_namespace_mock) + authed_mock.get_visible_repositories = Mock(side_effect=get_visible_repos) + return authed_mock + +def get_mock_bitbucket(): + bitbucket_mock = Mock() + bitbucket_mock.get_authorized_client = Mock(side_effect=get_authed_mock) + return bitbucket_mock diff --git a/buildtrigger/test/githubmock.py b/buildtrigger/test/githubmock.py new file mode 100644 index 000000000..e0f8daffc --- /dev/null +++ b/buildtrigger/test/githubmock.py @@ -0,0 +1,178 @@ +from datetime import datetime +from mock import Mock + +from github import GithubException + +from buildtrigger.githubhandler import GithubBuildTrigger +from util.morecollections import AttrDict + +def get_github_trigger(dockerfile_path=''): + trigger_obj = AttrDict(dict(auth_token='foobar', id='sometrigger')) + trigger = GithubBuildTrigger(trigger_obj, {'build_source': 'foo', 'dockerfile_path': dockerfile_path}) + trigger._get_client = get_mock_github + return trigger + +def get_mock_github(): + def get_commit_mock(commit_sha): + if commit_sha == 'aaaaaaa': + commit_mock = Mock() + commit_mock.sha = commit_sha + commit_mock.html_url = 'http://url/to/commit' + commit_mock.last_modified = 'now' + + commit_mock.commit = Mock() + commit_mock.commit.message = 'some cool message' + + commit_mock.committer = Mock() + commit_mock.committer.login = 'someuser' + commit_mock.committer.avatar_url = 'avatarurl' + commit_mock.committer.html_url = 'htmlurl' + + commit_mock.author = Mock() + commit_mock.author.login = 'someuser' + commit_mock.author.avatar_url = 'avatarurl' + commit_mock.author.html_url = 'htmlurl' + return commit_mock + + raise GithubException(None, None) + + def get_branch_mock(branch_name): + if branch_name == 'master': + branch_mock = Mock() + branch_mock.commit = Mock() + branch_mock.commit.sha = 'aaaaaaa' + return branch_mock + + raise GithubException(None, None) + + def get_repo_mock(namespace, name): + repo_mock = Mock() + repo_mock.owner = Mock() + 
repo_mock.owner.login = namespace + + repo_mock.full_name = '%s/%s' % (namespace, name) + repo_mock.name = name + repo_mock.description = 'some %s repo' % (name) + + if name != 'anotherrepo': + repo_mock.pushed_at = datetime.utcfromtimestamp(0) + else: + repo_mock.pushed_at = None + + repo_mock.html_url = 'https://bitbucket.org/%s/%s' % (namespace, name) + repo_mock.private = name == 'somerepo' + repo_mock.permissions = Mock() + repo_mock.permissions.admin = namespace == 'knownuser' + return repo_mock + + def get_user_repos_mock(type='all', sort='created'): + return [get_repo_mock('knownuser', 'somerepo')] + + def get_org_repos_mock(type='all'): + return [get_repo_mock('someorg', 'somerepo'), get_repo_mock('someorg', 'anotherrepo')] + + def get_orgs_mock(): + return [get_org_mock('someorg')] + + def get_user_mock(username='knownuser'): + if username == 'knownuser': + user_mock = Mock() + user_mock.name = username + user_mock.plan = Mock() + user_mock.plan.private_repos = 1 + user_mock.login = username + user_mock.html_url = 'https://bitbucket.org/%s' % (username) + user_mock.avatar_url = 'avatarurl' + user_mock.get_repos = Mock(side_effect=get_user_repos_mock) + user_mock.get_orgs = Mock(side_effect=get_orgs_mock) + return user_mock + + raise GithubException(None, None) + + def get_org_mock(namespace): + if namespace == 'someorg': + org_mock = Mock() + org_mock.get_repos = Mock(side_effect=get_org_repos_mock) + org_mock.login = namespace + org_mock.html_url = 'https://bitbucket.org/%s' % (namespace) + org_mock.avatar_url = 'avatarurl' + org_mock.name = namespace + org_mock.plan = Mock() + org_mock.plan.private_repos = 2 + return org_mock + + raise GithubException(None, None) + + def get_tags_mock(): + sometag = Mock() + sometag.name = 'sometag' + sometag.commit = get_commit_mock('aaaaaaa') + + someothertag = Mock() + someothertag.name = 'someothertag' + someothertag.commit = get_commit_mock('aaaaaaa') + return [sometag, someothertag] + + def get_branches_mock(): + master = Mock() + master.name = 'master' + master.commit = get_commit_mock('aaaaaaa') + + otherbranch = Mock() + otherbranch.name = 'otherbranch' + otherbranch.commit = get_commit_mock('aaaaaaa') + return [master, otherbranch] + + def get_contents_mock(filepath): + if filepath == 'Dockerfile': + m = Mock() + m.content = 'hello world' + return m + + if filepath == 'somesubdir/Dockerfile': + m = Mock() + m.content = 'hi universe' + return m + + raise GithubException(None, None) + + def get_git_tree_mock(commit_sha, recursive=False): + first_file = Mock() + first_file.type = 'blob' + first_file.path = 'Dockerfile' + + second_file = Mock() + second_file.type = 'other' + second_file.path = '/some/Dockerfile' + + third_file = Mock() + third_file.type = 'blob' + third_file.path = 'somesubdir/Dockerfile' + + t = Mock() + + if commit_sha == 'aaaaaaa': + t.tree = [ + first_file, second_file, third_file, + ] + else: + t.tree = [] + + return t + + repo_mock = Mock() + repo_mock.default_branch = 'master' + repo_mock.ssh_url = 'ssh_url' + + repo_mock.get_branch = Mock(side_effect=get_branch_mock) + repo_mock.get_tags = Mock(side_effect=get_tags_mock) + repo_mock.get_branches = Mock(side_effect=get_branches_mock) + repo_mock.get_commit = Mock(side_effect=get_commit_mock) + repo_mock.get_contents = Mock(side_effect=get_contents_mock) + repo_mock.get_git_tree = Mock(side_effect=get_git_tree_mock) + + gh_mock = Mock() + gh_mock.get_repo = Mock(return_value=repo_mock) + gh_mock.get_user = Mock(side_effect=get_user_mock) + gh_mock.get_organization = 
Mock(side_effect=get_org_mock) + return gh_mock diff --git a/buildtrigger/test/gitlabmock.py b/buildtrigger/test/gitlabmock.py new file mode 100644 index 000000000..cd864241e --- /dev/null +++ b/buildtrigger/test/gitlabmock.py @@ -0,0 +1,598 @@ +import base64 +import json + +from contextlib import contextmanager + +import gitlab + +from httmock import urlmatch, HTTMock + +from buildtrigger.gitlabhandler import GitLabBuildTrigger +from util.morecollections import AttrDict + + +@urlmatch(netloc=r'fakegitlab') +def catchall_handler(url, request): + return {'status_code': 404} + + +@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/users$') +def users_handler(url, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + if url.query.find('knownuser') < 0: + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps([]), + } + + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps([ + { + "id": 1, + "username": "knownuser", + "name": "Known User", + "state": "active", + "avatar_url": "avatarurl", + "web_url": "https://bitbucket.org/knownuser", + }, + ]), + } + + +@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/user$') +def user_handler(_, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps({ + "id": 1, + "username": "john_smith", + "email": "john@example.com", + "name": "John Smith", + "state": "active", + }), + } + + +@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/foo%2Fbar$') +def project_handler(_, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps({ + "id": 4, + "description": None, + "default_branch": "master", + "visibility": "private", + "path_with_namespace": "someorg/somerepo", + "ssh_url_to_repo": "git@example.com:someorg/somerepo.git", + "web_url": "http://example.com/someorg/somerepo", + }), + } + + +@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/tree$') +def project_tree_handler(_, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps([ + { + "id": "a1e8f8d745cc87e3a9248358d9352bb7f9a0aeba", + "name": "Dockerfile", + "type": "tree", + "path": "files/Dockerfile", + "mode": "040000", + }, + ]), + } + + +@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/tags$') +def project_tags_handler(_, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps([ + { + 'name': 'sometag', + 'commit': { + 'id': '60a8ff033665e1207714d6670fcd7b65304ec02f', + }, + }, + { + 'name': 'someothertag', + 'commit': { + 'id': '60a8ff033665e1207714d6670fcd7b65304ec02f', + }, + }, + ]), + } + + +@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/branches$') +def project_branches_handler(_, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + return { + 'status_code': 
200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps([ + { + 'name': 'master', + 'commit': { + 'id': '60a8ff033665e1207714d6670fcd7b65304ec02f', + }, + }, + { + 'name': 'otherbranch', + 'commit': { + 'id': '60a8ff033665e1207714d6670fcd7b65304ec02f', + }, + }, + ]), + } + + +@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/branches/master$') +def project_branch_handler(_, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps({ + "name": "master", + "merged": True, + "protected": True, + "developers_can_push": False, + "developers_can_merge": False, + "commit": { + "author_email": "john@example.com", + "author_name": "John Smith", + "authored_date": "2012-06-27T05:51:39-07:00", + "committed_date": "2012-06-28T03:44:20-07:00", + "committer_email": "john@example.com", + "committer_name": "John Smith", + "id": "60a8ff033665e1207714d6670fcd7b65304ec02f", + "short_id": "7b5c3cc", + "title": "add projects API", + "message": "add projects API", + "parent_ids": [ + "4ad91d3c1144c406e50c7b33bae684bd6837faf8", + ], + }, + }), + } + + +@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/namespaces/someorg$') +def namespace_handler(_, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps({ + "id": 2, + "name": "someorg", + "path": "someorg", + "kind": "group", + "full_path": "someorg", + "parent_id": None, + "members_count_with_descendants": 2 + }), + } + + +@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/namespaces/knownuser$') +def user_namespace_handler(_, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps({ + "id": 1, + "name": "knownuser", + "path": "knownuser", + "kind": "user", + "full_path": "knownuser", + "parent_id": None, + "members_count_with_descendants": 2 + }), + } + + +@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/namespaces(/)?$') +def namespaces_handler(_, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps([{ + "id": 2, + "name": "someorg", + "path": "someorg", + "kind": "group", + "full_path": "someorg", + "parent_id": None, + "web_url": "http://gitlab.com/groups/someorg", + "members_count_with_descendants": 2 + }]), + } + + +def get_projects_handler(add_permissions_block): + @urlmatch(netloc=r'fakegitlab', path=r'/api/v4/groups/2/projects$') + def projects_handler(_, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + permissions_block = { + "project_access": { + "access_level": 10, + "notification_level": 3 + }, + "group_access": { + "access_level": 20, + "notification_level": 3 + }, + } + + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps([{ + "id": 4, + "name": "Some project", + "description": None, + "default_branch": "master", + "visibility": "private", + "path": "someproject", + "path_with_namespace": "someorg/someproject", + 
"last_activity_at": "2013-09-30T13:46:02Z", + "web_url": "http://example.com/someorg/someproject", + "permissions": permissions_block if add_permissions_block else None, + }, + { + "id": 5, + "name": "Another project", + "description": None, + "default_branch": "master", + "visibility": "public", + "path": "anotherproject", + "path_with_namespace": "someorg/anotherproject", + "last_activity_at": "2013-09-30T13:46:02Z", + "web_url": "http://example.com/someorg/anotherproject", + }]), + } + return projects_handler + + +def get_group_handler(null_avatar): + @urlmatch(netloc=r'fakegitlab', path=r'/api/v4/groups/2$') + def group_handler(_, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps({ + "id": 1, + "name": "SomeOrg Group", + "path": "someorg", + "description": "An interesting group", + "visibility": "public", + "lfs_enabled": True, + "avatar_url": 'avatar_url' if not null_avatar else None, + "web_url": "http://gitlab.com/groups/someorg", + "request_access_enabled": False, + "full_name": "SomeOrg Group", + "full_path": "someorg", + "parent_id": None, + }), + } + return group_handler + + +@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/files/Dockerfile$') +def dockerfile_handler(_, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps({ + "file_name": "Dockerfile", + "file_path": "Dockerfile", + "size": 10, + "encoding": "base64", + "content": base64.b64encode('hello world'), + "ref": "master", + "blob_id": "79f7bbd25901e8334750839545a9bd021f0e4c83", + "commit_id": "d5a3ff139356ce33e37e73add446f16869741b50", + "last_commit_id": "570e7b2abdd848b95f2f578043fc23bd6f6fd24d" + }), + } + + +@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/files/somesubdir%2FDockerfile$') +def sub_dockerfile_handler(_, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps({ + "file_name": "Dockerfile", + "file_path": "somesubdir/Dockerfile", + "size": 10, + "encoding": "base64", + "content": base64.b64encode('hi universe'), + "ref": "master", + "blob_id": "79f7bbd25901e8334750839545a9bd021f0e4c83", + "commit_id": "d5a3ff139356ce33e37e73add446f16869741b50", + "last_commit_id": "570e7b2abdd848b95f2f578043fc23bd6f6fd24d" + }), + } + + +@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/repository/tags/sometag$') +def tag_handler(_, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps({ + "name": "sometag", + "message": "some cool message", + "target": "60a8ff033665e1207714d6670fcd7b65304ec02f", + "commit": { + "id": "60a8ff033665e1207714d6670fcd7b65304ec02f", + "short_id": "60a8ff03", + "title": "Initial commit", + "created_at": "2017-07-26T11:08:53.000+02:00", + "parent_ids": [ + "f61c062ff8bcbdb00e0a1b3317a91aed6ceee06b" + ], + "message": "v5.0.0\n", + "author_name": "Arthur Verschaeve", + "author_email": "contact@arthurverschaeve.be", + "authored_date": "2015-02-01T21:56:31.000+01:00", + 
"committer_name": "Arthur Verschaeve", + "committer_email": "contact@arthurverschaeve.be", + "committed_date": "2015-02-01T21:56:31.000+01:00" + }, + "release": None, + }), + } + + +@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/foo%2Fbar/repository/commits/60a8ff033665e1207714d6670fcd7b65304ec02f$') +def commit_handler(_, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps({ + "id": "60a8ff033665e1207714d6670fcd7b65304ec02f", + "short_id": "60a8ff03366", + "title": "Sanitize for network graph", + "author_name": "someguy", + "author_email": "some.guy@gmail.com", + "committer_name": "Some Guy", + "committer_email": "some.guy@gmail.com", + "created_at": "2012-09-20T09:06:12+03:00", + "message": "Sanitize for network graph", + "committed_date": "2012-09-20T09:06:12+03:00", + "authored_date": "2012-09-20T09:06:12+03:00", + "parent_ids": [ + "ae1d9fb46aa2b07ee9836d49862ec4e2c46fbbba" + ], + "last_pipeline" : { + "id": 8, + "ref": "master", + "sha": "2dc6aa325a317eda67812f05600bdf0fcdc70ab0", + "status": "created", + }, + "stats": { + "additions": 15, + "deletions": 10, + "total": 25 + }, + "status": "running" + }), + } + + +@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/deploy_keys$', method='POST') +def create_deploykey_handler(_, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps({ + "id": 1, + "title": "Public key", + "key": "ssh-rsa some stuff", + "created_at": "2013-10-02T10:12:29Z", + "can_push": False, + }), + } + + +@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/hooks$', method='POST') +def create_hook_handler(_, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps({ + "id": 1, + "url": "http://example.com/hook", + "project_id": 4, + "push_events": True, + "issues_events": True, + "confidential_issues_events": True, + "merge_requests_events": True, + "tag_push_events": True, + "note_events": True, + "job_events": True, + "pipeline_events": True, + "wiki_page_events": True, + "enable_ssl_verification": True, + "created_at": "2012-10-12T17:04:47Z", + }), + } + + +@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/hooks/1$', method='DELETE') +def delete_hook_handler(_, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps({}), + } + + +@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/projects/4/deploy_keys/1$', method='DELETE') +def delete_deploykey_handker(_, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + }, + 'content': json.dumps({}), + } + + +@urlmatch(netloc=r'fakegitlab', path=r'/api/v4/users/1/projects$') +def user_projects_list_handler(_, request): + if not request.headers.get('Authorization') == 'Bearer foobar': + return {'status_code': 401} + + return { + 'status_code': 200, + 'headers': { + 'Content-Type': 'application/json', + 
}, + 'content': json.dumps([ + { + "id": 2, + "name": "Another project", + "description": None, + "default_branch": "master", + "visibility": "public", + "path": "anotherproject", + "path_with_namespace": "knownuser/anotherproject", + "last_activity_at": "2013-09-30T13:46:02Z", + "web_url": "http://example.com/knownuser/anotherproject", + } + ]), + } + + +@contextmanager +def get_gitlab_trigger(dockerfile_path='', add_permissions=True, missing_avatar_url=False): + handlers = [user_handler, users_handler, project_branches_handler, project_tree_handler, + project_handler, get_projects_handler(add_permissions), tag_handler, + project_branch_handler, get_group_handler(missing_avatar_url), dockerfile_handler, + sub_dockerfile_handler, namespace_handler, user_namespace_handler, namespaces_handler, + commit_handler, create_deploykey_handler, delete_deploykey_handker, + create_hook_handler, delete_hook_handler, project_tags_handler, + user_projects_list_handler, catchall_handler] + + with HTTMock(*handlers): + trigger_obj = AttrDict(dict(auth_token='foobar', id='sometrigger')) + trigger = GitLabBuildTrigger(trigger_obj, { + 'build_source': 'foo/bar', + 'dockerfile_path': dockerfile_path, + 'username': 'knownuser' + }) + + client = gitlab.Gitlab('http://fakegitlab', oauth_token='foobar', timeout=20, api_version=4) + client.auth() + + trigger._get_authorized_client = lambda: client + yield trigger diff --git a/buildtrigger/test/test_basehandler.py b/buildtrigger/test/test_basehandler.py new file mode 100644 index 000000000..7162c2535 --- /dev/null +++ b/buildtrigger/test/test_basehandler.py @@ -0,0 +1,55 @@ +import pytest + +from buildtrigger.basehandler import BuildTriggerHandler + + +@pytest.mark.parametrize('input,output', [ + ("Dockerfile", True), + ("server.Dockerfile", True), + (u"Dockerfile", True), + (u"server.Dockerfile", True), + ("bad file name", False), + (u"bad file name", False), +]) +def test_path_is_dockerfile(input, output): + assert BuildTriggerHandler.filename_is_dockerfile(input) == output + + +@pytest.mark.parametrize('input,output', [ + ("", {}), + ("/a", {"/a": ["/"]}), + ("a", {"/a": ["/"]}), + ("/b/a", {"/b/a": ["/b", "/"]}), + ("b/a", {"/b/a": ["/b", "/"]}), + ("/c/b/a", {"/c/b/a": ["/c/b", "/c", "/"]}), + ("/a//b//c", {"/a/b/c": ["/", "/a", "/a/b"]}), + ("/a", {"/a": ["/"]}), +]) +def test_subdir_path_map_no_previous(input, output): + actual_mapping = BuildTriggerHandler.get_parent_directory_mappings(input) + for key in actual_mapping: + value = actual_mapping[key] + actual_mapping[key] = value.sort() + for key in output: + value = output[key] + output[key] = value.sort() + + assert actual_mapping == output + + +@pytest.mark.parametrize('new_path,original_dictionary,output', [ + ("/a", {}, {"/a": ["/"]}), + ("b", {"/a": ["some_path", "another_path"]}, {"/a": ["some_path", "another_path"], "/b": ["/"]}), + ("/a/b/c/d", {"/e": ["some_path", "another_path"]}, + {"/e": ["some_path", "another_path"], "/a/b/c/d": ["/", "/a", "/a/b", "/a/b/c"]}), +]) +def test_subdir_path_map(new_path, original_dictionary, output): + actual_mapping = BuildTriggerHandler.get_parent_directory_mappings(new_path, original_dictionary) + for key in actual_mapping: + value = actual_mapping[key] + actual_mapping[key] = value.sort() + for key in output: + value = output[key] + output[key] = value.sort() + + assert actual_mapping == output diff --git a/buildtrigger/test/test_bitbuckethandler.py b/buildtrigger/test/test_bitbuckethandler.py new file mode 100644 index 000000000..dbb47521a --- /dev/null +++ 
b/buildtrigger/test/test_bitbuckethandler.py @@ -0,0 +1,91 @@ +import json +import pytest + +from buildtrigger.test.bitbucketmock import get_bitbucket_trigger +from buildtrigger.triggerutil import (SkipRequestException, ValidationRequestException, + InvalidPayloadException) +from endpoints.building import PreparedBuild +from util.morecollections import AttrDict + +@pytest.fixture +def bitbucket_trigger(): + return get_bitbucket_trigger() + + +def test_list_build_subdirs(bitbucket_trigger): + assert bitbucket_trigger.list_build_subdirs() == ["/Dockerfile"] + + +@pytest.mark.parametrize('dockerfile_path, contents', [ + ('/Dockerfile', 'hello world'), + ('somesubdir/Dockerfile', 'hi universe'), + ('unknownpath', None), +]) +def test_load_dockerfile_contents(dockerfile_path, contents): + trigger = get_bitbucket_trigger(dockerfile_path) + assert trigger.load_dockerfile_contents() == contents + + +@pytest.mark.parametrize('payload, expected_error, expected_message', [ + ('{}', InvalidPayloadException, "'push' is a required property"), + + # Valid payload: + ('''{ + "push": { + "changes": [{ + "new": { + "name": "somechange", + "target": { + "hash": "aaaaaaa", + "message": "foo", + "date": "now", + "links": { + "html": { + "href": "somelink" + } + } + } + } + }] + }, + "repository": { + "full_name": "foo/bar" + } + }''', None, None), + + # Skip message: + ('''{ + "push": { + "changes": [{ + "new": { + "name": "somechange", + "target": { + "hash": "aaaaaaa", + "message": "[skip build] foo", + "date": "now", + "links": { + "html": { + "href": "somelink" + } + } + } + } + }] + }, + "repository": { + "full_name": "foo/bar" + } + }''', SkipRequestException, ''), +]) +def test_handle_trigger_request(bitbucket_trigger, payload, expected_error, expected_message): + def get_payload(): + return json.loads(payload) + + request = AttrDict(dict(get_json=get_payload)) + + if expected_error is not None: + with pytest.raises(expected_error) as ipe: + bitbucket_trigger.handle_trigger_request(request) + assert str(ipe.value) == expected_message + else: + assert isinstance(bitbucket_trigger.handle_trigger_request(request), PreparedBuild) diff --git a/buildtrigger/test/test_customhandler.py b/buildtrigger/test/test_customhandler.py new file mode 100644 index 000000000..cbb5f484e --- /dev/null +++ b/buildtrigger/test/test_customhandler.py @@ -0,0 +1,51 @@ +import pytest + +from buildtrigger.customhandler import CustomBuildTrigger +from buildtrigger.triggerutil import (InvalidPayloadException, SkipRequestException, + TriggerStartException) +from endpoints.building import PreparedBuild +from util.morecollections import AttrDict + +@pytest.mark.parametrize('payload, expected_error, expected_message', [ + ('', InvalidPayloadException, 'Missing expected payload'), + ('{}', InvalidPayloadException, "'commit' is a required property"), + + ('{"commit": "foo", "ref": "refs/heads/something", "default_branch": "baz"}', + InvalidPayloadException, "u'foo' does not match '^([A-Fa-f0-9]{7,})$'"), + + ('{"commit": "11d6fbc", "ref": "refs/heads/something", "default_branch": "baz"}', None, None), + ('''{ + "commit": "11d6fbc", + "ref": "refs/heads/something", + "default_branch": "baz", + "commit_info": { + "message": "[skip build]", + "url": "http://foo.bar", + "date": "NOW" + } + }''', SkipRequestException, ''), +]) +def test_handle_trigger_request(payload, expected_error, expected_message): + trigger = CustomBuildTrigger(None, {'build_source': 'foo'}) + request = AttrDict(dict(data=payload)) + + if expected_error is not None: + with 
pytest.raises(expected_error) as ipe:
+      trigger.handle_trigger_request(request)
+    assert str(ipe.value) == expected_message
+  else:
+    assert isinstance(trigger.handle_trigger_request(request), PreparedBuild)
+
+@pytest.mark.parametrize('run_parameters, expected_error, expected_message', [
+  ({}, TriggerStartException, 'missing required parameter'),
+  ({'commit_sha': 'foo'}, TriggerStartException, "'foo' does not match '^([A-Fa-f0-9]{7,})$'"),
+  ({'commit_sha': '11d6fbc'}, None, None),
+])
+def test_manual_start(run_parameters, expected_error, expected_message):
+  trigger = CustomBuildTrigger(None, {'build_source': 'foo'})
+  if expected_error is not None:
+    with pytest.raises(expected_error) as ipe:
+      trigger.manual_start(run_parameters)
+    assert str(ipe.value) == expected_message
+  else:
+    assert isinstance(trigger.manual_start(run_parameters), PreparedBuild)
diff --git a/buildtrigger/test/test_githosthandler.py b/buildtrigger/test/test_githosthandler.py
new file mode 100644
index 000000000..fadf8dce5
--- /dev/null
+++ b/buildtrigger/test/test_githosthandler.py
@@ -0,0 +1,121 @@
+import pytest
+
+from buildtrigger.triggerutil import TriggerStartException
+from buildtrigger.test.bitbucketmock import get_bitbucket_trigger
+from buildtrigger.test.githubmock import get_github_trigger
+from endpoints.building import PreparedBuild
+
+# Note: This test suite executes a common set of tests against all the trigger types specified
+# in this fixture. Each trigger's mock is expected to return the same data for all of these calls.
+@pytest.fixture(params=[get_github_trigger(), get_bitbucket_trigger()])
+def githost_trigger(request):
+  return request.param
+
+@pytest.mark.parametrize('run_parameters, expected_error, expected_message', [
+  # No branch or tag specified: use the commit of the default branch.
+  ({}, None, None),
+
+  # Invalid branch.
+  ({'refs': {'kind': 'branch', 'name': 'invalid'}}, TriggerStartException,
+   'Could not find branch in repository'),
+
+  # Invalid tag.
+  ({'refs': {'kind': 'tag', 'name': 'invalid'}}, TriggerStartException,
+   'Could not find tag in repository'),
+
+  # Valid branch.
+  ({'refs': {'kind': 'branch', 'name': 'master'}}, None, None),
+
+  # Valid tag.
+ ({'refs': {'kind': 'tag', 'name': 'sometag'}}, None, None), +]) +def test_manual_start(run_parameters, expected_error, expected_message, githost_trigger): + if expected_error is not None: + with pytest.raises(expected_error) as ipe: + githost_trigger.manual_start(run_parameters) + assert str(ipe.value) == expected_message + else: + assert isinstance(githost_trigger.manual_start(run_parameters), PreparedBuild) + + +@pytest.mark.parametrize('name, expected', [ + ('refs', [ + {'kind': 'branch', 'name': 'master'}, + {'kind': 'branch', 'name': 'otherbranch'}, + {'kind': 'tag', 'name': 'sometag'}, + {'kind': 'tag', 'name': 'someothertag'}, + ]), + ('tag_name', set(['sometag', 'someothertag'])), + ('branch_name', set(['master', 'otherbranch'])), + ('invalid', None) +]) +def test_list_field_values(name, expected, githost_trigger): + if expected is None: + assert githost_trigger.list_field_values(name) is None + elif isinstance(expected, set): + assert set(githost_trigger.list_field_values(name)) == set(expected) + else: + assert githost_trigger.list_field_values(name) == expected + + +def test_list_build_source_namespaces(): + namespaces_expected = [ + { + 'personal': True, + 'score': 1, + 'avatar_url': 'avatarurl', + 'id': 'knownuser', + 'title': 'knownuser', + 'url': 'https://bitbucket.org/knownuser', + }, + { + 'score': 2, + 'title': 'someorg', + 'personal': False, + 'url': 'https://bitbucket.org/someorg', + 'avatar_url': 'avatarurl', + 'id': 'someorg' + } + ] + + found = get_bitbucket_trigger().list_build_source_namespaces() + found.sort() + + namespaces_expected.sort() + assert found == namespaces_expected + + +@pytest.mark.parametrize('namespace, expected', [ + ('', []), + ('unknown', []), + + ('knownuser', [ + { + 'last_updated': 0, 'name': 'somerepo', + 'url': 'https://bitbucket.org/knownuser/somerepo', 'private': True, + 'full_name': 'knownuser/somerepo', 'has_admin_permissions': True, + 'description': 'some somerepo repo' + }]), + + ('someorg', [ + { + 'last_updated': 0, 'name': 'somerepo', + 'url': 'https://bitbucket.org/someorg/somerepo', 'private': True, + 'full_name': 'someorg/somerepo', 'has_admin_permissions': False, + 'description': 'some somerepo repo' + }, + { + 'last_updated': 0, 'name': 'anotherrepo', + 'url': 'https://bitbucket.org/someorg/anotherrepo', 'private': False, + 'full_name': 'someorg/anotherrepo', 'has_admin_permissions': False, + 'description': 'some anotherrepo repo' + }]), +]) +def test_list_build_sources_for_namespace(namespace, expected, githost_trigger): + assert githost_trigger.list_build_sources_for_namespace(namespace) == expected + + +def test_activate_and_deactivate(githost_trigger): + _, private_key = githost_trigger.activate('http://some/url') + assert 'private_key' in private_key + githost_trigger.deactivate() diff --git a/buildtrigger/test/test_githubhandler.py b/buildtrigger/test/test_githubhandler.py new file mode 100644 index 000000000..f7012b0cf --- /dev/null +++ b/buildtrigger/test/test_githubhandler.py @@ -0,0 +1,117 @@ +import json +import pytest + +from buildtrigger.test.githubmock import get_github_trigger +from buildtrigger.triggerutil import (SkipRequestException, ValidationRequestException, + InvalidPayloadException) +from endpoints.building import PreparedBuild +from util.morecollections import AttrDict + +@pytest.fixture +def github_trigger(): + return get_github_trigger() + + +@pytest.mark.parametrize('payload, expected_error, expected_message', [ + ('{"zen": true}', SkipRequestException, ""), + + ('{}', InvalidPayloadException, 
"Missing 'repository' on request"), + ('{"repository": "foo"}', InvalidPayloadException, "Missing 'owner' on repository"), + + # Valid payload: + ('''{ + "repository": { + "owner": { + "name": "someguy" + }, + "name": "somerepo", + "ssh_url": "someurl" + }, + "ref": "refs/tags/foo", + "head_commit": { + "id": "11d6fbc", + "url": "http://some/url", + "message": "some message", + "timestamp": "NOW" + } + }''', None, None), + + # Skip message: + ('''{ + "repository": { + "owner": { + "name": "someguy" + }, + "name": "somerepo", + "ssh_url": "someurl" + }, + "ref": "refs/tags/foo", + "head_commit": { + "id": "11d6fbc", + "url": "http://some/url", + "message": "[skip build]", + "timestamp": "NOW" + } + }''', SkipRequestException, ''), +]) +def test_handle_trigger_request(github_trigger, payload, expected_error, expected_message): + def get_payload(): + return json.loads(payload) + + request = AttrDict(dict(get_json=get_payload)) + + if expected_error is not None: + with pytest.raises(expected_error) as ipe: + github_trigger.handle_trigger_request(request) + assert str(ipe.value) == expected_message + else: + assert isinstance(github_trigger.handle_trigger_request(request), PreparedBuild) + + +@pytest.mark.parametrize('dockerfile_path, contents', [ + ('/Dockerfile', 'hello world'), + ('somesubdir/Dockerfile', 'hi universe'), + ('unknownpath', None), +]) +def test_load_dockerfile_contents(dockerfile_path, contents): + trigger = get_github_trigger(dockerfile_path) + assert trigger.load_dockerfile_contents() == contents + + +@pytest.mark.parametrize('username, expected_response', [ + ('unknownuser', None), + ('knownuser', {'html_url': 'https://bitbucket.org/knownuser', 'avatar_url': 'avatarurl'}), +]) +def test_lookup_user(username, expected_response, github_trigger): + assert github_trigger.lookup_user(username) == expected_response + + +def test_list_build_subdirs(github_trigger): + assert github_trigger.list_build_subdirs() == ['Dockerfile', 'somesubdir/Dockerfile'] + + +def test_list_build_source_namespaces(github_trigger): + namespaces_expected = [ + { + 'personal': True, + 'score': 1, + 'avatar_url': 'avatarurl', + 'id': 'knownuser', + 'title': 'knownuser', + 'url': 'https://bitbucket.org/knownuser', + }, + { + 'score': 0, + 'title': 'someorg', + 'personal': False, + 'url': '', + 'avatar_url': 'avatarurl', + 'id': 'someorg' + } + ] + + found = github_trigger.list_build_source_namespaces() + found.sort() + + namespaces_expected.sort() + assert found == namespaces_expected diff --git a/buildtrigger/test/test_gitlabhandler.py b/buildtrigger/test/test_gitlabhandler.py new file mode 100644 index 000000000..b74095a8c --- /dev/null +++ b/buildtrigger/test/test_gitlabhandler.py @@ -0,0 +1,231 @@ +import json +import pytest + +from mock import Mock + +from buildtrigger.test.gitlabmock import get_gitlab_trigger +from buildtrigger.triggerutil import (SkipRequestException, ValidationRequestException, + InvalidPayloadException, TriggerStartException) +from endpoints.building import PreparedBuild +from util.morecollections import AttrDict + +@pytest.fixture() +def gitlab_trigger(): + with get_gitlab_trigger() as t: + yield t + + +def test_list_build_subdirs(gitlab_trigger): + assert gitlab_trigger.list_build_subdirs() == ['Dockerfile'] + + +@pytest.mark.parametrize('dockerfile_path, contents', [ + ('/Dockerfile', 'hello world'), + ('somesubdir/Dockerfile', 'hi universe'), + ('unknownpath', None), +]) +def test_load_dockerfile_contents(dockerfile_path, contents): + with 
get_gitlab_trigger(dockerfile_path=dockerfile_path) as trigger: + assert trigger.load_dockerfile_contents() == contents + + +@pytest.mark.parametrize('email, expected_response', [ + ('unknown@email.com', None), + ('knownuser', {'username': 'knownuser', 'html_url': 'https://bitbucket.org/knownuser', + 'avatar_url': 'avatarurl'}), +]) +def test_lookup_user(email, expected_response, gitlab_trigger): + assert gitlab_trigger.lookup_user(email) == expected_response + + +def test_null_permissions(): + with get_gitlab_trigger(add_permissions=False) as trigger: + sources = trigger.list_build_sources_for_namespace('someorg') + source = sources[0] + assert source['has_admin_permissions'] + + +def test_list_build_sources(): + with get_gitlab_trigger() as trigger: + sources = trigger.list_build_sources_for_namespace('someorg') + assert sources == [ + { + 'last_updated': 1380548762, + 'name': u'someproject', + 'url': u'http://example.com/someorg/someproject', + 'private': True, + 'full_name': u'someorg/someproject', + 'has_admin_permissions': False, + 'description': '' + }, + { + 'last_updated': 1380548762, + 'name': u'anotherproject', + 'url': u'http://example.com/someorg/anotherproject', + 'private': False, + 'full_name': u'someorg/anotherproject', + 'has_admin_permissions': True, + 'description': '', + }] + + +def test_null_avatar(): + with get_gitlab_trigger(missing_avatar_url=True) as trigger: + namespace_data = trigger.list_build_source_namespaces() + expected = { + 'avatar_url': None, + 'personal': False, + 'title': u'someorg', + 'url': u'http://gitlab.com/groups/someorg', + 'score': 1, + 'id': '2', + } + + assert namespace_data == [expected] + + +@pytest.mark.parametrize('payload, expected_error, expected_message', [ + ('{}', InvalidPayloadException, ''), + + # Valid payload: + ('''{ + "object_kind": "push", + "ref": "refs/heads/master", + "checkout_sha": "aaaaaaa", + "repository": { + "git_ssh_url": "foobar" + }, + "commits": [ + { + "id": "aaaaaaa", + "url": "someurl", + "message": "hello there!", + "timestamp": "now" + } + ] + }''', None, None), + + # Skip message: + ('''{ + "object_kind": "push", + "ref": "refs/heads/master", + "checkout_sha": "aaaaaaa", + "repository": { + "git_ssh_url": "foobar" + }, + "commits": [ + { + "id": "aaaaaaa", + "url": "someurl", + "message": "[skip build] hello there!", + "timestamp": "now" + } + ] + }''', SkipRequestException, ''), +]) +def test_handle_trigger_request(gitlab_trigger, payload, expected_error, expected_message): + def get_payload(): + return json.loads(payload) + + request = AttrDict(dict(get_json=get_payload)) + + if expected_error is not None: + with pytest.raises(expected_error) as ipe: + gitlab_trigger.handle_trigger_request(request) + assert str(ipe.value) == expected_message + else: + assert isinstance(gitlab_trigger.handle_trigger_request(request), PreparedBuild) + + +@pytest.mark.parametrize('run_parameters, expected_error, expected_message', [ + # No branch or tag specified: use the commit of the default branch. + ({}, None, None), + + # Invalid branch. + ({'refs': {'kind': 'branch', 'name': 'invalid'}}, TriggerStartException, + 'Could not find branch in repository'), + + # Invalid tag. + ({'refs': {'kind': 'tag', 'name': 'invalid'}}, TriggerStartException, + 'Could not find tag in repository'), + + # Valid branch. + ({'refs': {'kind': 'branch', 'name': 'master'}}, None, None), + + # Valid tag. 
+ ({'refs': {'kind': 'tag', 'name': 'sometag'}}, None, None), +]) +def test_manual_start(run_parameters, expected_error, expected_message, gitlab_trigger): + if expected_error is not None: + with pytest.raises(expected_error) as ipe: + gitlab_trigger.manual_start(run_parameters) + assert str(ipe.value) == expected_message + else: + assert isinstance(gitlab_trigger.manual_start(run_parameters), PreparedBuild) + + +def test_activate_and_deactivate(gitlab_trigger): + _, private_key = gitlab_trigger.activate('http://some/url') + assert 'private_key' in private_key + + gitlab_trigger.deactivate() + + +@pytest.mark.parametrize('name, expected', [ + ('refs', [ + {'kind': 'branch', 'name': 'master'}, + {'kind': 'branch', 'name': 'otherbranch'}, + {'kind': 'tag', 'name': 'sometag'}, + {'kind': 'tag', 'name': 'someothertag'}, + ]), + ('tag_name', set(['sometag', 'someothertag'])), + ('branch_name', set(['master', 'otherbranch'])), + ('invalid', None) +]) +def test_list_field_values(name, expected, gitlab_trigger): + if expected is None: + assert gitlab_trigger.list_field_values(name) is None + elif isinstance(expected, set): + assert set(gitlab_trigger.list_field_values(name)) == set(expected) + else: + assert gitlab_trigger.list_field_values(name) == expected + + +@pytest.mark.parametrize('namespace, expected', [ + ('', []), + ('unknown', []), + + ('knownuser', [ + { + 'last_updated': 1380548762, + 'name': u'anotherproject', + 'url': u'http://example.com/knownuser/anotherproject', + 'private': False, + 'full_name': u'knownuser/anotherproject', + 'has_admin_permissions': True, + 'description': '' + }, + ]), + + ('someorg', [ + { + 'last_updated': 1380548762, + 'name': u'someproject', + 'url': u'http://example.com/someorg/someproject', + 'private': True, + 'full_name': u'someorg/someproject', + 'has_admin_permissions': False, + 'description': '' + }, + { + 'last_updated': 1380548762, + 'name': u'anotherproject', + 'url': u'http://example.com/someorg/anotherproject', + 'private': False, + 'full_name': u'someorg/anotherproject', + 'has_admin_permissions': True, + 'description': '', + }]), +]) +def test_list_build_sources_for_namespace(namespace, expected, gitlab_trigger): + assert gitlab_trigger.list_build_sources_for_namespace(namespace) == expected diff --git a/buildtrigger/test/test_prepare_trigger.py b/buildtrigger/test/test_prepare_trigger.py new file mode 100644 index 000000000..e3aab6b48 --- /dev/null +++ b/buildtrigger/test/test_prepare_trigger.py @@ -0,0 +1,572 @@ +import json + +import pytest + +from jsonschema import validate + +from buildtrigger.customhandler import custom_trigger_payload +from buildtrigger.basehandler import METADATA_SCHEMA +from buildtrigger.bitbuckethandler import get_transformed_webhook_payload as bb_webhook +from buildtrigger.bitbuckethandler import get_transformed_commit_info as bb_commit +from buildtrigger.githubhandler import get_transformed_webhook_payload as gh_webhook +from buildtrigger.gitlabhandler import get_transformed_webhook_payload as gl_webhook +from buildtrigger.triggerutil import SkipRequestException + +def assertSkipped(filename, processor, *args, **kwargs): + with open('buildtrigger/test/triggerjson/%s.json' % filename) as f: + payload = json.loads(f.read()) + + nargs = [payload] + nargs.extend(args) + + with pytest.raises(SkipRequestException): + processor(*nargs, **kwargs) + + +def assertSchema(filename, expected, processor, *args, **kwargs): + with open('buildtrigger/test/triggerjson/%s.json' % filename) as f: + payload = json.loads(f.read()) + + 
nargs = [payload] + nargs.extend(args) + + created = processor(*nargs, **kwargs) + assert created == expected + validate(created, METADATA_SCHEMA) + + +def test_custom_custom(): + expected = { + u'commit':u'1c002dd', + u'commit_info': { + u'url': u'gitsoftware.com/repository/commits/1234567', + u'date': u'timestamp', + u'message': u'initial commit', + u'committer': { + u'username': u'user', + u'url': u'gitsoftware.com/users/user', + u'avatar_url': u'gravatar.com/user.png' + }, + u'author': { + u'username': u'user', + u'url': u'gitsoftware.com/users/user', + u'avatar_url': u'gravatar.com/user.png' + } + }, + u'ref': u'refs/heads/master', + u'default_branch': u'master', + u'git_url': u'foobar', + } + + assertSchema('custom_webhook', expected, custom_trigger_payload, git_url='foobar') + + +def test_custom_gitlab(): + expected = { + 'commit': u'fb88379ee45de28a0a4590fddcbd8eff8b36026e', + 'ref': u'refs/heads/master', + 'git_url': u'git@gitlab.com:jsmith/somerepo.git', + 'commit_info': { + 'url': u'https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e', + 'date': u'2015-08-13T19:33:18+00:00', + 'message': u'Fix link\n', + }, + } + + assertSchema('gitlab_webhook', expected, custom_trigger_payload, git_url='git@gitlab.com:jsmith/somerepo.git') + + +def test_custom_github(): + expected = { + 'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c', + 'ref': u'refs/heads/master', + 'default_branch': u'master', + 'git_url': u'git@github.com:jsmith/anothertest.git', + 'commit_info': { + 'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c', + 'date': u'2015-09-11T14:26:16-04:00', + 'message': u'Update Dockerfile', + 'committer': { + 'username': u'jsmith', + }, + 'author': { + 'username': u'jsmith', + }, + }, + } + + assertSchema('github_webhook', expected, custom_trigger_payload, + git_url='git@github.com:jsmith/anothertest.git') + + +def test_custom_bitbucket(): + expected = { + "commit": u"af64ae7188685f8424040b4735ad12941b980d75", + "ref": u"refs/heads/master", + "git_url": u"git@bitbucket.org:jsmith/another-repo.git", + "commit_info": { + "url": u"https://bitbucket.org/jsmith/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75", + "date": u"2015-09-10T20:40:54+00:00", + "message": u"Dockerfile edited online with Bitbucket", + "author": { + "username": u"John Smith", + "avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/", + }, + "committer": { + "username": u"John Smith", + "avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/", + }, + }, + } + + assertSchema('bitbucket_webhook', expected, custom_trigger_payload, git_url='git@bitbucket.org:jsmith/another-repo.git') + + +def test_bitbucket_customer_payload_noauthor(): + expected = { + "commit": "a0ec139843b2bb281ab21a433266ddc498e605dc", + "ref": "refs/heads/master", + "git_url": "git@bitbucket.org:somecoollabs/svc-identity.git", + "commit_info": { + "url": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc", + "date": "2015-09-25T00:55:08+00:00", + "message": "Update version.py to 0.1.2 [skip ci]\n\n(by utilitybelt/scripts/autotag_version.py)\n", + "committer": { + "username": "CodeShip Tagging", + "avatar_url": "https://bitbucket.org/account/SomeCoolLabs_CodeShip/avatar/32/", + }, + }, + } + + assertSchema('bitbucket_customer_example_noauthor', expected, bb_webhook) + + +def test_bitbucket_customer_payload_tag(): + expected = { + "commit": "a0ec139843b2bb281ab21a433266ddc498e605dc", + "ref": 
"refs/tags/0.1.2", + "git_url": "git@bitbucket.org:somecoollabs/svc-identity.git", + "commit_info": { + "url": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc", + "date": "2015-09-25T00:55:08+00:00", + "message": "Update version.py to 0.1.2 [skip ci]\n\n(by utilitybelt/scripts/autotag_version.py)\n", + "committer": { + "username": "CodeShip Tagging", + "avatar_url": "https://bitbucket.org/account/SomeCoolLabs_CodeShip/avatar/32/", + }, + }, + } + + assertSchema('bitbucket_customer_example_tag', expected, bb_webhook) + + +def test_bitbucket_commit(): + ref = 'refs/heads/somebranch' + default_branch = 'somebranch' + repository_name = 'foo/bar' + + def lookup_author(_): + return { + 'user': { + 'display_name': 'cooluser', + 'avatar': 'http://some/avatar/url' + } + } + + expected = { + "commit": u"abdeaf1b2b4a6b9ddf742c1e1754236380435a62", + "ref": u"refs/heads/somebranch", + "git_url": u"git@bitbucket.org:foo/bar.git", + "default_branch": u"somebranch", + "commit_info": { + "url": u"https://bitbucket.org/foo/bar/commits/abdeaf1b2b4a6b9ddf742c1e1754236380435a62", + "date": u"2012-07-24 00:26:36", + "message": u"making some changes\n", + "author": { + "avatar_url": u"http://some/avatar/url", + "username": u"cooluser", + } + } + } + + assertSchema('bitbucket_commit', expected, bb_commit, ref, default_branch, + repository_name, lookup_author) + +def test_bitbucket_webhook_payload(): + expected = { + "commit": u"af64ae7188685f8424040b4735ad12941b980d75", + "ref": u"refs/heads/master", + "git_url": u"git@bitbucket.org:jsmith/another-repo.git", + "commit_info": { + "url": u"https://bitbucket.org/jsmith/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75", + "date": u"2015-09-10T20:40:54+00:00", + "message": u"Dockerfile edited online with Bitbucket", + "author": { + "username": u"John Smith", + "avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/", + }, + "committer": { + "username": u"John Smith", + "avatar_url": u"https://bitbucket.org/account/jsmith/avatar/32/", + }, + }, + } + + assertSchema('bitbucket_webhook', expected, bb_webhook) + + +def test_github_webhook_payload_slash_branch(): + expected = { + 'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c', + 'ref': u'refs/heads/slash/branch', + 'default_branch': u'master', + 'git_url': u'git@github.com:jsmith/anothertest.git', + 'commit_info': { + 'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c', + 'date': u'2015-09-11T14:26:16-04:00', + 'message': u'Update Dockerfile', + 'committer': { + 'username': u'jsmith', + }, + 'author': { + 'username': u'jsmith', + }, + }, + } + + assertSchema('github_webhook_slash_branch', expected, gh_webhook) + + +def test_github_webhook_payload(): + expected = { + 'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c', + 'ref': u'refs/heads/master', + 'default_branch': u'master', + 'git_url': u'git@github.com:jsmith/anothertest.git', + 'commit_info': { + 'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c', + 'date': u'2015-09-11T14:26:16-04:00', + 'message': u'Update Dockerfile', + 'committer': { + 'username': u'jsmith', + }, + 'author': { + 'username': u'jsmith', + }, + }, + } + + assertSchema('github_webhook', expected, gh_webhook) + + +def test_github_webhook_payload_with_lookup(): + expected = { + 'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c', + 'ref': u'refs/heads/master', + 'default_branch': u'master', + 'git_url': 
u'git@github.com:jsmith/anothertest.git', + 'commit_info': { + 'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c', + 'date': u'2015-09-11T14:26:16-04:00', + 'message': u'Update Dockerfile', + 'committer': { + 'username': u'jsmith', + 'url': u'http://github.com/jsmith', + 'avatar_url': u'http://some/avatar/url', + }, + 'author': { + 'username': u'jsmith', + 'url': u'http://github.com/jsmith', + 'avatar_url': u'http://some/avatar/url', + }, + }, + } + + def lookup_user(_): + return { + 'html_url': 'http://github.com/jsmith', + 'avatar_url': 'http://some/avatar/url' + } + + assertSchema('github_webhook', expected, gh_webhook, lookup_user=lookup_user) + + +def test_github_webhook_payload_missing_fields_with_lookup(): + expected = { + 'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c', + 'ref': u'refs/heads/master', + 'default_branch': u'master', + 'git_url': u'git@github.com:jsmith/anothertest.git', + 'commit_info': { + 'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c', + 'date': u'2015-09-11T14:26:16-04:00', + 'message': u'Update Dockerfile' + }, + } + + def lookup_user(username): + if not username: + raise Exception('Fail!') + + return { + 'html_url': 'http://github.com/jsmith', + 'avatar_url': 'http://some/avatar/url' + } + + assertSchema('github_webhook_missing', expected, gh_webhook, lookup_user=lookup_user) + + +def test_gitlab_webhook_payload(): + expected = { + 'commit': u'fb88379ee45de28a0a4590fddcbd8eff8b36026e', + 'ref': u'refs/heads/master', + 'git_url': u'git@gitlab.com:jsmith/somerepo.git', + 'commit_info': { + 'url': u'https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e', + 'date': u'2015-08-13T19:33:18+00:00', + 'message': u'Fix link\n', + }, + } + + assertSchema('gitlab_webhook', expected, gl_webhook) + + +def test_github_webhook_payload_known_issue(): + expected = { + "commit": "118b07121695d9f2e40a5ff264fdcc2917680870", + "ref": "refs/heads/master", + "default_branch": "master", + "git_url": "git@github.com:jsmith/docker-test.git", + "commit_info": { + "url": "https://github.com/jsmith/docker-test/commit/118b07121695d9f2e40a5ff264fdcc2917680870", + "date": "2015-09-25T14:55:11-04:00", + "message": "Fail", + }, + } + + assertSchema('github_webhook_noname', expected, gh_webhook) + + +def test_github_webhook_payload_missing_fields(): + expected = { + 'commit': u'410f4cdf8ff09b87f245b13845e8497f90b90a4c', + 'ref': u'refs/heads/master', + 'default_branch': u'master', + 'git_url': u'git@github.com:jsmith/anothertest.git', + 'commit_info': { + 'url': u'https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c', + 'date': u'2015-09-11T14:26:16-04:00', + 'message': u'Update Dockerfile' + }, + } + + assertSchema('github_webhook_missing', expected, gh_webhook) + + +def test_gitlab_webhook_nocommit_payload(): + assertSkipped('gitlab_webhook_nocommit', gl_webhook) + + +def test_gitlab_webhook_multiple_commits(): + expected = { + 'commit': u'9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53', + 'ref': u'refs/heads/master', + 'git_url': u'git@gitlab.com:jsmith/some-test-project.git', + 'commit_info': { + 'url': u'https://gitlab.com/jsmith/some-test-project/commit/9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53', + 'date': u'2016-09-29T15:02:41+00:00', + 'message': u"Merge branch 'foobar' into 'master'\r\n\r\nAdd changelog\r\n\r\nSome merge thing\r\n\r\nSee merge request !1", + 'author': { + 'username': 'jsmith', + 'url': 'http://gitlab.com/jsmith', + 
'avatar_url': 'http://some/avatar/url' + }, + }, + } + + def lookup_user(_): + return { + 'username': 'jsmith', + 'html_url': 'http://gitlab.com/jsmith', + 'avatar_url': 'http://some/avatar/url', + } + + assertSchema('gitlab_webhook_multicommit', expected, gl_webhook, lookup_user=lookup_user) + + +def test_gitlab_webhook_for_tag(): + expected = { + 'commit': u'82b3d5ae55f7080f1e6022629cdb57bfae7cccc7', + 'commit_info': { + 'author': { + 'avatar_url': 'http://some/avatar/url', + 'url': 'http://gitlab.com/jsmith', + 'username': 'jsmith' + }, + 'date': '2015-08-13T19:33:18+00:00', + 'message': 'Fix link\n', + 'url': 'https://some/url', + }, + 'git_url': u'git@example.com:jsmith/example.git', + 'ref': u'refs/tags/v1.0.0', + } + + def lookup_user(_): + return { + 'username': 'jsmith', + 'html_url': 'http://gitlab.com/jsmith', + 'avatar_url': 'http://some/avatar/url', + } + + def lookup_commit(repo_id, commit_sha): + if commit_sha == '82b3d5ae55f7080f1e6022629cdb57bfae7cccc7': + return { + "id": "82b3d5ae55f7080f1e6022629cdb57bfae7cccc7", + "message": "Fix link\n", + "timestamp": "2015-08-13T19:33:18+00:00", + "url": "https://some/url", + "author_name": "Foo Guy", + "author_email": "foo@bar.com", + } + + return None + + assertSchema('gitlab_webhook_tag', expected, gl_webhook, lookup_user=lookup_user, + lookup_commit=lookup_commit) + + +def test_gitlab_webhook_for_tag_nocommit(): + assertSkipped('gitlab_webhook_tag', gl_webhook) + + +def test_gitlab_webhook_for_tag_commit_sha_null(): + assertSkipped('gitlab_webhook_tag_commit_sha_null', gl_webhook) + + +def test_gitlab_webhook_for_tag_known_issue(): + expected = { + 'commit': u'770830e7ca132856991e6db4f7fc0f4dbe20bd5f', + 'ref': u'refs/tags/thirdtag', + 'git_url': u'git@gitlab.com:someuser/some-test-project.git', + 'commit_info': { + 'url': u'https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f', + 'date': u'2019-10-17T18:07:48Z', + 'message': u'Update Dockerfile', + 'author': { + 'username': 'someuser', + 'url': 'http://gitlab.com/someuser', + 'avatar_url': 'http://some/avatar/url', + }, + }, + } + + def lookup_user(_): + return { + 'username': 'someuser', + 'html_url': 'http://gitlab.com/someuser', + 'avatar_url': 'http://some/avatar/url', + } + + assertSchema('gitlab_webhook_tag_commit_issue', expected, gl_webhook, lookup_user=lookup_user) + + +def test_gitlab_webhook_payload_known_issue(): + expected = { + 'commit': u'770830e7ca132856991e6db4f7fc0f4dbe20bd5f', + 'ref': u'refs/tags/fourthtag', + 'git_url': u'git@gitlab.com:someuser/some-test-project.git', + 'commit_info': { + 'url': u'https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f', + 'date': u'2019-10-17T18:07:48Z', + 'message': u'Update Dockerfile', + }, + } + + def lookup_commit(repo_id, commit_sha): + if commit_sha == '770830e7ca132856991e6db4f7fc0f4dbe20bd5f': + return { + "added": [], + "author": { + "name": "Some User", + "email": "someuser@somedomain.com" + }, + "url": "https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f", + "message": "Update Dockerfile", + "removed": [], + "modified": [ + "Dockerfile" + ], + "id": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f" + } + + return None + + assertSchema('gitlab_webhook_known_issue', expected, gl_webhook, lookup_commit=lookup_commit) + + +def test_gitlab_webhook_for_other(): + assertSkipped('gitlab_webhook_other', gl_webhook) + + +def test_gitlab_webhook_payload_with_lookup(): + expected = { + 'commit': 
u'fb88379ee45de28a0a4590fddcbd8eff8b36026e', + 'ref': u'refs/heads/master', + 'git_url': u'git@gitlab.com:jsmith/somerepo.git', + 'commit_info': { + 'url': u'https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e', + 'date': u'2015-08-13T19:33:18+00:00', + 'message': u'Fix link\n', + 'author': { + 'username': 'jsmith', + 'url': 'http://gitlab.com/jsmith', + 'avatar_url': 'http://some/avatar/url', + }, + }, + } + + def lookup_user(_): + return { + 'username': 'jsmith', + 'html_url': 'http://gitlab.com/jsmith', + 'avatar_url': 'http://some/avatar/url', + } + + assertSchema('gitlab_webhook', expected, gl_webhook, lookup_user=lookup_user) + + +def test_github_webhook_payload_deleted_commit(): + expected = { + 'commit': u'456806b662cb903a0febbaed8344f3ed42f27bab', + 'commit_info': { + 'author': { + 'username': u'jsmith' + }, + 'committer': { + 'username': u'jsmith' + }, + 'date': u'2015-12-08T18:07:03-05:00', + 'message': (u'Merge pull request #1044 from jsmith/errerror\n\n' + + 'Assign the exception to a variable to log it'), + 'url': u'https://github.com/jsmith/somerepo/commit/456806b662cb903a0febbaed8344f3ed42f27bab' + }, + 'git_url': u'git@github.com:jsmith/somerepo.git', + 'ref': u'refs/heads/master', + 'default_branch': u'master', + } + + def lookup_user(_): + return None + + assertSchema('github_webhook_deletedcommit', expected, gh_webhook, lookup_user=lookup_user) + + +def test_github_webhook_known_issue(): + def lookup_user(_): + return None + + assertSkipped('github_webhook_knownissue', gh_webhook, lookup_user=lookup_user) + + +def test_bitbucket_webhook_known_issue(): + assertSkipped('bitbucket_knownissue', bb_webhook) diff --git a/buildtrigger/test/test_triggerutil.py b/buildtrigger/test/test_triggerutil.py new file mode 100644 index 000000000..15f1bec10 --- /dev/null +++ b/buildtrigger/test/test_triggerutil.py @@ -0,0 +1,25 @@ +import re + +import pytest + +from buildtrigger.triggerutil import matches_ref + +@pytest.mark.parametrize('ref, filt, matches', [ + ('ref/heads/master', '.+', True), + ('ref/heads/master', 'heads/.+', True), + ('ref/heads/master', 'heads/master', True), + ('ref/heads/slash/branch', 'heads/slash/branch', True), + ('ref/heads/slash/branch', 'heads/.+', True), + + ('ref/heads/foobar', 'heads/master', False), + ('ref/heads/master', 'tags/master', False), + + ('ref/heads/master', '(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)', True), + ('ref/heads/alpha', '(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)', True), + ('ref/heads/beta', '(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)', True), + ('ref/heads/gamma', '(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)', True), + + ('ref/heads/delta', '(((heads/alpha)|(heads/beta))|(heads/gamma))|(heads/master)', False), +]) +def test_matches_ref(ref, filt, matches): + assert matches_ref(ref, re.compile(filt)) == matches diff --git a/test/triggerjson/bitbucket_commit.json b/buildtrigger/test/triggerjson/bitbucket_commit.json similarity index 81% rename from test/triggerjson/bitbucket_commit.json rename to buildtrigger/test/triggerjson/bitbucket_commit.json index 1cdd5bf65..6307001d4 100644 --- a/test/triggerjson/bitbucket_commit.json +++ b/buildtrigger/test/triggerjson/bitbucket_commit.json @@ -9,9 +9,9 @@ "file": "Readme" } ], - "raw_author": "Mary Anthony ", + "raw_author": "Mark Anthony ", "utctimestamp": "2012-07-23 22:26:36+00:00", - "author": "Mary Anthony", + "author": "Mark Anthony", "timestamp": "2012-07-24 00:26:36", "node": 
"abdeaf1b2b4a6b9ddf742c1e1754236380435a62", "parents": [ diff --git a/test/triggerjson/bitbucket_customer_example_noauthor.json b/buildtrigger/test/triggerjson/bitbucket_customer_example_noauthor.json similarity index 55% rename from test/triggerjson/bitbucket_customer_example_noauthor.json rename to buildtrigger/test/triggerjson/bitbucket_customer_example_noauthor.json index 7ebc9296d..b7560359d 100644 --- a/test/triggerjson/bitbucket_customer_example_noauthor.json +++ b/buildtrigger/test/triggerjson/bitbucket_customer_example_noauthor.json @@ -1,55 +1,43 @@ { "actor": { - "username": "LightSide_CodeShip", + "account_id": "SomeCoolLabs_CodeShip", "links": { "self": { - "href": "https://api.bitbucket.org/2.0/users/LightSide_CodeShip" + "href": "https://api.bitbucket.org/2.0/users/SomeCoolLabs_CodeShip" }, "avatar": { - "href": "https://bitbucket.org/account/LightSide_CodeShip/avatar/32/" - }, - "html": { - "href": "https://bitbucket.org/LightSide_CodeShip/" + "href": "https://bitbucket.org/account/SomeCoolLabs_CodeShip/avatar/32/" } }, - "uuid": "{d009ab20-b8b8-4840-9491-bfe72fbf666e}", "type": "user", "display_name": "CodeShip Tagging" }, "repository": { - "full_name": "lightsidelabs/svc-identity", + "full_name": "somecoollabs/svc-identity", "name": "svc-identity", "scm": "git", "type": "repository", "links": { "self": { - "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity" + "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity" }, "avatar": { - "href": "https://bitbucket.org/lightsidelabs/svc-identity/avatar/16/" - }, - "html": { - "href": "https://bitbucket.org/lightsidelabs/svc-identity" + "href": "https://bitbucket.org/somecoollabs/svc-identity/avatar/16/" } }, "is_private": true, - "uuid": "{3400bed9-5cde-45b9-8d86-c1dac5d5e610}", "owner": { - "username": "lightsidelabs", + "account_id": "somecoollabs", "links": { "self": { - "href": "https://api.bitbucket.org/2.0/teams/lightsidelabs" + "href": "https://api.bitbucket.org/2.0/teams/somecoollabs" }, "avatar": { - "href": "https://bitbucket.org/account/lightsidelabs/avatar/32/" - }, - "html": { - "href": "https://bitbucket.org/lightsidelabs/" + "href": "https://bitbucket.org/account/somecoollabs/avatar/32/" } }, - "uuid": "{456c5f28-7338-4d89-9506-c7b889ba2d11}", "type": "team", - "display_name": "LightSIDE Labs" + "display_name": "Some Cool Labs" } }, "push": { @@ -60,14 +48,14 @@ "hash": "a0ec139843b2bb281ab21a433266ddc498e605dc", "links": { "self": { - "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/a0ec139843b2bb281ab21a433266ddc498e605dc" + "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/a0ec139843b2bb281ab21a433266ddc498e605dc" }, "html": { - "href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc" + "href": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc" } }, "author": { - "raw": "scripts/autotag_version.py " + "raw": "scripts/autotag_version.py " }, "type": "commit", "message": "Update version.py to 0.1.2 [skip ci]\n\n(by utilitybelt/scripts/autotag_version.py)\n" @@ -82,10 +70,10 @@ "hash": "bd749165b0c50c65c15fc4df526b8e9df26eff10", "links": { "self": { - "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/bd749165b0c50c65c15fc4df526b8e9df26eff10" + "href": 
"https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/bd749165b0c50c65c15fc4df526b8e9df26eff10" }, "html": { - "href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/bd749165b0c50c65c15fc4df526b8e9df26eff10" + "href": "https://bitbucket.org/somecoollabs/svc-identity/commits/bd749165b0c50c65c15fc4df526b8e9df26eff10" } }, "type": "commit" @@ -94,10 +82,10 @@ "hash": "910b5624b74190dfaa51938d851563a4c5254926", "links": { "self": { - "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/910b5624b74190dfaa51938d851563a4c5254926" + "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/910b5624b74190dfaa51938d851563a4c5254926" }, "html": { - "href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/910b5624b74190dfaa51938d851563a4c5254926" + "href": "https://bitbucket.org/somecoollabs/svc-identity/commits/910b5624b74190dfaa51938d851563a4c5254926" } }, "type": "commit" @@ -109,42 +97,38 @@ "hash": "263736ecc250113fad56a93f83b712093554ad42", "links": { "self": { - "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/263736ecc250113fad56a93f83b712093554ad42" + "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/263736ecc250113fad56a93f83b712093554ad42" }, "html": { - "href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/263736ecc250113fad56a93f83b712093554ad42" + "href": "https://bitbucket.org/somecoollabs/svc-identity/commits/263736ecc250113fad56a93f83b712093554ad42" } }, "author": { - "raw": "Chris Winters ", + "raw": "John Smith ", "user": { - "username": "cwinters", + "account_id": "jsmith", "links": { "self": { - "href": "https://api.bitbucket.org/2.0/users/cwinters" + "href": "https://api.bitbucket.org/2.0/users/jsmith" }, "avatar": { - "href": "https://bitbucket.org/account/cwinters/avatar/32/" - }, - "html": { - "href": "https://bitbucket.org/cwinters/" + "href": "https://bitbucket.org/account/jsmith/avatar/32/" } }, - "uuid": "{a6209615-6d75-4294-8181-dbf96d40fc6b}", "type": "user", - "display_name": "Chris Winters" + "display_name": "John Smith" } } }, "links": { "self": { - "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/refs/branches/master" + "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/refs/branches/master" }, "commits": { - "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commits/master" + "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commits/master" }, "html": { - "href": "https://bitbucket.org/lightsidelabs/svc-identity/branch/master" + "href": "https://bitbucket.org/somecoollabs/svc-identity/branch/master" } }, "name": "master", @@ -152,13 +136,13 @@ }, "links": { "diff": { - "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/diff/a0ec139843b2bb281ab21a433266ddc498e605dc..263736ecc250113fad56a93f83b712093554ad42" + "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/diff/a0ec139843b2bb281ab21a433266ddc498e605dc..263736ecc250113fad56a93f83b712093554ad42" }, "commits": { - "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commits?include=a0ec139843b2bb281ab21a433266ddc498e605dc&exclude=263736ecc250113fad56a93f83b712093554ad42" + "href": 
"https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commits?include=a0ec139843b2bb281ab21a433266ddc498e605dc&exclude=263736ecc250113fad56a93f83b712093554ad42" }, "html": { - "href": "https://bitbucket.org/lightsidelabs/svc-identity/branches/compare/a0ec139843b2bb281ab21a433266ddc498e605dc..263736ecc250113fad56a93f83b712093554ad42" + "href": "https://bitbucket.org/somecoollabs/svc-identity/branches/compare/a0ec139843b2bb281ab21a433266ddc498e605dc..263736ecc250113fad56a93f83b712093554ad42" } }, "new": { @@ -168,10 +152,10 @@ "hash": "263736ecc250113fad56a93f83b712093554ad42", "links": { "self": { - "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/263736ecc250113fad56a93f83b712093554ad42" + "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/263736ecc250113fad56a93f83b712093554ad42" }, "html": { - "href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/263736ecc250113fad56a93f83b712093554ad42" + "href": "https://bitbucket.org/somecoollabs/svc-identity/commits/263736ecc250113fad56a93f83b712093554ad42" } }, "type": "commit" @@ -183,25 +167,25 @@ "hash": "a0ec139843b2bb281ab21a433266ddc498e605dc", "links": { "self": { - "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/a0ec139843b2bb281ab21a433266ddc498e605dc" + "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/a0ec139843b2bb281ab21a433266ddc498e605dc" }, "html": { - "href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc" + "href": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc" } }, "author": { - "raw": "scripts/autotag_version.py " + "raw": "scripts/autotag_version.py " } }, "links": { "self": { - "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/refs/branches/master" + "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/refs/branches/master" }, "commits": { - "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commits/master" + "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commits/master" }, "html": { - "href": "https://bitbucket.org/lightsidelabs/svc-identity/branch/master" + "href": "https://bitbucket.org/somecoollabs/svc-identity/branch/master" } }, "name": "master", diff --git a/test/triggerjson/bitbucket_customer_example_tag.json b/buildtrigger/test/triggerjson/bitbucket_customer_example_tag.json similarity index 53% rename from test/triggerjson/bitbucket_customer_example_tag.json rename to buildtrigger/test/triggerjson/bitbucket_customer_example_tag.json index 3b3246ded..40c9b0ef6 100644 --- a/test/triggerjson/bitbucket_customer_example_tag.json +++ b/buildtrigger/test/triggerjson/bitbucket_customer_example_tag.json @@ -4,7 +4,7 @@ { "links": { "commits": { - "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commits?include=a0ec139843b2bb281ab21a433266ddc498e605dc" + "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commits?include=a0ec139843b2bb281ab21a433266ddc498e605dc" } }, "closed": false, @@ -13,10 +13,10 @@ "date": "2015-09-25T00:55:08+00:00", "links": { "html": { - "href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc" + "href": "https://bitbucket.org/somecoollabs/svc-identity/commits/a0ec139843b2bb281ab21a433266ddc498e605dc" }, "self": { - 
"href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/a0ec139843b2bb281ab21a433266ddc498e605dc" + "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/a0ec139843b2bb281ab21a433266ddc498e605dc" } }, "message": "Update version.py to 0.1.2 [skip ci]\n\n(by utilitybelt/scripts/autotag_version.py)\n", @@ -25,10 +25,10 @@ { "links": { "html": { - "href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/263736ecc250113fad56a93f83b712093554ad42" + "href": "https://bitbucket.org/somecoollabs/svc-identity/commits/263736ecc250113fad56a93f83b712093554ad42" }, "self": { - "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commit/263736ecc250113fad56a93f83b712093554ad42" + "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commit/263736ecc250113fad56a93f83b712093554ad42" } }, "hash": "263736ecc250113fad56a93f83b712093554ad42", @@ -37,19 +37,19 @@ ], "hash": "a0ec139843b2bb281ab21a433266ddc498e605dc", "author": { - "raw": "scripts/autotag_version.py " + "raw": "scripts/autotag_version.py " } }, "name": "0.1.2", "links": { "html": { - "href": "https://bitbucket.org/lightsidelabs/svc-identity/commits/tag/0.1.2" + "href": "https://bitbucket.org/somecoollabs/svc-identity/commits/tag/0.1.2" }, "self": { - "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/refs/tags/0.1.2" + "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/refs/tags/0.1.2" }, "commits": { - "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity/commits/0.1.2" + "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity/commits/0.1.2" } }, "type": "tag" @@ -65,53 +65,44 @@ "name": "svc-identity", "links": { "html": { - "href": "https://bitbucket.org/lightsidelabs/svc-identity" + "href": "https://bitbucket.org/somecoollabs/svc-identity" }, "self": { - "href": "https://api.bitbucket.org/2.0/repositories/lightsidelabs/svc-identity" + "href": "https://api.bitbucket.org/2.0/repositories/somecoollabs/svc-identity" }, "avatar": { - "href": "https://bitbucket.org/lightsidelabs/svc-identity/avatar/16/" + "href": "https://bitbucket.org/somecoollabs/svc-identity/avatar/16/" } }, "is_private": true, "type": "repository", "scm": "git", "owner": { - "username": "lightsidelabs", + "account_id": "somecoollabs", "links": { - "html": { - "href": "https://bitbucket.org/lightsidelabs/" - }, "self": { - "href": "https://api.bitbucket.org/2.0/teams/lightsidelabs" + "href": "https://api.bitbucket.org/2.0/teams/somecoollabs" }, "avatar": { - "href": "https://bitbucket.org/account/lightsidelabs/avatar/32/" + "href": "https://bitbucket.org/account/somecoollabs/avatar/32/" } }, - "display_name": "LightSIDE Labs", - "uuid": "{456c5f28-7338-4d89-9506-c7b889ba2d11}", + "display_name": "Some Cool Labs", "type": "team" }, - "full_name": "lightsidelabs/svc-identity", - "uuid": "{3400bed9-5cde-45b9-8d86-c1dac5d5e610}" + "full_name": "somecoollabs/svc-identity" }, "actor": { - "username": "LightSide_CodeShip", + "account_id": "SomeCoolLabs_CodeShip", "links": { - "html": { - "href": "https://bitbucket.org/LightSide_CodeShip/" - }, "self": { - "href": "https://api.bitbucket.org/2.0/users/LightSide_CodeShip" + "href": "https://api.bitbucket.org/2.0/users/SomeCoolLabs_CodeShip" }, "avatar": { - "href": "https://bitbucket.org/account/LightSide_CodeShip/avatar/32/" + "href": "https://bitbucket.org/account/SomeCoolLabs_CodeShip/avatar/32/" } }, "display_name": 
"CodeShip Tagging", - "uuid": "{d009ab20-b8b8-4840-9491-bfe72fbf666e}", "type": "user" } } \ No newline at end of file diff --git a/buildtrigger/test/triggerjson/bitbucket_knownissue.json b/buildtrigger/test/triggerjson/bitbucket_knownissue.json new file mode 100644 index 000000000..f60e8013d --- /dev/null +++ b/buildtrigger/test/triggerjson/bitbucket_knownissue.json @@ -0,0 +1,68 @@ +{ + "push": { + "changes": [ + + ] + }, + "actor": { + "account_id": "jsmith", + "display_name": "John Smith", + "type": "user", + "links": { + "self": { + "href": "https:\/\/api.bitbucket.org\/2.0\/users\/jsmith" + }, + "avatar": { + "href": "https:\/\/bitbucket.org\/account\/jsmith\/avatar\/32\/" + } + } + }, + "repository": { + "website": "", + "scm": "git", + "name": "slip-api", + "links": { + "self": { + "href": "https:\/\/api.bitbucket.org\/2.0\/repositories\/goldcuff\/slip-api" + }, + "html": { + "href": "https:\/\/bitbucket.org\/goldcuff\/slip-api" + }, + "avatar": { + "href": "https:\/\/bitbucket.org\/goldcuff\/slip-api\/avatar\/32\/" + } + }, + "project": { + "links": { + "self": { + "href": "https:\/\/api.bitbucket.org\/2.0\/teams\/goldcuff\/projects\/SLIP" + }, + "html": { + "href": "https:\/\/bitbucket.org\/account\/user\/goldcuff\/projects\/SLIP" + }, + "avatar": { + "href": "https:\/\/bitbucket.org\/account\/user\/goldcuff\/projects\/SLIP\/avatar\/32" + } + }, + "type": "project", + "name": "SLIP", + "key": "SLIP" + }, + "full_name": "goldcuff\/slip-api", + "owner": { + "account_id": "goldcuff", + "display_name": "Goldcuff", + "type": "team", + "links": { + "self": { + "href": "https:\/\/api.bitbucket.org\/2.0\/teams\/goldcuff" + }, + "avatar": { + "href": "https:\/\/bitbucket.org\/account\/goldcuff\/avatar\/32\/" + } + } + }, + "type": "repository", + "is_private": true + } +} \ No newline at end of file diff --git a/test/triggerjson/bitbucket_webhook.json b/buildtrigger/test/triggerjson/bitbucket_webhook.json similarity index 54% rename from test/triggerjson/bitbucket_webhook.json rename to buildtrigger/test/triggerjson/bitbucket_webhook.json index 9567e0f92..18006eeb7 100644 --- a/test/triggerjson/bitbucket_webhook.json +++ b/buildtrigger/test/triggerjson/bitbucket_webhook.json @@ -4,67 +4,66 @@ { "links": { "commits": { - "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commits?include=af64ae7188685f8424040b4735ad12941b980d75&exclude=1784139225279a587e0afb151bed1f9ba3dd509e" + "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commits?include=af64ae7188685f8424040b4735ad12941b980d75&exclude=1784139225279a587e0afb151bed1f9ba3dd509e" }, "diff": { - "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/diff/af64ae7188685f8424040b4735ad12941b980d75..1784139225279a587e0afb151bed1f9ba3dd509e" + "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/diff/af64ae7188685f8424040b4735ad12941b980d75..1784139225279a587e0afb151bed1f9ba3dd509e" }, "html": { - "href": "https://bitbucket.org/jscoreos/another-repo/branches/compare/af64ae7188685f8424040b4735ad12941b980d75..1784139225279a587e0afb151bed1f9ba3dd509e" + "href": "https://bitbucket.org/jsmith/another-repo/branches/compare/af64ae7188685f8424040b4735ad12941b980d75..1784139225279a587e0afb151bed1f9ba3dd509e" } }, "old": { "name": "master", "links": { "commits": { - "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commits/master" + "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commits/master" }, "html": { - "href": 
"https://bitbucket.org/jscoreos/another-repo/branch/master" + "href": "https://bitbucket.org/jsmith/another-repo/branch/master" }, "self": { - "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/refs/branches/master" + "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/refs/branches/master" } }, "type": "branch", "target": { "links": { "html": { - "href": "https://bitbucket.org/jscoreos/another-repo/commits/1784139225279a587e0afb151bed1f9ba3dd509e" + "href": "https://bitbucket.org/jsmith/another-repo/commits/1784139225279a587e0afb151bed1f9ba3dd509e" }, "self": { - "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commit/1784139225279a587e0afb151bed1f9ba3dd509e" + "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commit/1784139225279a587e0afb151bed1f9ba3dd509e" } }, "author": { "user": { "links": { "avatar": { - "href": "https://bitbucket.org/account/jscoreos/avatar/32/" + "href": "https://bitbucket.org/account/jsmith/avatar/32/" }, "html": { - "href": "https://bitbucket.org/jscoreos/" + "href": "https://bitbucket.org/jsmith/" }, "self": { - "href": "https://api.bitbucket.org/2.0/users/jscoreos" + "href": "https://api.bitbucket.org/2.0/users/jsmith" } }, - "uuid": "{2fa27577-f361-45bb-999a-f4450c546b73}", "type": "user", - "display_name": "Joseph Schorr", - "username": "jscoreos" + "display_name": "John Smith", + "account_id": "jsmith" }, - "raw": "Joseph Schorr " + "raw": "John Smith " }, "date": "2015-09-10T20:37:54+00:00", "parents": [ { "links": { "html": { - "href": "https://bitbucket.org/jscoreos/another-repo/commits/5329daa0961ec968de9ef36f30024bfa0da73103" + "href": "https://bitbucket.org/jsmith/another-repo/commits/5329daa0961ec968de9ef36f30024bfa0da73103" }, "self": { - "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commit/5329daa0961ec968de9ef36f30024bfa0da73103" + "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commit/5329daa0961ec968de9ef36f30024bfa0da73103" } }, "type": "commit", @@ -84,28 +83,24 @@ "user": { "links": { "avatar": { - "href": "https://bitbucket.org/account/jscoreos/avatar/32/" - }, - "html": { - "href": "https://bitbucket.org/jscoreos/" + "href": "https://bitbucket.org/account/jsmith/avatar/32/" }, "self": { - "href": "https://api.bitbucket.org/2.0/users/jscoreos" + "href": "https://api.bitbucket.org/2.0/users/jsmith" } }, - "uuid": "{2fa27577-f361-45bb-999a-f4450c546b73}", "type": "user", - "display_name": "Joseph Schorr", - "username": "jscoreos" + "display_name": "John Smith", + "account_id": "jsmith" }, - "raw": "Joseph Schorr " + "raw": "John Smith " }, "links": { "html": { - "href": "https://bitbucket.org/jscoreos/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75" + "href": "https://bitbucket.org/jsmith/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75" }, "self": { - "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commit/af64ae7188685f8424040b4735ad12941b980d75" + "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commit/af64ae7188685f8424040b4735ad12941b980d75" } }, "message": "Dockerfile edited online with Bitbucket", @@ -117,54 +112,50 @@ "name": "master", "links": { "commits": { - "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commits/master" + "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commits/master" }, "html": { - "href": "https://bitbucket.org/jscoreos/another-repo/branch/master" + 
"href": "https://bitbucket.org/jsmith/another-repo/branch/master" }, "self": { - "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/refs/branches/master" + "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/refs/branches/master" } }, "type": "branch", "target": { "links": { "html": { - "href": "https://bitbucket.org/jscoreos/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75" + "href": "https://bitbucket.org/jsmith/another-repo/commits/af64ae7188685f8424040b4735ad12941b980d75" }, "self": { - "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commit/af64ae7188685f8424040b4735ad12941b980d75" + "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commit/af64ae7188685f8424040b4735ad12941b980d75" } }, "author": { "user": { "links": { "avatar": { - "href": "https://bitbucket.org/account/jscoreos/avatar/32/" - }, - "html": { - "href": "https://bitbucket.org/jscoreos/" + "href": "https://bitbucket.org/account/jsmith/avatar/32/" }, "self": { - "href": "https://api.bitbucket.org/2.0/users/jscoreos" + "href": "https://api.bitbucket.org/2.0/users/jsmith" } }, - "uuid": "{2fa27577-f361-45bb-999a-f4450c546b73}", "type": "user", - "display_name": "Joseph Schorr", - "username": "jscoreos" + "display_name": "John Smith", + "account_id": "jsmith" }, - "raw": "Joseph Schorr " + "raw": "John Smith " }, "date": "2015-09-10T20:40:54+00:00", "parents": [ { "links": { "html": { - "href": "https://bitbucket.org/jscoreos/another-repo/commits/1784139225279a587e0afb151bed1f9ba3dd509e" + "href": "https://bitbucket.org/jsmith/another-repo/commits/1784139225279a587e0afb151bed1f9ba3dd509e" }, "self": { - "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo/commit/1784139225279a587e0afb151bed1f9ba3dd509e" + "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo/commit/1784139225279a587e0afb151bed1f9ba3dd509e" } }, "type": "commit", @@ -184,54 +175,45 @@ "repository": { "links": { "avatar": { - "href": "https://bitbucket.org/jscoreos/another-repo/avatar/16/" + "href": "https://bitbucket.org/jsmith/another-repo/avatar/16/" }, "html": { - "href": "https://bitbucket.org/jscoreos/another-repo" + "href": "https://bitbucket.org/jsmith/another-repo" }, "self": { - "href": "https://api.bitbucket.org/2.0/repositories/jscoreos/another-repo" + "href": "https://api.bitbucket.org/2.0/repositories/jsmith/another-repo" } }, - "full_name": "jscoreos/another-repo", - "uuid": "{b3459203-3e58-497b-8059-ad087b6b01de}", + "full_name": "jsmith/another-repo", "type": "repository", "is_private": true, "name": "Another Repo", "owner": { "links": { "avatar": { - "href": "https://bitbucket.org/account/jscoreos/avatar/32/" - }, - "html": { - "href": "https://bitbucket.org/jscoreos/" + "href": "https://bitbucket.org/account/jsmith/avatar/32/" }, "self": { - "href": "https://api.bitbucket.org/2.0/users/jscoreos" + "href": "https://api.bitbucket.org/2.0/users/jsmith" } }, - "uuid": "{2fa27577-f361-45bb-999a-f4450c546b73}", "type": "user", - "display_name": "Joseph Schorr", - "username": "jscoreos" + "display_name": "John Smith", + "account_id": "jsmith" }, "scm": "git" }, "actor": { "links": { "avatar": { - "href": "https://bitbucket.org/account/jscoreos/avatar/32/" - }, - "html": { - "href": "https://bitbucket.org/jscoreos/" + "href": "https://bitbucket.org/account/jsmith/avatar/32/" }, "self": { - "href": "https://api.bitbucket.org/2.0/users/jscoreos" + "href": "https://api.bitbucket.org/2.0/users/jsmith" } }, - 
"uuid": "{2fa27577-f361-45bb-999a-f4450c546b73}", "type": "user", - "display_name": "Joseph Schorr", - "username": "jscoreos" + "display_name": "John Smith", + "account_id": "jsmith" } } \ No newline at end of file diff --git a/test/triggerjson/custom_webhook.json b/buildtrigger/test/triggerjson/custom_webhook.json similarity index 100% rename from test/triggerjson/custom_webhook.json rename to buildtrigger/test/triggerjson/custom_webhook.json diff --git a/buildtrigger/test/triggerjson/github_webhook.json b/buildtrigger/test/triggerjson/github_webhook.json new file mode 100644 index 000000000..7d4dca0ed --- /dev/null +++ b/buildtrigger/test/triggerjson/github_webhook.json @@ -0,0 +1,153 @@ +{ + "ref": "refs/heads/master", + "before": "9ea43cab474709d4a61afb7e3340de1ffc405b41", + "after": "410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "created": false, + "deleted": false, + "forced": false, + "base_ref": null, + "compare": "https://github.com/jsmith/anothertest/compare/9ea43cab4747...410f4cdf8ff0", + "commits": [ + { + "id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "distinct": true, + "message": "Update Dockerfile", + "timestamp": "2015-09-11T14:26:16-04:00", + "url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "author": { + "name": "jsmith", + "email": "jsmith@users.noreply.github.com", + "username": "jsmith" + }, + "committer": { + "name": "jsmith", + "email": "jsmith@users.noreply.github.com", + "username": "jsmith" + }, + "added": [], + "removed": [], + "modified": [ + "Dockerfile" + ] + } + ], + "head_commit": { + "id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "distinct": true, + "message": "Update Dockerfile", + "timestamp": "2015-09-11T14:26:16-04:00", + "url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "author": { + "name": "jsmith", + "email": "jsmith@users.noreply.github.com", + "username": "jsmith" + }, + "committer": { + "name": "jsmith", + "email": "jsmith@users.noreply.github.com", + "username": "jsmith" + }, + "added": [], + "removed": [], + "modified": [ + "Dockerfile" + ] + }, + "repository": { + "id": 1234567, + "name": "anothertest", + "full_name": "jsmith/anothertest", + "owner": { + "name": "jsmith", + "email": "jsmith@users.noreply.github.com" + }, + "private": false, + "html_url": "https://github.com/jsmith/anothertest", + "description": "", + "fork": false, + "url": "https://github.com/jsmith/anothertest", + "forks_url": "https://api.github.com/repos/jsmith/anothertest/forks", + "keys_url": "https://api.github.com/repos/jsmith/anothertest/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/jsmith/anothertest/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/jsmith/anothertest/teams", + "hooks_url": "https://api.github.com/repos/jsmith/anothertest/hooks", + "issue_events_url": "https://api.github.com/repos/jsmith/anothertest/issues/events{/number}", + "events_url": "https://api.github.com/repos/jsmith/anothertest/events", + "assignees_url": "https://api.github.com/repos/jsmith/anothertest/assignees{/user}", + "branches_url": "https://api.github.com/repos/jsmith/anothertest/branches{/branch}", + "tags_url": "https://api.github.com/repos/jsmith/anothertest/tags", + "blobs_url": "https://api.github.com/repos/jsmith/anothertest/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/jsmith/anothertest/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/jsmith/anothertest/git/refs{/sha}", + "trees_url": 
"https://api.github.com/repos/jsmith/anothertest/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/jsmith/anothertest/statuses/{sha}", + "languages_url": "https://api.github.com/repos/jsmith/anothertest/languages", + "stargazers_url": "https://api.github.com/repos/jsmith/anothertest/stargazers", + "contributors_url": "https://api.github.com/repos/jsmith/anothertest/contributors", + "subscribers_url": "https://api.github.com/repos/jsmith/anothertest/subscribers", + "subscription_url": "https://api.github.com/repos/jsmith/anothertest/subscription", + "commits_url": "https://api.github.com/repos/jsmith/anothertest/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/jsmith/anothertest/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/jsmith/anothertest/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/jsmith/anothertest/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/jsmith/anothertest/contents/{+path}", + "compare_url": "https://api.github.com/repos/jsmith/anothertest/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/jsmith/anothertest/merges", + "archive_url": "https://api.github.com/repos/jsmith/anothertest/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/jsmith/anothertest/downloads", + "issues_url": "https://api.github.com/repos/jsmith/anothertest/issues{/number}", + "pulls_url": "https://api.github.com/repos/jsmith/anothertest/pulls{/number}", + "milestones_url": "https://api.github.com/repos/jsmith/anothertest/milestones{/number}", + "notifications_url": "https://api.github.com/repos/jsmith/anothertest/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/jsmith/anothertest/labels{/name}", + "releases_url": "https://api.github.com/repos/jsmith/anothertest/releases{/id}", + "created_at": 1430426945, + "updated_at": "2015-04-30T20:49:05Z", + "pushed_at": 1441995976, + "git_url": "git://github.com/jsmith/anothertest.git", + "ssh_url": "git@github.com:jsmith/anothertest.git", + "clone_url": "https://github.com/jsmith/anothertest.git", + "svn_url": "https://github.com/jsmith/anothertest", + "homepage": null, + "size": 144, + "stargazers_count": 0, + "watchers_count": 0, + "language": null, + "has_issues": true, + "has_downloads": true, + "has_wiki": true, + "has_pages": false, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 0, + "default_branch": "master", + "stargazers": 0, + "master_branch": "master" + }, + "pusher": { + "name": "jsmith", + "email": "jsmith@users.noreply.github.com" + }, + "sender": { + "login": "jsmith", + "id": 1234567, + "avatar_url": "https://avatars.githubusercontent.com/u/1234567?v=3", + "gravatar_id": "", + "url": "https://api.github.com/users/jsmith", + "html_url": "https://github.com/jsmith", + "followers_url": "https://api.github.com/users/jsmith/followers", + "following_url": "https://api.github.com/users/jsmith/following{/other_user}", + "gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}", + "starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/jsmith/subscriptions", + "organizations_url": "https://api.github.com/users/jsmith/orgs", + "repos_url": "https://api.github.com/users/jsmith/repos", + "events_url": "https://api.github.com/users/jsmith/events{/privacy}", + "received_events_url": 
"https://api.github.com/users/jsmith/received_events", + "type": "User", + "site_admin": false + } +} \ No newline at end of file diff --git a/buildtrigger/test/triggerjson/github_webhook_deletedcommit.json b/buildtrigger/test/triggerjson/github_webhook_deletedcommit.json new file mode 100644 index 000000000..3d236db22 --- /dev/null +++ b/buildtrigger/test/triggerjson/github_webhook_deletedcommit.json @@ -0,0 +1,199 @@ +{ + "ref": "refs/heads/master", + "before": "c7fa613b99d509c0d4fcbf946f0415b5f024150b", + "after": "456806b662cb903a0febbaed8344f3ed42f27bab", + "created": false, + "deleted": false, + "forced": false, + "base_ref": null, + "compare": "https://github.com/jsmith/somerepo/compare/c7fa613b99d5...456806b662cb", + "commits": [ + { + "id": "e00365b225ad7f454982e9198756cc1ab5dc4428", + "distinct": true, + "message": "Assign the exception to a variable to log it", + "timestamp": "2015-12-08T18:03:48-05:00", + "url": "https://github.com/jsmith/somerepo/commit/e00365b225ad7f454982e9198756cc1ab5dc4428", + "author": { + "name": "John Smith", + "email": "j@smith.com", + "username": "jsmith" + }, + "committer": { + "name": "John Smith", + "email": "j@smith.com", + "username": "jsmith" + }, + "added": [ + + ], + "removed": [ + + ], + "modified": [ + "storage/basestorage.py" + ] + }, + { + "id": "456806b662cb903a0febbaed8344f3ed42f27bab", + "distinct": true, + "message": "Merge pull request #1044 from jsmith/errerror\n\nAssign the exception to a variable to log it", + "timestamp": "2015-12-08T18:07:03-05:00", + "url": "https://github.com/jsmith/somerepo/commit/456806b662cb903a0febbaed8344f3ed42f27bab", + "author": { + "name": "John Smith", + "email": "j@smith.com", + "username": "jsmith" + }, + "committer": { + "name": "John Smith", + "email": "j@smith.com", + "username": "jsmith" + }, + "added": [ + + ], + "removed": [ + + ], + "modified": [ + "storage/basestorage.py" + ] + } + ], + "head_commit": { + "id": "456806b662cb903a0febbaed8344f3ed42f27bab", + "distinct": true, + "message": "Merge pull request #1044 from jsmith/errerror\n\nAssign the exception to a variable to log it", + "timestamp": "2015-12-08T18:07:03-05:00", + "url": "https://github.com/jsmith/somerepo/commit/456806b662cb903a0febbaed8344f3ed42f27bab", + "author": { + "name": "John Smith", + "email": "j@smith.com", + "username": "jsmith" + }, + "committer": { + "name": "John Smith", + "email": "j@smith.com", + "username": "jsmith" + }, + "added": [ + + ], + "removed": [ + + ], + "modified": [ + "storage/basestorage.py" + ] + }, + "repository": { + "id": 12345678, + "name": "somerepo", + "full_name": "jsmith/somerepo", + "owner": { + "name": "jsmith", + "email": null + }, + "private": true, + "html_url": "https://github.com/jsmith/somerepo", + "description": "Some Cool Repo", + "fork": false, + "url": "https://github.com/jsmith/somerepo", + "forks_url": "https://api.github.com/repos/jsmith/somerepo/forks", + "keys_url": "https://api.github.com/repos/jsmith/somerepo/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/jsmith/somerepo/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/jsmith/somerepo/teams", + "hooks_url": "https://api.github.com/repos/jsmith/somerepo/hooks", + "issue_events_url": "https://api.github.com/repos/jsmith/somerepo/issues/events{/number}", + "events_url": "https://api.github.com/repos/jsmith/somerepo/events", + "assignees_url": "https://api.github.com/repos/jsmith/somerepo/assignees{/user}", + "branches_url": 
"https://api.github.com/repos/jsmith/somerepo/branches{/branch}", + "tags_url": "https://api.github.com/repos/jsmith/somerepo/tags", + "blobs_url": "https://api.github.com/repos/jsmith/somerepo/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/jsmith/somerepo/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/jsmith/somerepo/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/jsmith/somerepo/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/jsmith/somerepo/statuses/{sha}", + "languages_url": "https://api.github.com/repos/jsmith/somerepo/languages", + "stargazers_url": "https://api.github.com/repos/jsmith/somerepo/stargazers", + "contributors_url": "https://api.github.com/repos/jsmith/somerepo/contributors", + "subscribers_url": "https://api.github.com/repos/jsmith/somerepo/subscribers", + "subscription_url": "https://api.github.com/repos/jsmith/somerepo/subscription", + "commits_url": "https://api.github.com/repos/jsmith/somerepo/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/jsmith/somerepo/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/jsmith/somerepo/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/jsmith/somerepo/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/jsmith/somerepo/contents/{+path}", + "compare_url": "https://api.github.com/repos/jsmith/somerepo/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/jsmith/somerepo/merges", + "archive_url": "https://api.github.com/repos/jsmith/somerepo/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/jsmith/somerepo/downloads", + "issues_url": "https://api.github.com/repos/jsmith/somerepo/issues{/number}", + "pulls_url": "https://api.github.com/repos/jsmith/somerepo/pulls{/number}", + "milestones_url": "https://api.github.com/repos/jsmith/somerepo/milestones{/number}", + "notifications_url": "https://api.github.com/repos/jsmith/somerepo/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/jsmith/somerepo/labels{/name}", + "releases_url": "https://api.github.com/repos/jsmith/somerepo/releases{/id}", + "created_at": 1415056063, + "updated_at": "2015-11-12T05:16:51Z", + "pushed_at": 1449616023, + "git_url": "git://github.com/jsmith/somerepo.git", + "ssh_url": "git@github.com:jsmith/somerepo.git", + "clone_url": "https://github.com/jsmith/somerepo.git", + "svn_url": "https://github.com/jsmith/somerepo", + "homepage": "", + "size": 183677, + "stargazers_count": 3, + "watchers_count": 3, + "language": "Python", + "has_issues": true, + "has_downloads": true, + "has_wiki": false, + "has_pages": false, + "forks_count": 8, + "mirror_url": null, + "open_issues_count": 188, + "forks": 8, + "open_issues": 188, + "watchers": 3, + "default_branch": "master", + "stargazers": 3, + "master_branch": "master", + "organization": "jsmith" + }, + "pusher": { + "name": "jsmith", + "email": "j@smith.com" + }, + "organization": { + "login": "jsmith", + "id": 9876543, + "url": "https://api.github.com/orgs/jsmith", + "repos_url": "https://api.github.com/orgs/jsmith/repos", + "events_url": "https://api.github.com/orgs/jsmith/events", + "members_url": "https://api.github.com/orgs/jsmith/members{/member}", + "public_members_url": "https://api.github.com/orgs/jsmith/public_members{/member}", + "avatar_url": "https://avatars.githubusercontent.com/u/5504624?v=3", + "description": null + }, + "sender": { + "login": "jsmith", + "id": 1234567, + 
"avatar_url": "https://avatars.githubusercontent.com/u/000000?v=3", + "gravatar_id": "", + "url": "https://api.github.com/users/jsmith", + "html_url": "https://github.com/jsmith", + "followers_url": "https://api.github.com/users/jsmith/followers", + "following_url": "https://api.github.com/users/jsmith/following{/other_user}", + "gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}", + "starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/jsmith/subscriptions", + "organizations_url": "https://api.github.com/users/jsmith/orgs", + "repos_url": "https://api.github.com/users/jsmith/repos", + "events_url": "https://api.github.com/users/jsmith/events{/privacy}", + "received_events_url": "https://api.github.com/users/jsmith/received_events", + "type": "User", + "site_admin": false + } +} \ No newline at end of file diff --git a/buildtrigger/test/triggerjson/github_webhook_knownissue.json b/buildtrigger/test/triggerjson/github_webhook_knownissue.json new file mode 100644 index 000000000..b2a569ca3 --- /dev/null +++ b/buildtrigger/test/triggerjson/github_webhook_knownissue.json @@ -0,0 +1,126 @@ +{ + "ref": "refs/heads/1.2.6", + "before": "76a309ed96c72986eddffc02d2f4dda3fe689f10", + "after": "0000000000000000000000000000000000000000", + "created": false, + "deleted": true, + "forced": false, + "base_ref": null, + "compare": "https://github.com/jsmith/somerepo/compare/76a309ed96c7...000000000000", + "commits": [ + + ], + "head_commit": null, + "repository": { + "id": 12345678, + "name": "somerepo", + "full_name": "jsmith/somerepo", + "owner": { + "name": "jsmith", + "email": "j@smith.com" + }, + "private": true, + "html_url": "https://github.com/jsmith/somerepo", + "description": "Dockerfile for some repo", + "fork": false, + "url": "https://github.com/jsmith/somerepo", + "forks_url": "https://api.github.com/repos/jsmith/somerepo/forks", + "keys_url": "https://api.github.com/repos/jsmith/somerepo/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/jsmith/somerepo/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/jsmith/somerepo/teams", + "hooks_url": "https://api.github.com/repos/jsmith/somerepo/hooks", + "issue_events_url": "https://api.github.com/repos/jsmith/somerepo/issues/events{/number}", + "events_url": "https://api.github.com/repos/jsmith/somerepo/events", + "assignees_url": "https://api.github.com/repos/jsmith/somerepo/assignees{/user}", + "branches_url": "https://api.github.com/repos/jsmith/somerepo/branches{/branch}", + "tags_url": "https://api.github.com/repos/jsmith/somerepo/tags", + "blobs_url": "https://api.github.com/repos/jsmith/somerepo/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/jsmith/somerepo/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/jsmith/somerepo/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/jsmith/somerepo/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/jsmith/somerepo/statuses/{sha}", + "languages_url": "https://api.github.com/repos/jsmith/somerepo/languages", + "stargazers_url": "https://api.github.com/repos/jsmith/somerepo/stargazers", + "contributors_url": "https://api.github.com/repos/jsmith/somerepo/contributors", + "subscribers_url": "https://api.github.com/repos/jsmith/somerepo/subscribers", + "subscription_url": "https://api.github.com/repos/jsmith/somerepo/subscription", + "commits_url": "https://api.github.com/repos/jsmith/somerepo/commits{/sha}", + 
"git_commits_url": "https://api.github.com/repos/jsmith/somerepo/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/jsmith/somerepo/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/jsmith/somerepo/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/jsmith/somerepo/contents/{+path}", + "compare_url": "https://api.github.com/repos/jsmith/somerepo/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/jsmith/somerepo/merges", + "archive_url": "https://api.github.com/repos/jsmith/somerepo/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/jsmith/somerepo/downloads", + "issues_url": "https://api.github.com/repos/jsmith/somerepo/issues{/number}", + "pulls_url": "https://api.github.com/repos/jsmith/somerepo/pulls{/number}", + "milestones_url": "https://api.github.com/repos/jsmith/somerepo/milestones{/number}", + "notifications_url": "https://api.github.com/repos/jsmith/somerepo/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/jsmith/somerepo/labels{/name}", + "releases_url": "https://api.github.com/repos/jsmith/somerepo/releases{/id}", + "deployments_url": "https://api.github.com/repos/jsmith/somerepo/deployments", + "created_at": 1461165926, + "updated_at": "2016-11-03T18:20:01Z", + "pushed_at": 1479313569, + "git_url": "git://github.com/jsmith/somerepo.git", + "ssh_url": "git@github.com:jsmith/somerepo.git", + "clone_url": "https://github.com/jsmith/somerepo.git", + "svn_url": "https://github.com/jsmith/somerepo", + "homepage": "", + "size": 3114, + "stargazers_count": 0, + "watchers_count": 0, + "language": "Shell", + "has_issues": true, + "has_downloads": true, + "has_wiki": true, + "has_pages": false, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 0, + "default_branch": "master", + "stargazers": 0, + "master_branch": "master", + "organization": "jsmith" + }, + "pusher": { + "name": "jsmith", + "email": "j@smith.com" + }, + "organization": { + "login": "jsmith", + "id": 9876543, + "url": "https://api.github.com/orgs/jsmith", + "repos_url": "https://api.github.com/orgs/jsmith/repos", + "events_url": "https://api.github.com/orgs/jsmith/events", + "hooks_url": "https://api.github.com/orgs/jsmith/hooks", + "issues_url": "https://api.github.com/orgs/jsmith/issues", + "members_url": "https://api.github.com/orgs/jsmith/members{/member}", + "public_members_url": "https://api.github.com/orgs/jsmith/public_members{/member}", + "avatar_url": "https://avatars.githubusercontent.com/u/1234567?v=3", + "description": "Open Source Projects for Linux Containers" + }, + "sender": { + "login": "jsmith", + "id": 12345678, + "avatar_url": "https://avatars.githubusercontent.com/u/1234567?v=3", + "gravatar_id": "", + "url": "https://api.github.com/users/jsmith", + "html_url": "https://github.com/jsmith", + "followers_url": "https://api.github.com/users/jsmith/followers", + "following_url": "https://api.github.com/users/jsmith/following{/other_user}", + "gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}", + "starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/jsmith/subscriptions", + "organizations_url": "https://api.github.com/users/jsmith/orgs", + "repos_url": "https://api.github.com/users/jsmith/repos", + "events_url": "https://api.github.com/users/jsmith/events{/privacy}", + "received_events_url": 
"https://api.github.com/users/jsmith/received_events", + "type": "User", + "site_admin": false + } +} \ No newline at end of file diff --git a/buildtrigger/test/triggerjson/github_webhook_missing.json b/buildtrigger/test/triggerjson/github_webhook_missing.json new file mode 100644 index 000000000..e94996505 --- /dev/null +++ b/buildtrigger/test/triggerjson/github_webhook_missing.json @@ -0,0 +1,133 @@ +{ + "ref": "refs/heads/master", + "before": "9ea43cab474709d4a61afb7e3340de1ffc405b41", + "after": "410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "created": false, + "deleted": false, + "forced": false, + "base_ref": null, + "compare": "https://github.com/jsmith/anothertest/compare/9ea43cab4747...410f4cdf8ff0", + "commits": [ + { + "id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "distinct": true, + "message": "Update Dockerfile", + "timestamp": "2015-09-11T14:26:16-04:00", + "url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "added": [], + "removed": [], + "modified": [ + "Dockerfile" + ] + } + ], + "head_commit": { + "id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "distinct": true, + "message": "Update Dockerfile", + "timestamp": "2015-09-11T14:26:16-04:00", + "url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "added": [], + "removed": [], + "modified": [ + "Dockerfile" + ] + }, + "repository": { + "id": 12345678, + "name": "anothertest", + "full_name": "jsmith/anothertest", + "owner": { + "name": "jsmith", + "email": "jsmith@users.noreply.github.com" + }, + "private": false, + "html_url": "https://github.com/jsmith/anothertest", + "description": "", + "fork": false, + "url": "https://github.com/jsmith/anothertest", + "forks_url": "https://api.github.com/repos/jsmith/anothertest/forks", + "keys_url": "https://api.github.com/repos/jsmith/anothertest/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/jsmith/anothertest/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/jsmith/anothertest/teams", + "hooks_url": "https://api.github.com/repos/jsmith/anothertest/hooks", + "issue_events_url": "https://api.github.com/repos/jsmith/anothertest/issues/events{/number}", + "events_url": "https://api.github.com/repos/jsmith/anothertest/events", + "assignees_url": "https://api.github.com/repos/jsmith/anothertest/assignees{/user}", + "branches_url": "https://api.github.com/repos/jsmith/anothertest/branches{/branch}", + "tags_url": "https://api.github.com/repos/jsmith/anothertest/tags", + "blobs_url": "https://api.github.com/repos/jsmith/anothertest/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/jsmith/anothertest/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/jsmith/anothertest/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/jsmith/anothertest/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/jsmith/anothertest/statuses/{sha}", + "languages_url": "https://api.github.com/repos/jsmith/anothertest/languages", + "stargazers_url": "https://api.github.com/repos/jsmith/anothertest/stargazers", + "contributors_url": "https://api.github.com/repos/jsmith/anothertest/contributors", + "subscribers_url": "https://api.github.com/repos/jsmith/anothertest/subscribers", + "subscription_url": "https://api.github.com/repos/jsmith/anothertest/subscription", + "commits_url": "https://api.github.com/repos/jsmith/anothertest/commits{/sha}", + "git_commits_url": 
"https://api.github.com/repos/jsmith/anothertest/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/jsmith/anothertest/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/jsmith/anothertest/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/jsmith/anothertest/contents/{+path}", + "compare_url": "https://api.github.com/repos/jsmith/anothertest/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/jsmith/anothertest/merges", + "archive_url": "https://api.github.com/repos/jsmith/anothertest/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/jsmith/anothertest/downloads", + "issues_url": "https://api.github.com/repos/jsmith/anothertest/issues{/number}", + "pulls_url": "https://api.github.com/repos/jsmith/anothertest/pulls{/number}", + "milestones_url": "https://api.github.com/repos/jsmith/anothertest/milestones{/number}", + "notifications_url": "https://api.github.com/repos/jsmith/anothertest/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/jsmith/anothertest/labels{/name}", + "releases_url": "https://api.github.com/repos/jsmith/anothertest/releases{/id}", + "created_at": 1430426945, + "updated_at": "2015-04-30T20:49:05Z", + "pushed_at": 1441995976, + "git_url": "git://github.com/jsmith/anothertest.git", + "ssh_url": "git@github.com:jsmith/anothertest.git", + "clone_url": "https://github.com/jsmith/anothertest.git", + "svn_url": "https://github.com/jsmith/anothertest", + "homepage": null, + "size": 144, + "stargazers_count": 0, + "watchers_count": 0, + "language": null, + "has_issues": true, + "has_downloads": true, + "has_wiki": true, + "has_pages": false, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 0, + "default_branch": "master", + "stargazers": 0, + "master_branch": "master" + }, + "pusher": { + "name": "jsmith", + "email": "jsmith@users.noreply.github.com" + }, + "sender": { + "login": "jsmith", + "id": 1234567, + "avatar_url": "https://avatars.githubusercontent.com/u/4073002?v=3", + "gravatar_id": "", + "url": "https://api.github.com/users/jsmith", + "html_url": "https://github.com/jsmith", + "followers_url": "https://api.github.com/users/jsmith/followers", + "following_url": "https://api.github.com/users/jsmith/following{/other_user}", + "gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}", + "starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/jsmith/subscriptions", + "organizations_url": "https://api.github.com/users/jsmith/orgs", + "repos_url": "https://api.github.com/users/jsmith/repos", + "events_url": "https://api.github.com/users/jsmith/events{/privacy}", + "received_events_url": "https://api.github.com/users/jsmith/received_events", + "type": "User", + "site_admin": false + } +} \ No newline at end of file diff --git a/buildtrigger/test/triggerjson/github_webhook_noname.json b/buildtrigger/test/triggerjson/github_webhook_noname.json new file mode 100644 index 000000000..0e02edc9e --- /dev/null +++ b/buildtrigger/test/triggerjson/github_webhook_noname.json @@ -0,0 +1,149 @@ +{ + "ref": "refs/heads/master", + "before": "9716b516939221dc754a056e0f9ddf599e71d4b8", + "after": "118b07121695d9f2e40a5ff264fdcc2917680870", + "created": false, + "deleted": false, + "forced": false, + "base_ref": null, + "compare": "https://github.com/jsmith/docker-test/compare/9716b5169392...118b07121695", 
+ "commits": [ + { + "id": "118b07121695d9f2e40a5ff264fdcc2917680870", + "distinct": true, + "message": "Fail", + "timestamp": "2015-09-25T14:55:11-04:00", + "url": "https://github.com/jsmith/docker-test/commit/118b07121695d9f2e40a5ff264fdcc2917680870", + "author": { + "name": "John Smith", + "email": "j@smith.com" + }, + "committer": { + "name": "John Smith", + "email": "j@smith.com" + }, + "added": [], + "removed": [], + "modified": [ + "README.md" + ] + } + ], + "head_commit": { + "id": "118b07121695d9f2e40a5ff264fdcc2917680870", + "distinct": true, + "message": "Fail", + "timestamp": "2015-09-25T14:55:11-04:00", + "url": "https://github.com/jsmith/docker-test/commit/118b07121695d9f2e40a5ff264fdcc2917680870", + "author": { + "name": "John Smith", + "email": "j@smith.com" + }, + "committer": { + "name": "John Smith", + "email": "j@smith.com" + }, + "added": [], + "removed": [], + "modified": [ + "README.md" + ] + }, + "repository": { + "id": 1234567, + "name": "docker-test", + "full_name": "jsmith/docker-test", + "owner": { + "name": "jsmith", + "email": "j@smith.com" + }, + "private": false, + "html_url": "https://github.com/jsmith/docker-test", + "description": "", + "fork": false, + "url": "https://github.com/jsmith/docker-test", + "forks_url": "https://api.github.com/repos/jsmith/docker-test/forks", + "keys_url": "https://api.github.com/repos/jsmith/docker-test/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/jsmith/docker-test/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/jsmith/docker-test/teams", + "hooks_url": "https://api.github.com/repos/jsmith/docker-test/hooks", + "issue_events_url": "https://api.github.com/repos/jsmith/docker-test/issues/events{/number}", + "events_url": "https://api.github.com/repos/jsmith/docker-test/events", + "assignees_url": "https://api.github.com/repos/jsmith/docker-test/assignees{/user}", + "branches_url": "https://api.github.com/repos/jsmith/docker-test/branches{/branch}", + "tags_url": "https://api.github.com/repos/jsmith/docker-test/tags", + "blobs_url": "https://api.github.com/repos/jsmith/docker-test/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/jsmith/docker-test/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/jsmith/docker-test/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/jsmith/docker-test/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/jsmith/docker-test/statuses/{sha}", + "languages_url": "https://api.github.com/repos/jsmith/docker-test/languages", + "stargazers_url": "https://api.github.com/repos/jsmith/docker-test/stargazers", + "contributors_url": "https://api.github.com/repos/jsmith/docker-test/contributors", + "subscribers_url": "https://api.github.com/repos/jsmith/docker-test/subscribers", + "subscription_url": "https://api.github.com/repos/jsmith/docker-test/subscription", + "commits_url": "https://api.github.com/repos/jsmith/docker-test/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/jsmith/docker-test/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/jsmith/docker-test/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/jsmith/docker-test/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/jsmith/docker-test/contents/{+path}", + "compare_url": "https://api.github.com/repos/jsmith/docker-test/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/jsmith/docker-test/merges", + "archive_url": 
"https://api.github.com/repos/jsmith/docker-test/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/jsmith/docker-test/downloads", + "issues_url": "https://api.github.com/repos/jsmith/docker-test/issues{/number}", + "pulls_url": "https://api.github.com/repos/jsmith/docker-test/pulls{/number}", + "milestones_url": "https://api.github.com/repos/jsmith/docker-test/milestones{/number}", + "notifications_url": "https://api.github.com/repos/jsmith/docker-test/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/jsmith/docker-test/labels{/name}", + "releases_url": "https://api.github.com/repos/jsmith/docker-test/releases{/id}", + "created_at": 1442254053, + "updated_at": "2015-09-14T18:07:33Z", + "pushed_at": 1443207315, + "git_url": "git://github.com/jsmith/docker-test.git", + "ssh_url": "git@github.com:jsmith/docker-test.git", + "clone_url": "https://github.com/jsmith/docker-test.git", + "svn_url": "https://github.com/jsmith/docker-test", + "homepage": null, + "size": 108, + "stargazers_count": 0, + "watchers_count": 0, + "language": null, + "has_issues": true, + "has_downloads": true, + "has_wiki": true, + "has_pages": false, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 0, + "default_branch": "master", + "stargazers": 0, + "master_branch": "master" + }, + "pusher": { + "name": "jsmith", + "email": "j@smith.com" + }, + "sender": { + "login": "jsmith", + "id": 1234567, + "avatar_url": "https://avatars.githubusercontent.com/u/1234567?v=3", + "gravatar_id": "", + "url": "https://api.github.com/users/jsmith", + "html_url": "https://github.com/jsmith", + "followers_url": "https://api.github.com/users/jsmith/followers", + "following_url": "https://api.github.com/users/jsmith/following{/other_user}", + "gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}", + "starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/jsmith/subscriptions", + "organizations_url": "https://api.github.com/users/jsmith/orgs", + "repos_url": "https://api.github.com/users/jsmith/repos", + "events_url": "https://api.github.com/users/jsmith/events{/privacy}", + "received_events_url": "https://api.github.com/users/jsmith/received_events", + "type": "User", + "site_admin": false + } +} \ No newline at end of file diff --git a/buildtrigger/test/triggerjson/github_webhook_slash_branch.json b/buildtrigger/test/triggerjson/github_webhook_slash_branch.json new file mode 100644 index 000000000..7fa6df4c3 --- /dev/null +++ b/buildtrigger/test/triggerjson/github_webhook_slash_branch.json @@ -0,0 +1,153 @@ +{ + "ref": "refs/heads/slash/branch", + "before": "9ea43cab474709d4a61afb7e3340de1ffc405b41", + "after": "410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "created": false, + "deleted": false, + "forced": false, + "base_ref": null, + "compare": "https://github.com/jsmith/anothertest/compare/9ea43cab4747...410f4cdf8ff0", + "commits": [ + { + "id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "distinct": true, + "message": "Update Dockerfile", + "timestamp": "2015-09-11T14:26:16-04:00", + "url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "author": { + "name": "jsmith", + "email": "jsmith@users.noreply.github.com", + "username": "jsmith" + }, + "committer": { + "name": "jsmith", + "email": "jsmith@users.noreply.github.com", + "username": "jsmith" + }, + "added": [], + "removed": [], + 
"modified": [ + "Dockerfile" + ] + } + ], + "head_commit": { + "id": "410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "distinct": true, + "message": "Update Dockerfile", + "timestamp": "2015-09-11T14:26:16-04:00", + "url": "https://github.com/jsmith/anothertest/commit/410f4cdf8ff09b87f245b13845e8497f90b90a4c", + "author": { + "name": "jsmith", + "email": "jsmith@users.noreply.github.com", + "username": "jsmith" + }, + "committer": { + "name": "jsmith", + "email": "jsmith@users.noreply.github.com", + "username": "jsmith" + }, + "added": [], + "removed": [], + "modified": [ + "Dockerfile" + ] + }, + "repository": { + "id": 1234567, + "name": "anothertest", + "full_name": "jsmith/anothertest", + "owner": { + "name": "jsmith", + "email": "jsmith@users.noreply.github.com" + }, + "private": false, + "html_url": "https://github.com/jsmith/anothertest", + "description": "", + "fork": false, + "url": "https://github.com/jsmith/anothertest", + "forks_url": "https://api.github.com/repos/jsmith/anothertest/forks", + "keys_url": "https://api.github.com/repos/jsmith/anothertest/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/jsmith/anothertest/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/jsmith/anothertest/teams", + "hooks_url": "https://api.github.com/repos/jsmith/anothertest/hooks", + "issue_events_url": "https://api.github.com/repos/jsmith/anothertest/issues/events{/number}", + "events_url": "https://api.github.com/repos/jsmith/anothertest/events", + "assignees_url": "https://api.github.com/repos/jsmith/anothertest/assignees{/user}", + "branches_url": "https://api.github.com/repos/jsmith/anothertest/branches{/branch}", + "tags_url": "https://api.github.com/repos/jsmith/anothertest/tags", + "blobs_url": "https://api.github.com/repos/jsmith/anothertest/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/jsmith/anothertest/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/jsmith/anothertest/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/jsmith/anothertest/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/jsmith/anothertest/statuses/{sha}", + "languages_url": "https://api.github.com/repos/jsmith/anothertest/languages", + "stargazers_url": "https://api.github.com/repos/jsmith/anothertest/stargazers", + "contributors_url": "https://api.github.com/repos/jsmith/anothertest/contributors", + "subscribers_url": "https://api.github.com/repos/jsmith/anothertest/subscribers", + "subscription_url": "https://api.github.com/repos/jsmith/anothertest/subscription", + "commits_url": "https://api.github.com/repos/jsmith/anothertest/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/jsmith/anothertest/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/jsmith/anothertest/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/jsmith/anothertest/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/jsmith/anothertest/contents/{+path}", + "compare_url": "https://api.github.com/repos/jsmith/anothertest/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/jsmith/anothertest/merges", + "archive_url": "https://api.github.com/repos/jsmith/anothertest/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/jsmith/anothertest/downloads", + "issues_url": "https://api.github.com/repos/jsmith/anothertest/issues{/number}", + "pulls_url": "https://api.github.com/repos/jsmith/anothertest/pulls{/number}", + "milestones_url": 
"https://api.github.com/repos/jsmith/anothertest/milestones{/number}", + "notifications_url": "https://api.github.com/repos/jsmith/anothertest/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/jsmith/anothertest/labels{/name}", + "releases_url": "https://api.github.com/repos/jsmith/anothertest/releases{/id}", + "created_at": 1430426945, + "updated_at": "2015-04-30T20:49:05Z", + "pushed_at": 1441995976, + "git_url": "git://github.com/jsmith/anothertest.git", + "ssh_url": "git@github.com:jsmith/anothertest.git", + "clone_url": "https://github.com/jsmith/anothertest.git", + "svn_url": "https://github.com/jsmith/anothertest", + "homepage": null, + "size": 144, + "stargazers_count": 0, + "watchers_count": 0, + "language": null, + "has_issues": true, + "has_downloads": true, + "has_wiki": true, + "has_pages": false, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 0, + "default_branch": "master", + "stargazers": 0, + "master_branch": "master" + }, + "pusher": { + "name": "jsmith", + "email": "jsmith@users.noreply.github.com" + }, + "sender": { + "login": "jsmith", + "id": 1234567, + "avatar_url": "https://avatars.githubusercontent.com/u/1234567?v=3", + "gravatar_id": "", + "url": "https://api.github.com/users/jsmith", + "html_url": "https://github.com/jsmith", + "followers_url": "https://api.github.com/users/jsmith/followers", + "following_url": "https://api.github.com/users/jsmith/following{/other_user}", + "gists_url": "https://api.github.com/users/jsmith/gists{/gist_id}", + "starred_url": "https://api.github.com/users/jsmith/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/jsmith/subscriptions", + "organizations_url": "https://api.github.com/users/jsmith/orgs", + "repos_url": "https://api.github.com/users/jsmith/repos", + "events_url": "https://api.github.com/users/jsmith/events{/privacy}", + "received_events_url": "https://api.github.com/users/jsmith/received_events", + "type": "User", + "site_admin": false + } +} \ No newline at end of file diff --git a/buildtrigger/test/triggerjson/gitlab_webhook.json b/buildtrigger/test/triggerjson/gitlab_webhook.json new file mode 100644 index 000000000..b25c81524 --- /dev/null +++ b/buildtrigger/test/triggerjson/gitlab_webhook.json @@ -0,0 +1,54 @@ +{ + "object_kind": "push", + "before": "11fcaca195e8b17ca7e3dc47d9608d5b6b892f45", + "after": "fb88379ee45de28a0a4590fddcbd8eff8b36026e", + "ref": "refs/heads/master", + "checkout_sha": "fb88379ee45de28a0a4590fddcbd8eff8b36026e", + "message": null, + "user_id": 98765, + "user_name": "John Smith", + "user_email": "j@smith.com", + "project_id": 12344567, + "repository": { + "name": "somerepo", + "url": "git@gitlab.com:jsmith/somerepo.git", + "description": "", + "homepage": "https://gitlab.com/jsmith/somerepo", + "git_http_url": "https://gitlab.com/jsmith/somerepo.git", + "git_ssh_url": "git@gitlab.com:jsmith/somerepo.git", + "visibility_level": 20 + }, + "commits": [ + { + "id": "fb88379ee45de28a0a4590fddcbd8eff8b36026e", + "message": "Fix link\n", + "timestamp": "2015-08-13T19:33:18+00:00", + "url": "https://gitlab.com/jsmith/somerepo/commit/fb88379ee45de28a0a4590fddcbd8eff8b36026e", + "author": { + "name": "Jane Smith", + "email": "jane@smith.com" + } + }, + { + "id": "4ca166bc0b511f21fa331873f260f1a7cb38d723", + "message": "Do Some Cool Thing", + "timestamp": "2015-08-13T15:52:15+00:00", + "url": 
"https://gitlab.com/jsmith/somerepo/commit/4ca166bc0b511f21fa331873f260f1a7cb38d723", + "author": { + "name": "Jane Smith", + "email": "jane@smith.com" + } + }, + { + "id": "11fcaca195e8b17ca7e3dc47d9608d5b6b892f45", + "message": "Merge another cool thing", + "timestamp": "2015-08-13T09:31:47+00:00", + "url": "https://gitlab.com/jsmith/somerepo/commit/11fcaca195e8b17ca7e3dc47d9608d5b6b892f45", + "author": { + "name": "Kate Smith", + "email": "kate@smith.com" + } + } + ], + "total_commits_count": 3 +} \ No newline at end of file diff --git a/buildtrigger/test/triggerjson/gitlab_webhook_known_issue.json b/buildtrigger/test/triggerjson/gitlab_webhook_known_issue.json new file mode 100644 index 000000000..09b54bb3a --- /dev/null +++ b/buildtrigger/test/triggerjson/gitlab_webhook_known_issue.json @@ -0,0 +1,61 @@ +{ + "ref": "refs/tags/fourthtag", + "user_id": 4797254, + "object_kind": "tag_push", + "repository": { + "git_ssh_url": "git@gitlab.com:someuser/some-test-project.git", + "name": "Some test project", + "url": "git@gitlab.com:someuser/some-test-project.git", + "git_http_url": "https://gitlab.com/someuser/some-test-project.git", + "visibility_level": 0, + "homepage": "https://gitlab.com/someuser/some-test-project", + "description": "Some test project" + }, + "event_name": "tag_push", + "commits": [ + { + "added": [], + "author": { + "name": "Some User", + "email": "someuser@somedomain.com" + }, + "url": "https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f", + "timestamp": "2019-10-17T18:07:48Z", + "message": "Update Dockerfile", + "removed": [], + "modified": [ + "Dockerfile" + ], + "id": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f" + } + ], + "after": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f", + "project": { + "git_ssh_url": "git@gitlab.com:someuser/some-test-project.git", + "ci_config_path": null, + "web_url": "https://gitlab.com/someuser/some-test-project", + "description": "Some test project", + "url": "git@gitlab.com:someuser/some-test-project.git", + "namespace": "Some User", + "default_branch": "master", + "homepage": "https://gitlab.com/someuser/some-test-project", + "git_http_url": "https://gitlab.com/someuser/some-test-project.git", + "avatar_url": null, + "ssh_url": "git@gitlab.com:someuser/some-test-project.git", + "http_url": "https://gitlab.com/someuser/some-test-project.git", + "path_with_namespace": "someuser/some-test-project", + "visibility_level": 0, + "id": 14838571, + "name": "Some test project" + }, + "user_username": "someuser", + "checkout_sha": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f", + "total_commits_count": 1, + "before": "0000000000000000000000000000000000000000", + "user_avatar": "https://secure.gravatar.com/avatar/0ea05bdf5c3f2cb8aac782a4a2ac3177?s=80&d=identicon", + "message": "", + "project_id": 14838571, + "user_name": "Some User", + "user_email": "", + "push_options": {} + } \ No newline at end of file diff --git a/buildtrigger/test/triggerjson/gitlab_webhook_multicommit.json b/buildtrigger/test/triggerjson/gitlab_webhook_multicommit.json new file mode 100644 index 000000000..338e93b01 --- /dev/null +++ b/buildtrigger/test/triggerjson/gitlab_webhook_multicommit.json @@ -0,0 +1,100 @@ +{ + "object_kind": "push", + "event_name": "push", + "before": "0da5b5ebb397f0a8569c97f28e266c718607e8da", + "after": "9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53", + "ref": "refs\/heads\/master", + "checkout_sha": "9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53", + "message": null, + "user_id": 750047, + "user_name": "John Smith", + 
"user_email": "j@smith.com", + "user_avatar": "https:\/\/secure.gravatar.com\/avatar\/32784623495678234678234?s=80&d=identicon", + "project_id": 1756744, + "project": { + "name": "some-test-project", + "description": "", + "web_url": "https:\/\/gitlab.com\/jsmith\/some-test-project", + "avatar_url": null, + "git_ssh_url": "git@gitlab.com:jsmith\/some-test-project.git", + "git_http_url": "https:\/\/gitlab.com\/jsmith\/some-test-project.git", + "namespace": "jsmith", + "visibility_level": 0, + "path_with_namespace": "jsmith\/some-test-project", + "default_branch": "master", + "homepage": "https:\/\/gitlab.com\/jsmith\/some-test-project", + "url": "git@gitlab.com:jsmith\/some-test-project.git", + "ssh_url": "git@gitlab.com:jsmith\/some-test-project.git", + "http_url": "https:\/\/gitlab.com\/jsmith\/some-test-project.git" + }, + "commits": [ + { + "id": "f00a0a6a71118721ac1f586bf79650170042609f", + "message": "Add changelog", + "timestamp": "2016-09-29T14:59:23+00:00", + "url": "https:\/\/gitlab.com\/jsmith\/some-test-project\/commit\/f00a0a6a71118721ac1f586bf79650170042609f", + "author": { + "name": "John Smith", + "email": "j@smith.com" + }, + "added": [ + "CHANGELOG" + ], + "modified": [ + + ], + "removed": [ + + ] + }, + { + "id": "cc66287314cb154c986665a6c29377ef42edee60", + "message": "Add new file", + "timestamp": "2016-09-29T15:02:01+00:00", + "url": "https:\/\/gitlab.com\/jsmith\/some-test-project\/commit\/cc66287314cb154c986665a6c29377ef42edee60", + "author": { + "name": "John Smith", + "email": "j@smith.com" + }, + "added": [ + "YetAnotherFIle" + ], + "modified": [ + + ], + "removed": [ + + ] + }, + { + "id": "9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53", + "message": "Merge branch 'foobar' into 'master'\r\n\r\nAdd changelog\r\n\r\nSome merge thing\r\n\r\nSee merge request !1", + "timestamp": "2016-09-29T15:02:41+00:00", + "url": "https:\/\/gitlab.com\/jsmith\/some-test-project\/commit\/9a052a0b2fbe01d4a1a88638dd9fe31c1c56ef53", + "author": { + "name": "John Smith", + "email": "j@smith.com" + }, + "added": [ + "CHANGELOG", + "YetAnotherFIle" + ], + "modified": [ + + ], + "removed": [ + + ] + } + ], + "total_commits_count": 3, + "repository": { + "name": "some-test-project", + "url": "git@gitlab.com:jsmith\/some-test-project.git", + "description": "", + "homepage": "https:\/\/gitlab.com\/jsmith\/some-test-project", + "git_http_url": "https:\/\/gitlab.com\/jsmith\/some-test-project.git", + "git_ssh_url": "git@gitlab.com:jsmith\/some-test-project.git", + "visibility_level": 0 + } +} \ No newline at end of file diff --git a/buildtrigger/test/triggerjson/gitlab_webhook_nocommit.json b/buildtrigger/test/triggerjson/gitlab_webhook_nocommit.json new file mode 100644 index 000000000..4241debde --- /dev/null +++ b/buildtrigger/test/triggerjson/gitlab_webhook_nocommit.json @@ -0,0 +1,44 @@ +{ + "object_kind": "push", + "event_name": "push", + "before": "cc66287314cb154c986665a6c29377ef42edee60", + "after": "0000000000000000000000000000000000000000", + "ref": "refs\/heads\/foobar", + "checkout_sha": null, + "message": null, + "user_id": 750047, + "user_name": "John Smith", + "user_email": "j@smith.com", + "user_avatar": "https:\/\/secure.gravatar.com\/avatar\/2348972348972348973?s=80&d=identicon", + "project_id": 1756744, + "project": { + "name": "some-test-project", + "description": "", + "web_url": "https:\/\/gitlab.com\/jsmith\/some-test-project", + "avatar_url": null, + "git_ssh_url": "git@gitlab.com:jsmith\/some-test-project.git", + "git_http_url": 
"https:\/\/gitlab.com\/jsmith\/some-test-project.git", + "namespace": "jsmith", + "visibility_level": 0, + "path_with_namespace": "jsmith\/some-test-project", + "default_branch": "master", + "homepage": "https:\/\/gitlab.com\/jsmith\/some-test-project", + "url": "git@gitlab.com:jsmith\/some-test-project.git", + "ssh_url": "git@gitlab.com:jsmith\/some-test-project.git", + "http_url": "https:\/\/gitlab.com\/jsmith\/some-test-project.git" + }, + "commits": [ + + ], + "total_commits_count": 0, + "repository": { + "name": "some-test-project", + "url": "git@gitlab.com:jsmith\/some-test-project.git", + "description": "", + "homepage": "https:\/\/gitlab.com\/jsmith\/some-test-project", + "git_http_url": "https:\/\/gitlab.com\/jsmith\/some-test-project.git", + "git_ssh_url": "git@gitlab.com:jsmith\/some-test-project.git", + "visibility_level": 0 + } +} + diff --git a/buildtrigger/test/triggerjson/gitlab_webhook_other.json b/buildtrigger/test/triggerjson/gitlab_webhook_other.json new file mode 100644 index 000000000..0704a7b6b --- /dev/null +++ b/buildtrigger/test/triggerjson/gitlab_webhook_other.json @@ -0,0 +1,14 @@ +{ + "object_kind": "someother", + "ref": "refs/tags/v1.0.0", + "checkout_sha": "82b3d5ae55f7080f1e6022629cdb57bfae7cccc7", + "repository":{ + "name": "Example", + "url": "ssh://git@example.com/jsmith/example.git", + "description": "", + "homepage": "http://example.com/jsmith/example", + "git_http_url":"http://example.com/jsmith/example.git", + "git_ssh_url":"git@example.com:jsmith/example.git", + "visibility_level":0 + } +} \ No newline at end of file diff --git a/buildtrigger/test/triggerjson/gitlab_webhook_tag.json b/buildtrigger/test/triggerjson/gitlab_webhook_tag.json new file mode 100644 index 000000000..86dff4ce5 --- /dev/null +++ b/buildtrigger/test/triggerjson/gitlab_webhook_tag.json @@ -0,0 +1,38 @@ +{ + "object_kind": "tag_push", + "before": "0000000000000000000000000000000000000000", + "after": "82b3d5ae55f7080f1e6022629cdb57bfae7cccc7", + "ref": "refs/tags/v1.0.0", + "checkout_sha": "82b3d5ae55f7080f1e6022629cdb57bfae7cccc7", + "user_id": 1, + "user_name": "John Smith", + "user_avatar": "https://s.gravatar.com/avatar/d4c74594d841139328695756648b6bd6?s=8://s.gravatar.com/avatar/d4c74594d841139328695756648b6bd6?s=80", + "project_id": 1, + "project":{ + "name":"Example", + "description":"", + "web_url":"http://example.com/jsmith/example", + "avatar_url":null, + "git_ssh_url":"git@example.com:jsmith/example.git", + "git_http_url":"http://example.com/jsmith/example.git", + "namespace":"Jsmith", + "visibility_level":0, + "path_with_namespace":"jsmith/example", + "default_branch":"master", + "homepage":"http://example.com/jsmith/example", + "url":"git@example.com:jsmith/example.git", + "ssh_url":"git@example.com:jsmith/example.git", + "http_url":"http://example.com/jsmith/example.git" + }, + "repository":{ + "name": "Example", + "url": "ssh://git@example.com/jsmith/example.git", + "description": "", + "homepage": "http://example.com/jsmith/example", + "git_http_url":"http://example.com/jsmith/example.git", + "git_ssh_url":"git@example.com:jsmith/example.git", + "visibility_level":0 + }, + "commits": [], + "total_commits_count": 0 +} \ No newline at end of file diff --git a/buildtrigger/test/triggerjson/gitlab_webhook_tag_commit_issue.json b/buildtrigger/test/triggerjson/gitlab_webhook_tag_commit_issue.json new file mode 100644 index 000000000..6bcdbb8b2 --- /dev/null +++ b/buildtrigger/test/triggerjson/gitlab_webhook_tag_commit_issue.json @@ -0,0 +1,61 @@ +{ + "after": 
"770830e7ca132856991e6db4f7fc0f4dbe20bd5f", + "before": "0000000000000000000000000000000000000000", + "checkout_sha": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f", + "commits": [ + { + "added": [], + "author": { + "name": "Some User", + "email": "some.user@someplace.com" + }, + "id": "770830e7ca132856991e6db4f7fc0f4dbe20bd5f", + "message": "Update Dockerfile", + "modified": [ + "Dockerfile" + ], + "removed": [], + "timestamp": "2019-10-17T18:07:48Z", + "url": "https://gitlab.com/someuser/some-test-project/commit/770830e7ca132856991e6db4f7fc0f4dbe20bd5f" + } + ], + "event_name": "tag_push", + "message": "", + "object_kind": "tag_push", + "project": { + "avatar_url": null, + "ci_config_path": null, + "default_branch": "master", + "description": "Some test project", + "git_http_url": "https://gitlab.com/someuser/some-test-project.git", + "git_ssh_url": "git@gitlab.com:someuser/some-test-project.git", + "homepage": "https://gitlab.com/someuser/some-test-project", + "http_url": "https://gitlab.com/someuser/some-test-project.git", + "id": 14838571, + "name": "Some test project", + "namespace": "Joey Schorr", + "path_with_namespace": "someuser/some-test-project", + "ssh_url": "git@gitlab.com:someuser/some-test-project.git", + "url": "git@gitlab.com:someuser/some-test-project.git", + "visibility_level": 0, + "web_url": "https://gitlab.com/someuser/some-test-project" + }, + "project_id": 14838571, + "push_options": {}, + "ref": "refs/tags/thirdtag", + "repository": { + "description": "Some test project", + "git_http_url": "https://gitlab.com/someuser/some-test-project.git", + "git_ssh_url": "git@gitlab.com:someuser/some-test-project.git", + "homepage": "https://gitlab.com/someuser/some-test-project", + "name": "Some test project", + "url": "git@gitlab.com:someuser/some-test-project.git", + "visibility_level": 0 + }, + "total_commits_count": 1, + "user_avatar": "https://secure.gravatar.com/avatar/someavatar?s=80&d=identicon", + "user_email": "", + "user_id": 4797254, + "user_name": "Some User", + "user_username": "someuser" + } \ No newline at end of file diff --git a/buildtrigger/test/triggerjson/gitlab_webhook_tag_commit_sha_null.json b/buildtrigger/test/triggerjson/gitlab_webhook_tag_commit_sha_null.json new file mode 100644 index 000000000..a9b0a929f --- /dev/null +++ b/buildtrigger/test/triggerjson/gitlab_webhook_tag_commit_sha_null.json @@ -0,0 +1,38 @@ +{ + "object_kind": "tag_push", + "before": "0000000000000000000000000000000000000000", + "after": "82b3d5ae55f7080f1e6022629cdb57bfae7cccc7", + "ref": "refs/tags/v1.0.0", + "checkout_sha": null, + "user_id": 1, + "user_name": "John Smith", + "user_avatar": "https://s.gravatar.com/avatar/d4c74594d841139328695756648b6bd6?s=8://s.gravatar.com/avatar/d4c74594d841139328695756648b6bd6?s=80", + "project_id": 1, + "project":{ + "name":"Example", + "description":"", + "web_url":"http://example.com/jsmith/example", + "avatar_url":null, + "git_ssh_url":"git@example.com:jsmith/example.git", + "git_http_url":"http://example.com/jsmith/example.git", + "namespace":"Jsmith", + "visibility_level":0, + "path_with_namespace":"jsmith/example", + "default_branch":"master", + "homepage":"http://example.com/jsmith/example", + "url":"git@example.com:jsmith/example.git", + "ssh_url":"git@example.com:jsmith/example.git", + "http_url":"http://example.com/jsmith/example.git" + }, + "repository":{ + "name": "Example", + "url": "ssh://git@example.com/jsmith/example.git", + "description": "", + "homepage": "http://example.com/jsmith/example", + 
"git_http_url":"http://example.com/jsmith/example.git", + "git_ssh_url":"git@example.com:jsmith/example.git", + "visibility_level":0 + }, + "commits": [], + "total_commits_count": 0 + } \ No newline at end of file diff --git a/buildtrigger/triggerutil.py b/buildtrigger/triggerutil.py index b60f38620..5c459e53e 100644 --- a/buildtrigger/triggerutil.py +++ b/buildtrigger/triggerutil.py @@ -3,37 +3,43 @@ import io import logging import re -class InvalidPayloadException(Exception): +class TriggerException(Exception): pass -class BuildArchiveException(Exception): +class TriggerAuthException(TriggerException): pass -class InvalidServiceException(Exception): +class InvalidPayloadException(TriggerException): pass -class TriggerActivationException(Exception): +class BuildArchiveException(TriggerException): pass -class TriggerDeactivationException(Exception): +class InvalidServiceException(TriggerException): pass -class TriggerStartException(Exception): +class TriggerActivationException(TriggerException): pass -class ValidationRequestException(Exception): +class TriggerDeactivationException(TriggerException): pass -class SkipRequestException(Exception): +class TriggerStartException(TriggerException): pass -class EmptyRepositoryException(Exception): +class ValidationRequestException(TriggerException): pass -class RepositoryReadException(Exception): +class SkipRequestException(TriggerException): pass -class TriggerProviderException(Exception): +class EmptyRepositoryException(TriggerException): + pass + +class RepositoryReadException(TriggerException): + pass + +class TriggerProviderException(TriggerException): pass logger = logging.getLogger(__name__) @@ -52,7 +58,7 @@ def determine_build_ref(run_parameters, get_branch_sha, get_tag_sha, default_bra value = run_parameters['branch_name'] kind = kind or 'branch' - value = value or default_branch + value = value or default_branch or 'master' ref = 'refs/tags/' + value if kind == 'tag' else 'refs/heads/' + value commit_sha = get_tag_sha(value) if kind == 'tag' else get_branch_sha(value) diff --git a/cirun.config.yaml b/cirun.config.yaml new file mode 100644 index 000000000..b92a74ee0 --- /dev/null +++ b/cirun.config.yaml @@ -0,0 +1,3 @@ +SETUP_COMPLETE: true +V3_UPGRADE_MODE: complete +DATABASE_SECRET_KEY: anothercrazykey! diff --git a/conf/gunicorn_local.py b/conf/gunicorn_local.py index f95d85cc1..b33558ef2 100644 --- a/conf/gunicorn_local.py +++ b/conf/gunicorn_local.py @@ -1,10 +1,19 @@ -from Crypto import Random +import sys +import os +sys.path.append(os.path.join(os.path.dirname(__file__), "../")) +import logging + +from Crypto import Random +from util.log import logfile_path +from util.workers import get_worker_count + + +logconfig = logfile_path(debug=True) bind = '0.0.0.0:5000' -workers = 2 +workers = get_worker_count('local', 2, minimum=2, maximum=8) worker_class = 'gevent' daemon = False -logconfig = 'conf/logging_debug.conf' pythonpath = '.' preload_app = True @@ -12,3 +21,7 @@ def post_fork(server, worker): # Reset the Random library to ensure it won't raise the "PID check failed." error after # gunicorn forks. 
Random.atfork() + +def when_ready(server): + logger = logging.getLogger(__name__) + logger.debug('Starting local gunicorn with %s workers and %s worker class', workers, worker_class) diff --git a/conf/gunicorn_registry.py b/conf/gunicorn_registry.py index 9d7f080c1..23590ba45 100644 --- a/conf/gunicorn_registry.py +++ b/conf/gunicorn_registry.py @@ -1,13 +1,28 @@ -from Crypto import Random +import sys +import os +sys.path.append(os.path.join(os.path.dirname(__file__), "../")) +import logging + +from Crypto import Random +from util.log import logfile_path +from util.workers import get_worker_count + + +logconfig = logfile_path(debug=False) bind = 'unix:/tmp/gunicorn_registry.sock' -workers = 8 +workers = get_worker_count('registry', 4, minimum=8, maximum=64) worker_class = 'gevent' -logconfig = 'conf/logging.conf' pythonpath = '.' preload_app = True + def post_fork(server, worker): # Reset the Random library to ensure it won't raise the "PID check failed." error after # gunicorn forks. Random.atfork() + +def when_ready(server): + logger = logging.getLogger(__name__) + logger.debug('Starting registry gunicorn with %s workers and %s worker class', workers, + worker_class) diff --git a/conf/gunicorn_secscan.py b/conf/gunicorn_secscan.py index 4b16b4399..daea39c38 100644 --- a/conf/gunicorn_secscan.py +++ b/conf/gunicorn_secscan.py @@ -1,13 +1,28 @@ -from Crypto import Random +import sys +import os +sys.path.append(os.path.join(os.path.dirname(__file__), "../")) +import logging + +from Crypto import Random +from util.log import logfile_path +from util.workers import get_worker_count + + +logconfig = logfile_path(debug=False) bind = 'unix:/tmp/gunicorn_secscan.sock' -workers = 2 +workers = get_worker_count('secscan', 2, minimum=2, maximum=4) worker_class = 'gevent' -logconfig = 'conf/logging.conf' pythonpath = '.' preload_app = True + def post_fork(server, worker): # Reset the Random library to ensure it won't raise the "PID check failed." error after # gunicorn forks. Random.atfork() + +def when_ready(server): + logger = logging.getLogger(__name__) + logger.debug('Starting secscan gunicorn with %s workers and %s worker class', workers, + worker_class) diff --git a/conf/gunicorn_verbs.py b/conf/gunicorn_verbs.py index ad432ee2a..9502f7563 100644 --- a/conf/gunicorn_verbs.py +++ b/conf/gunicorn_verbs.py @@ -1,13 +1,27 @@ +import sys +import os +sys.path.append(os.path.join(os.path.dirname(__file__), "../")) + +import logging + from Crypto import Random +from util.log import logfile_path +from util.workers import get_worker_count + +logconfig = logfile_path(debug=False) bind = 'unix:/tmp/gunicorn_verbs.sock' -workers = 4 -logconfig = 'conf/logging.conf' +workers = get_worker_count('verbs', 2, minimum=2, maximum=32) pythonpath = '.' preload_app = True timeout = 2000 # Because sync workers + def post_fork(server, worker): # Reset the Random library to ensure it won't raise the "PID check failed." error after # gunicorn forks. 
Random.atfork() + +def when_ready(server): + logger = logging.getLogger(__name__) + logger.debug('Starting verbs gunicorn with %s workers and sync worker class', workers) diff --git a/conf/gunicorn_web.py b/conf/gunicorn_web.py index b6a5ddbcd..8bd1abaa0 100644 --- a/conf/gunicorn_web.py +++ b/conf/gunicorn_web.py @@ -1,9 +1,19 @@ +import sys +import os +sys.path.append(os.path.join(os.path.dirname(__file__), "../")) + +import logging + from Crypto import Random +from util.log import logfile_path +from util.workers import get_worker_count + + +logconfig = logfile_path(debug=False) bind = 'unix:/tmp/gunicorn_web.sock' -workers = 2 +workers = get_worker_count('web', 2, minimum=2, maximum=32) worker_class = 'gevent' -logconfig = 'conf/logging.conf' pythonpath = '.' preload_app = True @@ -11,3 +21,8 @@ def post_fork(server, worker): # Reset the Random library to ensure it won't raise the "PID check failed." error after # gunicorn forks. Random.atfork() + +def when_ready(server): + logger = logging.getLogger(__name__) + logger.debug('Starting web gunicorn with %s workers and %s worker class', workers, + worker_class) diff --git a/conf/init/02_get_kube_certs.py b/conf/init/02_get_kube_certs.py new file mode 100644 index 000000000..3f88a09ac --- /dev/null +++ b/conf/init/02_get_kube_certs.py @@ -0,0 +1,71 @@ +import json +import os +import base64 + +from requests import Request, Session + +QUAYPATH = os.environ.get('QUAYPATH', '.') +KUBE_EXTRA_CA_CERTDIR = os.environ.get('KUBE_EXTRA_CA_CERTDIR', '%s/conf/kube_extra_certs' % QUAYPATH) + +KUBERNETES_API_HOST = os.environ.get('KUBERNETES_SERVICE_HOST', '') +port = os.environ.get('KUBERNETES_SERVICE_PORT') +if port: + KUBERNETES_API_HOST += ':' + port + +SERVICE_ACCOUNT_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token' + +QE_NAMESPACE = os.environ.get('QE_K8S_NAMESPACE', 'quay-enterprise') +QE_CONFIG_SECRET = os.environ.get('QE_K8S_CONFIG_SECRET', 'quay-enterprise-config-secret') +EXTRA_CA_DIRECTORY_PREFIX = 'extra_ca_certs_' + + +def _lookup_secret(service_token): + secret_url = 'namespaces/%s/secrets/%s' % (QE_NAMESPACE, QE_CONFIG_SECRET) + response = _execute_k8s_api(service_token, 'GET', secret_url) + if response.status_code != 200: + raise Exception('Cannot get the config secret') + return json.loads(response.text) + +def _execute_k8s_api(service_account_token, method, relative_url, data=None, api_prefix='api/v1', content_type='application/json'): + headers = { + 'Authorization': 'Bearer ' + service_account_token + } + + if data: + headers['Content-Type'] = content_type + + data = json.dumps(data) if data else None + session = Session() + url = 'https://%s/%s/%s' % (KUBERNETES_API_HOST, api_prefix, relative_url) + + request = Request(method, url, data=data, headers=headers) + return session.send(request.prepare(), verify=False, timeout=2) + +def is_extra_cert(key): + return key.find(EXTRA_CA_DIRECTORY_PREFIX) == 0 + +def main(): + # Load the service account token from the local store. 
+ if not os.path.exists(SERVICE_ACCOUNT_TOKEN_PATH): + raise Exception('Cannot load Kubernetes service account token') + + with open(SERVICE_ACCOUNT_TOKEN_PATH, 'r') as f: + service_token = f.read() + + secret_data = _lookup_secret(service_token).get('data', {}) + cert_keys = filter(is_extra_cert, secret_data.keys()) + + for cert_key in cert_keys: + if not os.path.exists(KUBE_EXTRA_CA_CERTDIR): + os.mkdir(KUBE_EXTRA_CA_CERTDIR) + + cert_value = base64.b64decode(secret_data[cert_key]) + cert_filename = cert_key.replace(EXTRA_CA_DIRECTORY_PREFIX, '') + print "Found an extra cert %s in config-secret, copying to kube ca dir" % cert_filename + + with open(os.path.join(KUBE_EXTRA_CA_CERTDIR, cert_filename), 'w') as f: + f.write(cert_value) + + +if __name__ == '__main__': + main() diff --git a/conf/init/02_get_kube_certs.sh b/conf/init/02_get_kube_certs.sh new file mode 100755 index 000000000..f97ccda99 --- /dev/null +++ b/conf/init/02_get_kube_certs.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +QUAYDIR=${QUAYDIR:-"/"} +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} + +cd $QUAYDIR + +if [[ "$KUBERNETES_SERVICE_HOST" != "" ]];then + echo "Running on kubernetes, attempting to retrieve extra certs from secret" + python $QUAYCONF/init/02_get_kube_certs.py +fi diff --git a/conf/init/__init__.py b/conf/init/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/conf/init/certs_create.sh b/conf/init/certs_create.sh new file mode 100755 index 000000000..a38901543 --- /dev/null +++ b/conf/init/certs_create.sh @@ -0,0 +1,15 @@ +#! /bin/bash +set -e +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} +cd ${QUAYDIR:-"/"} +SYSTEM_CERTDIR=${SYSTEM_CERTDIR:-"/etc/pki/ca-trust/source/anchors"} +# Create certs for jwtproxy to mitm outgoing TLS connections +# echo '{"CN":"CA","key":{"algo":"rsa","size":2048}}' | cfssl gencert -initca - | cfssljson -bare mitm +mkdir -p /certificates; cd /certificates +openssl req -new -newkey rsa:4096 -days 3650 -nodes -x509 \ + -subj "/C=US/ST=NY/L=NYC/O=Dis/CN=self-signed" \ + -keyout mitm-key.pem -out mitm.pem +cp /certificates/mitm-key.pem $QUAYCONF/mitm.key +cp /certificates/mitm.pem $QUAYCONF/mitm.cert +cp /certificates/mitm.pem $SYSTEM_CERTDIR/mitm.crt diff --git a/conf/init/certs_install.sh b/conf/init/certs_install.sh new file mode 100755 index 000000000..accd169e6 --- /dev/null +++ b/conf/init/certs_install.sh @@ -0,0 +1,52 @@ +#! 
/bin/bash +set -e +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} +QUAYCONFIG=${QUAYCONFIG:-"$QUAYCONF/stack"} +CERTDIR=${CERTDIR:-"$QUAYCONFIG/extra_ca_certs"} +SYSTEM_CERTDIR=${SYSTEM_CERTDIR:-"/etc/pki/ca-trust/source/anchors"} + +PYTHON_ROOT=${PYTHON_ROOT:-"/opt/rh/python27/root/usr/lib/python2.7"} + +# If we're running under kube, the previous script (02_get_kube_certs.sh) will put the certs in a different location +if [[ "$KUBERNETES_SERVICE_HOST" != "" ]];then + CERTDIR=${KUBE_EXTRA_CA_CERTDIR:-"$QUAYPATH/conf/kube_extra_certs"} +fi + +cd ${QUAYDIR:-"/quay-registry"} + +# Add the custom LDAP certificate +if [ -e $QUAYCONFIG/ldap.crt ] +then + cp $QUAYCONFIG/ldap.crt ${SYSTEM_CERTDIR}/ldap.crt +fi + +# Add extra trusted certificates (as a directory) +if [ -d $CERTDIR ]; then + if test "$(ls -A "$CERTDIR")"; then + echo "Installing extra certificates found in $CERTDIR directory" + cp $CERTDIR/* ${SYSTEM_CERTDIR} + cat $CERTDIR/* >> $PYTHON_ROOT/site-packages/requests/cacert.pem + cat $CERTDIR/* >> $PYTHON_ROOT/site-packages/certifi/cacert.pem + fi +fi + +# Add extra trusted certificates (as a file) +if [ -f $CERTDIR ]; then + echo "Installing extra certificates found in $CERTDIR file" + csplit -z -f ${SYSTEM_CERTDIR}/extra-ca- $CERTDIR '/-----BEGIN CERTIFICATE-----/' '{*}' + cat $CERTDIR >> $PYTHON_ROOT/site-packages/requests/cacert.pem + cat $CERTDIR >> $PYTHON_ROOT/site-packages/certifi/cacert.pem +fi + +# Add extra trusted certificates (prefixed) +for f in $(find $QUAYCONFIG/ -maxdepth 1 -type f -name "extra_ca*") +do + echo "Installing extra cert $f" + cp "$f" ${SYSTEM_CERTDIR} + cat "$f" >> $PYTHON_ROOT/site-packages/requests/cacert.pem + cat "$f" >> $PYTHON_ROOT/site-packages/certifi/cacert.pem +done + +# Update all CA certificates. +update-ca-trust extract diff --git a/conf/init/copy_config_files.sh b/conf/init/copy_config_files.sh index 8849a97cf..0aef306ef 100755 --- a/conf/init/copy_config_files.sh +++ b/conf/init/copy_config_files.sh @@ -1,11 +1,16 @@ #! /bin/sh +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} -if [ -e /conf/stack/robots.txt ] +cd ${QUAYDIR:-"/"} + + +if [ -e $QUAYCONF/stack/robots.txt ] then - cp /conf/stack/robots.txt /static/robots.txt + cp $QUAYCONF/stack/robots.txt $QUAYPATH/templates/robots.txt fi -if [ -e /conf/stack/favicon.ico ] +if [ -e $QUAYCONF/stack/favicon.ico ] then - cp /conf/stack/favicon.ico /static/favicon.ico + cp $QUAYCONF/stack/favicon.ico $QUAYPATH/static/favicon.ico fi \ No newline at end of file diff --git a/conf/init/copy_syslog_config.sh b/conf/init/copy_syslog_config.sh deleted file mode 100755 index 7acd62b6b..000000000 --- a/conf/init/copy_syslog_config.sh +++ /dev/null @@ -1,6 +0,0 @@ -#! /bin/sh - -if [ -e /conf/stack/syslog-ng-extra.conf ] -then - cp /conf/stack/syslog-ng-extra.conf /etc/syslog-ng/conf.d/ -fi diff --git a/conf/init/create_certs.sh b/conf/init/create_certs.sh deleted file mode 100755 index 37b528ee3..000000000 --- a/conf/init/create_certs.sh +++ /dev/null @@ -1,15 +0,0 @@ -#! 
/bin/bash -set -e - -# Create certs for jwtproxy to mitm outgoing TLS connections -echo '{"CN":"CA","key":{"algo":"rsa","size":2048}}' | cfssl gencert -initca - | cfssljson -bare mitm -cp mitm-key.pem /conf/mitm.key -cp mitm.pem /conf/mitm.cert -cp mitm.pem /usr/local/share/ca-certificates/mitm.crt - -# Add extra trusted certificates -if [ -d /conf/stack/extra_ca_certs ]; then - cp /conf/stack/extra_ca_certs/* /usr/local/share/ca-certificates/ -fi - -update-ca-certificates diff --git a/conf/init/doupdatelimits.sh b/conf/init/doupdatelimits.sh deleted file mode 100755 index bdeb6ddec..000000000 --- a/conf/init/doupdatelimits.sh +++ /dev/null @@ -1,5 +0,0 @@ -#! /bin/bash -set -e - -# Update the connection limit -sysctl -w net.core.somaxconn=1024 || true \ No newline at end of file diff --git a/conf/init/logrotate.conf b/conf/init/logrotate.conf new file mode 100644 index 000000000..bdd2033bc --- /dev/null +++ b/conf/init/logrotate.conf @@ -0,0 +1,42 @@ +# +# This file exists because of a bug in phusion/baseimage:0.9.19 where the su +# directive below is configured to use the nonexistant syslog user. +# + + +# see "man logrotate" for details +# rotate log files weekly +weekly + +# use the syslog group by default, since this is the owning group +# of /var/log/syslog. +su root root + +# keep 4 weeks worth of backlogs +rotate 4 + +# create new (empty) log files after rotating old ones +create + +# uncomment this if you want your log files compressed +#compress + +# packages drop log rotation information into this directory +include /etc/logrotate.d + +# no packages own wtmp, or btmp -- we'll rotate them here +/var/log/wtmp { + missingok + monthly + create 0664 root utmp + rotate 1 +} + +/var/log/btmp { + missingok + monthly + create 0660 root utmp + rotate 1 +} + +# system-specific logs may be configured here diff --git a/conf/init/nginx_conf_create.py b/conf/init/nginx_conf_create.py new file mode 100644 index 000000000..56a59a2d2 --- /dev/null +++ b/conf/init/nginx_conf_create.py @@ -0,0 +1,126 @@ +import os +import os.path + +import yaml +import jinja2 + +QUAYPATH = os.getenv("QUAYPATH", ".") +QUAYDIR = os.getenv("QUAYDIR", "/") +QUAYCONF_DIR = os.getenv("QUAYCONF", os.path.join(QUAYDIR, QUAYPATH, "conf")) +STATIC_DIR = os.path.join(QUAYDIR, 'static') + +SSL_PROTOCOL_DEFAULTS = ['TLSv1', 'TLSv1.1', 'TLSv1.2'] +SSL_CIPHER_DEFAULTS = [ + 'ECDHE-RSA-AES128-GCM-SHA256', + 'ECDHE-ECDSA-AES128-GCM-SHA256', + 'ECDHE-RSA-AES256-GCM-SHA384', + 'ECDHE-ECDSA-AES256-GCM-SHA384', + 'DHE-RSA-AES128-GCM-SHA256', + 'DHE-DSS-AES128-GCM-SHA256', + 'kEDH+AESGCM', + 'ECDHE-RSA-AES128-SHA256', + 'ECDHE-ECDSA-AES128-SHA256', + 'ECDHE-RSA-AES128-SHA', + 'ECDHE-ECDSA-AES128-SHA', + 'ECDHE-RSA-AES256-SHA384', + 'ECDHE-ECDSA-AES256-SHA384', + 'ECDHE-RSA-AES256-SHA', + 'ECDHE-ECDSA-AES256-SHA', + 'DHE-RSA-AES128-SHA256', + 'DHE-RSA-AES128-SHA', + 'DHE-DSS-AES128-SHA256', + 'DHE-RSA-AES256-SHA256', + 'DHE-DSS-AES256-SHA', + 'DHE-RSA-AES256-SHA', + 'AES128-GCM-SHA256', + 'AES256-GCM-SHA384', + 'AES128-SHA256', + 'AES256-SHA256', + 'AES128-SHA', + 'AES256-SHA', + 'AES', + 'CAMELLIA', + '!3DES', + '!aNULL', + '!eNULL', + '!EXPORT', + '!DES', + '!RC4', + '!MD5', + '!PSK', + '!aECDH', + '!EDH-DSS-DES-CBC3-SHA', + '!EDH-RSA-DES-CBC3-SHA', + '!KRB5-DES-CBC3-SHA', +] + +def write_config(filename, **kwargs): + with open(filename + ".jnj") as f: + template = jinja2.Template(f.read()) + rendered = template.render(kwargs) + + with open(filename, 'w') as f: + f.write(rendered) + + +def generate_nginx_config(config): + """ + 
Generates nginx config from the app config + """ + config = config or {} + use_https = os.path.exists(os.path.join(QUAYCONF_DIR, 'stack/ssl.key')) + use_old_certs = os.path.exists(os.path.join(QUAYCONF_DIR, 'stack/ssl.old.key')) + v1_only_domain = config.get('V1_ONLY_DOMAIN', None) + enable_rate_limits = config.get('FEATURE_RATE_LIMITS', False) + ssl_protocols = config.get('SSL_PROTOCOLS', SSL_PROTOCOL_DEFAULTS) + ssl_ciphers = config.get('SSL_CIPHERS', SSL_CIPHER_DEFAULTS) + + write_config(os.path.join(QUAYCONF_DIR, 'nginx/nginx.conf'), use_https=use_https, + use_old_certs=use_old_certs, + enable_rate_limits=enable_rate_limits, + v1_only_domain=v1_only_domain, + ssl_protocols=ssl_protocols, + ssl_ciphers=':'.join(ssl_ciphers)) + + +def generate_server_config(config): + """ + Generates server config from the app config + """ + config = config or {} + tuf_server = config.get('TUF_SERVER', None) + tuf_host = config.get('TUF_HOST', None) + signing_enabled = config.get('FEATURE_SIGNING', False) + maximum_layer_size = config.get('MAXIMUM_LAYER_SIZE', '20G') + enable_rate_limits = config.get('FEATURE_RATE_LIMITS', False) + + write_config( + os.path.join(QUAYCONF_DIR, 'nginx/server-base.conf'), tuf_server=tuf_server, tuf_host=tuf_host, + signing_enabled=signing_enabled, maximum_layer_size=maximum_layer_size, + enable_rate_limits=enable_rate_limits, + static_dir=STATIC_DIR) + + +def generate_rate_limiting_config(config): + """ + Generates rate limiting config from the app config + """ + config = config or {} + non_rate_limited_namespaces = config.get('NON_RATE_LIMITED_NAMESPACES') or set() + enable_rate_limits = config.get('FEATURE_RATE_LIMITS', False) + write_config( + os.path.join(QUAYCONF_DIR, 'nginx/rate-limiting.conf'), + non_rate_limited_namespaces=non_rate_limited_namespaces, + enable_rate_limits=enable_rate_limits, + static_dir=STATIC_DIR) + +if __name__ == "__main__": + if os.path.exists(os.path.join(QUAYCONF_DIR, 'stack/config.yaml')): + with open(os.path.join(QUAYCONF_DIR, 'stack/config.yaml'), 'r') as f: + config = yaml.load(f) + else: + config = None + + generate_rate_limiting_config(config) + generate_server_config(config) + generate_nginx_config(config) diff --git a/conf/init/nginx_conf_create.sh b/conf/init/nginx_conf_create.sh new file mode 100755 index 000000000..fedfada42 --- /dev/null +++ b/conf/init/nginx_conf_create.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +QUAYDIR=${QUAYDIR:-"/"} +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} + +cd $QUAYDIR +python $QUAYCONF/init/nginx_conf_create.py diff --git a/conf/init/runmigration.sh b/conf/init/runmigration.sh index 8b006b745..448589155 100755 --- a/conf/init/runmigration.sh +++ b/conf/init/runmigration.sh @@ -1,5 +1,10 @@ -#! /bin/bash +#!/bin/bash +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} + set -e +cd ${QUAYDIR:-"/"} # Run the database migration -PYTHONPATH=. 
venv/bin/alembic upgrade head \ No newline at end of file +PYTHONPATH=${QUAYPATH:-"."} python $QUAYCONF/init/v3_migration.py > revision_head +PYTHONPATH=${QUAYPATH:-"."} alembic upgrade `cat revision_head` diff --git a/conf/init/service/batch/blobuploadcleanupworker/log/run b/conf/init/service/batch/blobuploadcleanupworker/log/run new file mode 100755 index 000000000..9cb142568 --- /dev/null +++ b/conf/init/service/batch/blobuploadcleanupworker/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t blobuploadcleanupworker diff --git a/conf/init/service/batch/blobuploadcleanupworker/run b/conf/init/service/batch/blobuploadcleanupworker/run new file mode 100755 index 000000000..29759be69 --- /dev/null +++ b/conf/init/service/batch/blobuploadcleanupworker/run @@ -0,0 +1,10 @@ +#! /bin/bash + +echo 'Starting Blob upload cleanup worker' + +QUAYPATH=${QUAYPATH:-"."} + +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.blobuploadcleanupworker.blobuploadcleanupworker 2>&1 + +echo 'Blob upload cleanup exited' \ No newline at end of file diff --git a/conf/init/service/batch/buildlogsarchiver/log/run b/conf/init/service/batch/buildlogsarchiver/log/run new file mode 100755 index 000000000..276a6459a --- /dev/null +++ b/conf/init/service/batch/buildlogsarchiver/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t buildlogsarchiver diff --git a/conf/init/service/batch/buildlogsarchiver/run b/conf/init/service/batch/buildlogsarchiver/run new file mode 100755 index 000000000..bf6a3aad9 --- /dev/null +++ b/conf/init/service/batch/buildlogsarchiver/run @@ -0,0 +1,9 @@ +#! /bin/bash + +echo 'Starting build logs archiver worker' + +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.buildlogsarchiver.buildlogsarchiver 2>&1 + +echo 'Diffs worker exited' \ No newline at end of file diff --git a/conf/init/service/batch/buildmanager/log/run b/conf/init/service/batch/buildmanager/log/run new file mode 100755 index 000000000..c1b5e95c8 --- /dev/null +++ b/conf/init/service/batch/buildmanager/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t buildmanager diff --git a/conf/init/service/batch/buildmanager/run b/conf/init/service/batch/buildmanager/run new file mode 100755 index 000000000..40015cab6 --- /dev/null +++ b/conf/init/service/batch/buildmanager/run @@ -0,0 +1,11 @@ +#! /bin/bash + +echo 'Starting internal build manager' + +# Run the build manager. +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +export PYTHONPATH=$QUAYPATH +exec venv/bin/python -m buildman.builder 2>&1 + +echo 'Internal build manager exited' diff --git a/conf/init/service/batch/chunkcleanupworker/log/run b/conf/init/service/batch/chunkcleanupworker/log/run new file mode 100755 index 000000000..a79c95cdb --- /dev/null +++ b/conf/init/service/batch/chunkcleanupworker/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t chunkcleanupworker diff --git a/conf/init/service/batch/chunkcleanupworker/run b/conf/init/service/batch/chunkcleanupworker/run new file mode 100755 index 000000000..a16307d5a --- /dev/null +++ b/conf/init/service/batch/chunkcleanupworker/run @@ -0,0 +1,9 @@ +#! 
/bin/bash + +echo 'Starting chunk cleanup worker' + +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.chunkcleanupworker 2>&1 + +echo 'Chunk cleanup worker exited' \ No newline at end of file diff --git a/conf/init/service/batch/expiredappspecifictokenworker/log/run b/conf/init/service/batch/expiredappspecifictokenworker/log/run new file mode 100755 index 000000000..a8881fc51 --- /dev/null +++ b/conf/init/service/batch/expiredappspecifictokenworker/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t expiredappspecifictokenworker diff --git a/conf/init/service/batch/expiredappspecifictokenworker/run b/conf/init/service/batch/expiredappspecifictokenworker/run new file mode 100755 index 000000000..3436f4432 --- /dev/null +++ b/conf/init/service/batch/expiredappspecifictokenworker/run @@ -0,0 +1,9 @@ +#! /bin/bash + +echo 'Starting Expired app specific token GC worker' + +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.expiredappspecifictokenworker 2>&1 + +echo 'Expired app specific token GC exited' diff --git a/conf/init/service/batch/exportactionlogsworker/log/run b/conf/init/service/batch/exportactionlogsworker/log/run new file mode 100755 index 000000000..a152ba029 --- /dev/null +++ b/conf/init/service/batch/exportactionlogsworker/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t exportactionlogsworker diff --git a/conf/init/service/batch/exportactionlogsworker/run b/conf/init/service/batch/exportactionlogsworker/run new file mode 100755 index 000000000..a2f6194e7 --- /dev/null +++ b/conf/init/service/batch/exportactionlogsworker/run @@ -0,0 +1,9 @@ +#! /bin/bash + +echo 'Starting Export Actions Log worker' + +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.exportactionlogsworker 2>&1 + +echo 'Export Actions Log worker exited' diff --git a/conf/init/service/batch/gcworker/log/run b/conf/init/service/batch/gcworker/log/run new file mode 100755 index 000000000..4bf67a575 --- /dev/null +++ b/conf/init/service/batch/gcworker/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t gcworker diff --git a/conf/init/service/batch/gcworker/run b/conf/init/service/batch/gcworker/run new file mode 100755 index 000000000..1f892342a --- /dev/null +++ b/conf/init/service/batch/gcworker/run @@ -0,0 +1,9 @@ +#! /bin/bash + +echo 'Starting GC worker' + +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.gc.gcworker 2>&1 + +echo 'Repository GC exited' diff --git a/conf/init/service/batch/globalpromstats/log/run b/conf/init/service/batch/globalpromstats/log/run new file mode 100755 index 000000000..67c474972 --- /dev/null +++ b/conf/init/service/batch/globalpromstats/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t globalpromstats diff --git a/conf/init/service/batch/globalpromstats/run b/conf/init/service/batch/globalpromstats/run new file mode 100755 index 000000000..a8f5627cd --- /dev/null +++ b/conf/init/service/batch/globalpromstats/run @@ -0,0 +1,9 @@ +#! 
/bin/bash + +echo 'Starting global prometheus stats worker' + +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.globalpromstats.globalpromstats + +echo 'Global prometheus stats exited' diff --git a/conf/init/service/batch/labelbackfillworker/log/run b/conf/init/service/batch/labelbackfillworker/log/run new file mode 100755 index 000000000..2437a88f1 --- /dev/null +++ b/conf/init/service/batch/labelbackfillworker/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t labelbackfillworker \ No newline at end of file diff --git a/conf/init/service/batch/labelbackfillworker/run b/conf/init/service/batch/labelbackfillworker/run new file mode 100755 index 000000000..1b7c3d799 --- /dev/null +++ b/conf/init/service/batch/labelbackfillworker/run @@ -0,0 +1,9 @@ +#! /bin/bash + +echo 'Starting label backfill worker' + +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.labelbackfillworker 2>&1 + +echo 'Repository label backfill exited' diff --git a/conf/init/service/logrotateworker/log/run b/conf/init/service/batch/logrotateworker/log/run similarity index 68% rename from conf/init/service/logrotateworker/log/run rename to conf/init/service/batch/logrotateworker/log/run index d8fe0054b..be6df3834 100755 --- a/conf/init/service/logrotateworker/log/run +++ b/conf/init/service/batch/logrotateworker/log/run @@ -1,2 +1,4 @@ #!/bin/sh + +# Start the logger exec logger -i -t logrotateworker diff --git a/conf/init/service/batch/logrotateworker/run b/conf/init/service/batch/logrotateworker/run new file mode 100755 index 000000000..57ffad5ff --- /dev/null +++ b/conf/init/service/batch/logrotateworker/run @@ -0,0 +1,9 @@ +#! /bin/bash + +echo 'Starting log rotation worker' + +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.logrotateworker + +echo 'Log rotation worker exited' diff --git a/conf/init/service/batch/namespacegcworker/log/run b/conf/init/service/batch/namespacegcworker/log/run new file mode 100755 index 000000000..6e43109a9 --- /dev/null +++ b/conf/init/service/batch/namespacegcworker/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t namespacegcworker diff --git a/conf/init/service/batch/namespacegcworker/run b/conf/init/service/batch/namespacegcworker/run new file mode 100755 index 000000000..04b41a0fb --- /dev/null +++ b/conf/init/service/batch/namespacegcworker/run @@ -0,0 +1,9 @@ +#! /bin/bash + +echo 'Starting Namespace GC worker' + +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.namespacegcworker 2>&1 + +echo 'Namespace GC exited' diff --git a/conf/init/service/batch/notificationworker/log/run b/conf/init/service/batch/notificationworker/log/run new file mode 100755 index 000000000..60d08f417 --- /dev/null +++ b/conf/init/service/batch/notificationworker/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t notificationworker diff --git a/conf/init/service/batch/notificationworker/run b/conf/init/service/batch/notificationworker/run new file mode 100755 index 000000000..7f5f3502f --- /dev/null +++ b/conf/init/service/batch/notificationworker/run @@ -0,0 +1,10 @@ +#! 
/bin/bash + +echo 'Starting notification worker' + +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} + +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.notificationworker.notificationworker + +echo 'Notification worker exited' \ No newline at end of file diff --git a/conf/init/service/batch/queuecleanupworker/log/run b/conf/init/service/batch/queuecleanupworker/log/run new file mode 100755 index 000000000..e4cf31f9f --- /dev/null +++ b/conf/init/service/batch/queuecleanupworker/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t queuecleanupworker diff --git a/conf/init/service/batch/queuecleanupworker/run b/conf/init/service/batch/queuecleanupworker/run new file mode 100755 index 000000000..96bdc88d5 --- /dev/null +++ b/conf/init/service/batch/queuecleanupworker/run @@ -0,0 +1,9 @@ +#! /bin/bash + +echo 'Starting Queue cleanup worker' + +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.queuecleanupworker 2>&1 + +echo 'Repository Queue cleanup exited' \ No newline at end of file diff --git a/conf/init/service/batch/repositoryactioncounter/log/run b/conf/init/service/batch/repositoryactioncounter/log/run new file mode 100755 index 000000000..35d76891a --- /dev/null +++ b/conf/init/service/batch/repositoryactioncounter/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t repositoryactioncounter diff --git a/conf/init/service/batch/repositoryactioncounter/run b/conf/init/service/batch/repositoryactioncounter/run new file mode 100755 index 000000000..d0aa9a748 --- /dev/null +++ b/conf/init/service/batch/repositoryactioncounter/run @@ -0,0 +1,9 @@ +#! /bin/bash + +echo 'Starting repository action count worker' + +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.repositoryactioncounter 2>&1 + +echo 'Repository action worker exited' \ No newline at end of file diff --git a/conf/init/service/batch/security_notification_worker/log/run b/conf/init/service/batch/security_notification_worker/log/run new file mode 100755 index 000000000..cf00fa381 --- /dev/null +++ b/conf/init/service/batch/security_notification_worker/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t security_notification_worker diff --git a/conf/init/service/batch/security_notification_worker/run b/conf/init/service/batch/security_notification_worker/run new file mode 100755 index 000000000..d1dd24a07 --- /dev/null +++ b/conf/init/service/batch/security_notification_worker/run @@ -0,0 +1,9 @@ +#! /bin/bash + +echo 'Starting security scanner notification worker' + +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.security_notification_worker 2>&1 + +echo 'Security scanner notification worker exited' diff --git a/conf/init/service/securityworker/log/run b/conf/init/service/batch/securityworker/log/run similarity index 68% rename from conf/init/service/securityworker/log/run rename to conf/init/service/batch/securityworker/log/run index 8de3dfdec..64052c402 100755 --- a/conf/init/service/securityworker/log/run +++ b/conf/init/service/batch/securityworker/log/run @@ -1,2 +1,4 @@ #!/bin/sh + +# Start the logger exec logger -i -t securityworker diff --git a/conf/init/service/batch/securityworker/run b/conf/init/service/batch/securityworker/run new file mode 100755 index 000000000..4498cf00a --- /dev/null +++ b/conf/init/service/batch/securityworker/run @@ -0,0 +1,9 @@ +#! 
/bin/bash + +echo 'Starting security scanner worker' + +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.securityworker.securityworker 2>&1 + +echo 'Security scanner worker exited' diff --git a/conf/init/service/batch/storagereplication/log/run b/conf/init/service/batch/storagereplication/log/run new file mode 100755 index 000000000..badf9a235 --- /dev/null +++ b/conf/init/service/batch/storagereplication/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t storagereplication diff --git a/conf/init/service/batch/storagereplication/run b/conf/init/service/batch/storagereplication/run new file mode 100755 index 000000000..1773070c6 --- /dev/null +++ b/conf/init/service/batch/storagereplication/run @@ -0,0 +1,9 @@ +#! /bin/bash + +echo 'Starting storage replication worker' + +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.storagereplication 2>&1 + +echo 'Repository storage replication exited' \ No newline at end of file diff --git a/conf/init/service/batch/tagbackfillworker/log/run b/conf/init/service/batch/tagbackfillworker/log/run new file mode 100755 index 000000000..1aaabc9b5 --- /dev/null +++ b/conf/init/service/batch/tagbackfillworker/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t tagbackfillworker \ No newline at end of file diff --git a/conf/init/service/batch/tagbackfillworker/run b/conf/init/service/batch/tagbackfillworker/run new file mode 100755 index 000000000..0a5ad5663 --- /dev/null +++ b/conf/init/service/batch/tagbackfillworker/run @@ -0,0 +1,9 @@ +#! /bin/bash + +echo 'Starting tag backfill worker' + +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.tagbackfillworker 2>&1 + +echo 'Repository tag backfill exited' diff --git a/conf/init/service/batch/teamsyncworker/log/run b/conf/init/service/batch/teamsyncworker/log/run new file mode 100755 index 000000000..a96975768 --- /dev/null +++ b/conf/init/service/batch/teamsyncworker/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t teamsyncworker diff --git a/conf/init/service/batch/teamsyncworker/run b/conf/init/service/batch/teamsyncworker/run new file mode 100755 index 000000000..2ec485670 --- /dev/null +++ b/conf/init/service/batch/teamsyncworker/run @@ -0,0 +1,9 @@ +#! /bin/bash + +echo 'Starting team synchronization worker' + +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.teamsyncworker.teamsyncworker 2>&1 + +echo 'Team synchronization worker exited' \ No newline at end of file diff --git a/conf/init/service/buildlogsarchiver/log/run b/conf/init/service/buildlogsarchiver/log/run deleted file mode 100755 index 3bcd9ba8a..000000000 --- a/conf/init/service/buildlogsarchiver/log/run +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec logger -i -t buildlogsarchiver \ No newline at end of file diff --git a/conf/init/service/buildlogsarchiver/run b/conf/init/service/buildlogsarchiver/run deleted file mode 100755 index df3d4b05f..000000000 --- a/conf/init/service/buildlogsarchiver/run +++ /dev/null @@ -1,8 +0,0 @@ -#! 
/bin/bash - -echo 'Starting build logs archiver worker' - -cd / -venv/bin/python -m workers.buildlogsarchiver 2>&1 - -echo 'Diffs worker exited' \ No newline at end of file diff --git a/conf/init/service/buildmanager/log/run b/conf/init/service/buildmanager/log/run deleted file mode 100755 index b35e28af9..000000000 --- a/conf/init/service/buildmanager/log/run +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec logger -i -t buildmanager \ No newline at end of file diff --git a/conf/init/service/buildmanager/run b/conf/init/service/buildmanager/run deleted file mode 100755 index d0bc6564f..000000000 --- a/conf/init/service/buildmanager/run +++ /dev/null @@ -1,8 +0,0 @@ -#! /bin/bash - -echo 'Starting internal build manager' - -cd / -venv/bin/python -m buildman.builder 2>&1 - -echo 'Internal build manager exited' \ No newline at end of file diff --git a/conf/init/service/gcworker/log/run b/conf/init/service/gcworker/log/run deleted file mode 100755 index cf6bdc1d7..000000000 --- a/conf/init/service/gcworker/log/run +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec logger -i -t gcworker \ No newline at end of file diff --git a/conf/init/service/gcworker/run b/conf/init/service/gcworker/run deleted file mode 100755 index 6a843d4b8..000000000 --- a/conf/init/service/gcworker/run +++ /dev/null @@ -1,8 +0,0 @@ -#! /bin/bash - -echo 'Starting GC worker' - -cd / -venv/bin/python -m workers.gcworker 2>&1 - -echo 'Repository GC exited' \ No newline at end of file diff --git a/conf/init/service/gunicorn_registry/log/run b/conf/init/service/gunicorn_registry/log/run deleted file mode 100755 index 5b5b37af9..000000000 --- a/conf/init/service/gunicorn_registry/log/run +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec logger -i -t gunicorn_registry \ No newline at end of file diff --git a/conf/init/service/gunicorn_registry/run b/conf/init/service/gunicorn_registry/run deleted file mode 100755 index 3c88fd0e3..000000000 --- a/conf/init/service/gunicorn_registry/run +++ /dev/null @@ -1,8 +0,0 @@ -#! /bin/bash - -echo 'Starting gunicon' - -cd / -nice -n 10 venv/bin/gunicorn -c conf/gunicorn_registry.py registry:application - -echo 'Gunicorn exited' \ No newline at end of file diff --git a/conf/init/service/gunicorn_secscan/run b/conf/init/service/gunicorn_secscan/run deleted file mode 100755 index d78ebedcd..000000000 --- a/conf/init/service/gunicorn_secscan/run +++ /dev/null @@ -1,8 +0,0 @@ -#! /bin/bash - -echo 'Starting gunicon' - -cd / -venv/bin/gunicorn -c conf/gunicorn_secscan.py secscan:application - -echo 'Gunicorn exited' \ No newline at end of file diff --git a/conf/init/service/gunicorn_verbs/log/run b/conf/init/service/gunicorn_verbs/log/run deleted file mode 100755 index d0bc335d7..000000000 --- a/conf/init/service/gunicorn_verbs/log/run +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec logger -i -t gunicorn_verbs \ No newline at end of file diff --git a/conf/init/service/gunicorn_verbs/run b/conf/init/service/gunicorn_verbs/run deleted file mode 100755 index d76a7adcf..000000000 --- a/conf/init/service/gunicorn_verbs/run +++ /dev/null @@ -1,8 +0,0 @@ -#! 
/bin/bash - -echo 'Starting gunicon' - -cd / -nice -n 10 venv/bin/gunicorn -c conf/gunicorn_verbs.py verbs:application - -echo 'Gunicorn exited' \ No newline at end of file diff --git a/conf/init/service/gunicorn_web/log/run b/conf/init/service/gunicorn_web/log/run deleted file mode 100755 index c96d365a5..000000000 --- a/conf/init/service/gunicorn_web/log/run +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec logger -i -t gunicorn_web \ No newline at end of file diff --git a/conf/init/service/gunicorn_web/run b/conf/init/service/gunicorn_web/run deleted file mode 100755 index 86d107618..000000000 --- a/conf/init/service/gunicorn_web/run +++ /dev/null @@ -1,8 +0,0 @@ -#! /bin/bash - -echo 'Starting gunicon' - -cd / -venv/bin/gunicorn -c conf/gunicorn_web.py web:application - -echo 'Gunicorn exited' \ No newline at end of file diff --git a/conf/init/service/interactive/dnsmasq/log/run b/conf/init/service/interactive/dnsmasq/log/run new file mode 100755 index 000000000..baf5af08e --- /dev/null +++ b/conf/init/service/interactive/dnsmasq/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t dnsmasq diff --git a/conf/init/service/interactive/dnsmasq/run b/conf/init/service/interactive/dnsmasq/run new file mode 100755 index 000000000..faa868091 --- /dev/null +++ b/conf/init/service/interactive/dnsmasq/run @@ -0,0 +1,7 @@ +#! /bin/bash + +echo 'Starting dnsmasq' + +/usr/sbin/dnsmasq --no-daemon --user=root --listen-address=127.0.0.1 + +echo 'dnsmasq' diff --git a/conf/init/service/interactive/gunicorn_registry/log/run b/conf/init/service/interactive/gunicorn_registry/log/run new file mode 100755 index 000000000..5eceb18f5 --- /dev/null +++ b/conf/init/service/interactive/gunicorn_registry/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t gunicorn_registry diff --git a/conf/init/service/interactive/gunicorn_registry/run b/conf/init/service/interactive/gunicorn_registry/run new file mode 100755 index 000000000..4b35b44ab --- /dev/null +++ b/conf/init/service/interactive/gunicorn_registry/run @@ -0,0 +1,12 @@ +#! /bin/bash + +echo 'Starting gunicon' + +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} +DB_CONNECTION_POOLING=${DB_CONNECTION_POOLING:-"true"} + +cd ${QUAYDIR:-"/"} +DB_CONNECTION_POOLING=$DB_CONNECTION_POOLING PYTHONPATH=$QUAYPATH nice -n 10 venv/bin/gunicorn -c $QUAYCONF/gunicorn_registry.py registry:application + +echo 'Gunicorn exited' \ No newline at end of file diff --git a/conf/init/service/gunicorn_secscan/log/run b/conf/init/service/interactive/gunicorn_secscan/log/run similarity index 69% rename from conf/init/service/gunicorn_secscan/log/run rename to conf/init/service/interactive/gunicorn_secscan/log/run index 0f061a86e..056d6d8a1 100755 --- a/conf/init/service/gunicorn_secscan/log/run +++ b/conf/init/service/interactive/gunicorn_secscan/log/run @@ -1,2 +1,4 @@ #!/bin/sh + +# Start the logger exec logger -i -t gunicorn_secscan diff --git a/conf/init/service/interactive/gunicorn_secscan/run b/conf/init/service/interactive/gunicorn_secscan/run new file mode 100755 index 000000000..23f24bf7d --- /dev/null +++ b/conf/init/service/interactive/gunicorn_secscan/run @@ -0,0 +1,11 @@ +#! 
/bin/bash + +echo 'Starting gunicon' + +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} + +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/gunicorn -c $QUAYCONF/gunicorn_secscan.py secscan:application + +echo 'Gunicorn exited' \ No newline at end of file diff --git a/conf/init/service/interactive/gunicorn_verbs/log/run b/conf/init/service/interactive/gunicorn_verbs/log/run new file mode 100755 index 000000000..105da2862 --- /dev/null +++ b/conf/init/service/interactive/gunicorn_verbs/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t gunicorn_verbs diff --git a/conf/init/service/interactive/gunicorn_verbs/run b/conf/init/service/interactive/gunicorn_verbs/run new file mode 100755 index 000000000..eb7d7e35e --- /dev/null +++ b/conf/init/service/interactive/gunicorn_verbs/run @@ -0,0 +1,11 @@ +#! /bin/bash + +echo 'Starting gunicon' + +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} + +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH nice -n 10 venv/bin/gunicorn -c $QUAYCONF/gunicorn_verbs.py verbs:application + +echo 'Gunicorn exited' \ No newline at end of file diff --git a/conf/init/service/interactive/gunicorn_web/log/run b/conf/init/service/interactive/gunicorn_web/log/run new file mode 100755 index 000000000..1394100e4 --- /dev/null +++ b/conf/init/service/interactive/gunicorn_web/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t gunicorn_web diff --git a/conf/init/service/interactive/gunicorn_web/run b/conf/init/service/interactive/gunicorn_web/run new file mode 100755 index 000000000..76ed8edde --- /dev/null +++ b/conf/init/service/interactive/gunicorn_web/run @@ -0,0 +1,11 @@ +#! /bin/bash + +echo 'Starting gunicon' + +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} + +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/gunicorn -c $QUAYCONF/gunicorn_web.py web:application + +echo 'Gunicorn exited' \ No newline at end of file diff --git a/conf/init/service/interactive/jwtproxy/log/run b/conf/init/service/interactive/jwtproxy/log/run new file mode 100755 index 000000000..ec79e337a --- /dev/null +++ b/conf/init/service/interactive/jwtproxy/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t jwtproxy diff --git a/conf/init/service/interactive/jwtproxy/run b/conf/init/service/interactive/jwtproxy/run new file mode 100755 index 000000000..7c77b1cd7 --- /dev/null +++ b/conf/init/service/interactive/jwtproxy/run @@ -0,0 +1,16 @@ +#! /bin/bash + +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} + +if [ -f $QUAYCONF/jwtproxy_conf.yaml ]; +then + echo 'Starting jwtproxy' + /usr/local/bin/jwtproxy --config $QUAYCONF/jwtproxy_conf.yaml + rm /tmp/jwtproxy_secscan.sock + echo 'Jwtproxy exited' +else + sleep 1 +fi diff --git a/conf/init/service/interactive/memcached/log/run b/conf/init/service/interactive/memcached/log/run new file mode 100755 index 000000000..25afe47dd --- /dev/null +++ b/conf/init/service/interactive/memcached/log/run @@ -0,0 +1,7 @@ +#!/bin/sh + +# Ensure dependencies start before the logger +sv check syslog-ng > /dev/null || exit 1 + +# Start the logger +exec logger -i -t memcached diff --git a/conf/init/service/interactive/memcached/run b/conf/init/service/interactive/memcached/run new file mode 100755 index 000000000..720c8ad3e --- /dev/null +++ b/conf/init/service/interactive/memcached/run @@ -0,0 +1,12 @@ +#! 
/bin/bash + +echo 'Starting memcached' + +if [ "$DEBUGLOG" == "true" ] +then + memcached -u memcached -m 64 -vv -l 127.0.0.1 -p 18080 +else + memcached -u memcached -m 64 -l 127.0.0.1 -p 18080 +fi + +echo 'memcached exited' diff --git a/conf/init/service/interactive/nginx/log/run b/conf/init/service/interactive/nginx/log/run new file mode 100755 index 000000000..a75f76208 --- /dev/null +++ b/conf/init/service/interactive/nginx/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t nginx diff --git a/conf/init/service/interactive/nginx/run b/conf/init/service/interactive/nginx/run new file mode 100755 index 000000000..85e4511f9 --- /dev/null +++ b/conf/init/service/interactive/nginx/run @@ -0,0 +1,12 @@ +#! /bin/bash + +echo 'Starting nginx' + +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} + +/usr/sbin/nginx -c $QUAYCONF/nginx/nginx.conf + +echo 'Nginx exited' diff --git a/conf/init/service/interactive/prometheus-aggregator/log/run b/conf/init/service/interactive/prometheus-aggregator/log/run new file mode 100755 index 000000000..a1ca97fa3 --- /dev/null +++ b/conf/init/service/interactive/prometheus-aggregator/log/run @@ -0,0 +1,4 @@ +#!/bin/sh + +# Start the logger +exec logger -i -t prometheus-aggregator diff --git a/conf/init/service/interactive/prometheus-aggregator/run b/conf/init/service/interactive/prometheus-aggregator/run new file mode 100755 index 000000000..fc9b157c7 --- /dev/null +++ b/conf/init/service/interactive/prometheus-aggregator/run @@ -0,0 +1,7 @@ +#! /bin/bash + +echo 'Starting prometheus aggregator' + +/usr/local/bin/prometheus-aggregator + +echo 'Prometheus aggregator exited' \ No newline at end of file diff --git a/conf/init/service/service_key_worker/log/run b/conf/init/service/interactive/servicekeyworker/log/run similarity index 70% rename from conf/init/service/service_key_worker/log/run rename to conf/init/service/interactive/servicekeyworker/log/run index 410fabb1a..1c548aef3 100755 --- a/conf/init/service/service_key_worker/log/run +++ b/conf/init/service/interactive/servicekeyworker/log/run @@ -1,2 +1,4 @@ #!/bin/sh + +# Start the logger exec logger -i -t service_key_worker diff --git a/conf/init/service/interactive/servicekeyworker/run b/conf/init/service/interactive/servicekeyworker/run new file mode 100755 index 000000000..f1b9635e5 --- /dev/null +++ b/conf/init/service/interactive/servicekeyworker/run @@ -0,0 +1,9 @@ +#! /bin/bash + +echo 'Starting service key worker' + +QUAYPATH=${QUAYPATH:-"."} +cd ${QUAYDIR:-"/"} +PYTHONPATH=$QUAYPATH venv/bin/python -m workers.servicekeyworker.servicekeyworker 2>&1 + +echo 'Service key worker exited' diff --git a/conf/init/service/jwtproxy/log/run b/conf/init/service/jwtproxy/log/run deleted file mode 100755 index 0fd684fe2..000000000 --- a/conf/init/service/jwtproxy/log/run +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec logger -i -t jwtproxy diff --git a/conf/init/service/jwtproxy/run b/conf/init/service/jwtproxy/run deleted file mode 100755 index 263169cde..000000000 --- a/conf/init/service/jwtproxy/run +++ /dev/null @@ -1,12 +0,0 @@ -#! 
/bin/bash -cd / - -if [ -f conf/jwtproxy_conf.yaml ]; -then - echo 'Starting jwtproxy' - /usr/local/bin/jwtproxy --config conf/jwtproxy_conf.yaml - rm /tmp/jwtproxy_secscan.sock - echo 'Jwtproxy exited' -else - sleep 1 -fi diff --git a/conf/init/service/logrotateworker/run b/conf/init/service/logrotateworker/run deleted file mode 100755 index a99aa6ad3..000000000 --- a/conf/init/service/logrotateworker/run +++ /dev/null @@ -1,8 +0,0 @@ -#! /bin/bash - -echo 'Starting log rotation worker' - -cd / -venv/bin/python -m workers.logrotateworker - -echo 'Log rotation worker exited' diff --git a/conf/init/service/nginx/log/run b/conf/init/service/nginx/log/run deleted file mode 100755 index 168af6d3e..000000000 --- a/conf/init/service/nginx/log/run +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec logger -i -t nginx \ No newline at end of file diff --git a/conf/init/service/nginx/run b/conf/init/service/nginx/run deleted file mode 100755 index e5cc1aaac..000000000 --- a/conf/init/service/nginx/run +++ /dev/null @@ -1,14 +0,0 @@ -#! /bin/bash - -echo 'Starting nginx' - -if [ -f /conf/stack/ssl.key ] -then - echo "Using HTTPS" - /usr/local/nginx/sbin/nginx -c /conf/nginx.conf -else - echo "No SSL key provided, using HTTP" - /usr/local/nginx/sbin/nginx -c /conf/nginx-nossl.conf -fi - -echo 'Nginx exited' \ No newline at end of file diff --git a/conf/init/service/notificationworker/log/run b/conf/init/service/notificationworker/log/run deleted file mode 100755 index 49747f3ce..000000000 --- a/conf/init/service/notificationworker/log/run +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec logger -i -t notificationworker \ No newline at end of file diff --git a/conf/init/service/notificationworker/run b/conf/init/service/notificationworker/run deleted file mode 100755 index b149d9f34..000000000 --- a/conf/init/service/notificationworker/run +++ /dev/null @@ -1,8 +0,0 @@ -#! /bin/bash - -echo 'Starting notification worker' - -cd / -venv/bin/python -m workers.notificationworker - -echo 'Notification worker exited' \ No newline at end of file diff --git a/conf/init/service/repositoryactioncounter/log/run b/conf/init/service/repositoryactioncounter/log/run deleted file mode 100755 index d86d5766f..000000000 --- a/conf/init/service/repositoryactioncounter/log/run +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec logger -i -t repositoryactioncounter \ No newline at end of file diff --git a/conf/init/service/repositoryactioncounter/run b/conf/init/service/repositoryactioncounter/run deleted file mode 100755 index 08e0e3164..000000000 --- a/conf/init/service/repositoryactioncounter/run +++ /dev/null @@ -1,8 +0,0 @@ -#! /bin/bash - -echo 'Starting repository action count worker' - -cd / -venv/bin/python -m workers.repositoryactioncounter 2>&1 - -echo 'Repository action worker exited' \ No newline at end of file diff --git a/conf/init/service/security_notification_worker/log/run b/conf/init/service/security_notification_worker/log/run deleted file mode 100755 index 262fed98e..000000000 --- a/conf/init/service/security_notification_worker/log/run +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec logger -i -t securitynotificationworker diff --git a/conf/init/service/security_notification_worker/run b/conf/init/service/security_notification_worker/run deleted file mode 100755 index 83c94e686..000000000 --- a/conf/init/service/security_notification_worker/run +++ /dev/null @@ -1,8 +0,0 @@ -#! 
/bin/bash - -echo 'Starting security scanner notification worker' - -cd / -venv/bin/python -m workers.security_notification_worker 2>&1 - -echo 'Security scanner notification worker exited' diff --git a/conf/init/service/securityworker/run b/conf/init/service/securityworker/run deleted file mode 100755 index c40f9aa4b..000000000 --- a/conf/init/service/securityworker/run +++ /dev/null @@ -1,8 +0,0 @@ -#! /bin/bash - -echo 'Starting security scanner worker' - -cd / -venv/bin/python -m workers.securityworker 2>&1 - -echo 'Security scanner worker exited' diff --git a/conf/init/service/service_key_worker/run b/conf/init/service/service_key_worker/run deleted file mode 100755 index 20b578c24..000000000 --- a/conf/init/service/service_key_worker/run +++ /dev/null @@ -1,8 +0,0 @@ -#! /bin/bash - -echo 'Starting service key worker' - -cd / -venv/bin/python -m workers.service_key_worker 2>&1 - -echo 'Service key worker exited' diff --git a/conf/init/service/storagereplication/log/run b/conf/init/service/storagereplication/log/run deleted file mode 100755 index adcd2b63f..000000000 --- a/conf/init/service/storagereplication/log/run +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -exec logger -i -t storagereplication \ No newline at end of file diff --git a/conf/init/service/storagereplication/run b/conf/init/service/storagereplication/run deleted file mode 100755 index ed62731f8..000000000 --- a/conf/init/service/storagereplication/run +++ /dev/null @@ -1,8 +0,0 @@ -#! /bin/bash - -echo 'Starting storage replication worker' - -cd / -venv/bin/python -m workers.storagereplication 2>&1 - -echo 'Repository storage replication exited' \ No newline at end of file diff --git a/conf/init/supervisord_conf_create.py b/conf/init/supervisord_conf_create.py new file mode 100644 index 000000000..50f5cabbf --- /dev/null +++ b/conf/init/supervisord_conf_create.py @@ -0,0 +1,147 @@ +import os +import os.path + +import jinja2 + +QUAYPATH = os.getenv("QUAYPATH", ".") +QUAYDIR = os.getenv("QUAYDIR", "/") +QUAYCONF_DIR = os.getenv("QUAYCONF", os.path.join(QUAYDIR, QUAYPATH, "conf")) + +QUAY_SERVICES = os.getenv("QUAY_SERVICES", []) +QUAY_OVERRIDE_SERVICES = os.getenv("QUAY_OVERRIDE_SERVICES", []) + + +def default_services(): + return { + "blobuploadcleanupworker": { + "autostart": "true" + }, + "buildlogsarchiver": { + "autostart": "true" + }, + "builder": { + "autostart": "true" + }, + "chunkcleanupworker": { + "autostart": "true" + }, + "expiredappspecifictokenworker": { + "autostart": "true" + }, + "exportactionlogsworker": { + "autostart": "true" + }, + "gcworker": { + "autostart": "true" + }, + "globalpromstats": { + "autostart": "true" + }, + "labelbackfillworker": { + "autostart": "true" + }, + "logrotateworker": { + "autostart": "true" + }, + "namespacegcworker": { + "autostart": "true" + }, + "notificationworker": { + "autostart": "true" + }, + "queuecleanupworker": { + "autostart": "true" + }, + "repositoryactioncounter": { + "autostart": "true" + }, + "security_notification_worker": { + "autostart": "true" + }, + "securityworker": { + "autostart": "true" + }, + "storagereplication": { + "autostart": "true" + }, + "tagbackfillworker": { + "autostart": "true" + }, + "teamsyncworker": { + "autostart": "true" + }, + "dnsmasq": { + "autostart": "true" + }, + "gunicorn-registry": { + "autostart": "true" + }, + "gunicorn-secscan": { + "autostart": "true" + }, + "gunicorn-verbs": { + "autostart": "true" + }, + "gunicorn-web": { + "autostart": "true" + }, + "ip-resolver-update-worker": { + "autostart": "true" + }, + 
"jwtproxy": { + "autostart": "true" + }, + "memcache": { + "autostart": "true" + }, + "nginx": { + "autostart": "true" + }, + "prometheus-aggregator": { + "autostart": "true" + }, + "servicekey": { + "autostart": "true" + }, + "repomirrorworker": { + "autostart": "false" + } +} + + +def generate_supervisord_config(filename, config): + with open(filename + ".jnj") as f: + template = jinja2.Template(f.read()) + rendered = template.render(config=config) + + with open(filename, 'w') as f: + f.write(rendered) + + +def limit_services(config, enabled_services): + if enabled_services == []: + return + + for service in config.keys(): + if service in enabled_services: + config[service]["autostart"] = "true" + else: + config[service]["autostart"] = "false" + + +def override_services(config, override_services): + if override_services == []: + return + + for service in config.keys(): + if service + "=true" in override_services: + config[service]["autostart"] = "true" + elif service + "=false" in override_services: + config[service]["autostart"] = "false" + + +if __name__ == "__main__": + config = default_services() + limit_services(config, QUAY_SERVICES) + override_services(config, QUAY_OVERRIDE_SERVICES) + generate_supervisord_config(os.path.join(QUAYCONF_DIR, 'supervisord.conf'), config) diff --git a/conf/init/supervisord_conf_create.sh b/conf/init/supervisord_conf_create.sh new file mode 100755 index 000000000..307a8f670 --- /dev/null +++ b/conf/init/supervisord_conf_create.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +QUAYDIR=${QUAYDIR:-"/"} +QUAYPATH=${QUAYPATH:-"."} +QUAYCONF=${QUAYCONF:-"$QUAYPATH/conf"} + +cd $QUAYDIR +python $QUAYCONF/init/supervisord_conf_create.py diff --git a/conf/init/syslog-ng.conf b/conf/init/syslog-ng.conf deleted file mode 100644 index 678d89366..000000000 --- a/conf/init/syslog-ng.conf +++ /dev/null @@ -1,143 +0,0 @@ -@version: 3.5 -@include "scl.conf" -@include "`scl-root`/system/tty10.conf" - -# Syslog-ng configuration file, compatible with default Debian syslogd -# installation. - -# First, set some global options. -options { chain_hostnames(off); flush_lines(0); use_dns(no); use_fqdn(no); - owner("root"); group("adm"); perm(0640); stats_freq(0); - bad_hostname("^gconfd$"); -}; - -######################## -# Sources -######################## -# This is the default behavior of sysklogd package -# Logs may come from unix stream, but not from another machine. -# -source s_src { - unix-stream("/dev/log"); - internal(); -}; - -# If you wish to get logs from remote machine you should uncomment -# this and comment the above source line. -# -#source s_net { tcp(ip(127.0.0.1) port(1000)); }; - -######################## -# Destinations -######################## -# First some standard logfile -# -destination d_auth { file("/var/log/auth.log"); }; -destination d_cron { file("/var/log/cron.log"); }; -destination d_daemon { file("/var/log/daemon.log"); }; -destination d_kern { file("/var/log/kern.log"); }; -destination d_lpr { file("/var/log/lpr.log"); }; -destination d_mail { file("/var/log/mail.log"); }; -destination d_syslog { file("/var/log/syslog"); }; -destination d_user { file("/var/log/user.log"); }; -destination d_uucp { file("/var/log/uucp.log"); }; - -# This files are the log come from the mail subsystem. 
-# -destination d_mailinfo { file("/var/log/mail.info"); }; -destination d_mailwarn { file("/var/log/mail.warn"); }; -destination d_mailerr { file("/var/log/mail.err"); }; - -# Logging for INN news system -# -destination d_newscrit { file("/var/log/news/news.crit"); }; -destination d_newserr { file("/var/log/news/news.err"); }; -destination d_newsnotice { file("/var/log/news/news.notice"); }; - -# Some `catch-all' logfiles. -# -destination d_debug { file("/var/log/debug"); }; -destination d_error { file("/var/log/error"); }; -destination d_messages { file("/var/log/messages"); }; - -# The named pipe /dev/xconsole is for the nsole' utility. To use it, -# you must invoke nsole' with the -file' option: -# -# $ xconsole -file /dev/xconsole [...] -# -destination d_xconsole { pipe("/dev/xconsole"); }; - -# Send the messages to an other host -# -#destination d_net { tcp("127.0.0.1" port(1000) log_fifo_size(1000)); }; - -# Debian only -destination d_ppp { file("/var/log/ppp.log"); }; - -######################## -# Filters -######################## -# Here's come the filter options. With this rules, we can set which -# message go where. - -filter f_dbg { level(debug); }; -filter f_info { level(info); }; -filter f_notice { level(notice); }; -filter f_warn { level(warn); }; -filter f_err { level(err); }; -filter f_crit { level(crit .. emerg); }; - -filter f_debug { level(debug) and not facility(auth, authpriv, news, mail); }; -filter f_error { level(err .. emerg) ; }; - -filter f_auth { facility(auth, authpriv) and not filter(f_debug); }; -filter f_cron { facility(cron) and not filter(f_debug); }; -filter f_daemon { facility(daemon) and not filter(f_debug); }; -filter f_kern { facility(kern) and not filter(f_debug); }; -filter f_lpr { facility(lpr) and not filter(f_debug); }; -filter f_local { facility(local0, local1, local3, local4, local5, - local6, local7) and not filter(f_debug); }; -filter f_mail { facility(mail) and not filter(f_debug); }; -filter f_news { facility(news) and not filter(f_debug); }; -filter f_syslog3 { not facility(auth, authpriv, mail) and not filter(f_debug); }; -filter f_uucp { facility(uucp) and not filter(f_debug); }; - -filter f_cnews { level(notice, err, crit) and facility(news); }; -filter f_cother { level(debug, info, notice, warn) or facility(daemon, mail); }; - -filter f_ppp { facility(local2) and not filter(f_debug); }; -filter f_console { level(warn .. 
emerg); }; - -######################## -# Log paths -######################## -log { source(s_src); filter(f_auth); destination(d_auth); }; -log { source(s_src); filter(f_cron); destination(d_cron); }; -log { source(s_src); filter(f_daemon); destination(d_daemon); }; -log { source(s_src); filter(f_kern); destination(d_kern); }; -log { source(s_src); filter(f_lpr); destination(d_lpr); }; -log { source(s_src); filter(f_syslog3); destination(d_syslog); }; -log { source(s_src); filter(f_uucp); destination(d_uucp); }; - -log { source(s_src); filter(f_mail); destination(d_mail); }; -#log { source(s_src); filter(f_mail); filter(f_info); destination(d_mailinfo); }; -#log { source(s_src); filter(f_mail); filter(f_warn); destination(d_mailwarn); }; -#log { source(s_src); filter(f_mail); filter(f_err); destination(d_mailerr); }; - -log { source(s_src); filter(f_news); filter(f_crit); destination(d_newscrit); }; -log { source(s_src); filter(f_news); filter(f_err); destination(d_newserr); }; -log { source(s_src); filter(f_news); filter(f_notice); destination(d_newsnotice); }; - -#log { source(s_src); filter(f_ppp); destination(d_ppp); }; - -log { source(s_src); filter(f_debug); destination(d_debug); }; -log { source(s_src); filter(f_error); destination(d_error); }; - -# All messages send to a remote site -# -#log { source(s_src); destination(d_net); }; - -### -# Include all config files in /etc/syslog-ng/conf.d/ -### -@include "/etc/syslog-ng/conf.d/*.conf" diff --git a/conf/init/test/__init__.py b/conf/init/test/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/conf/init/test/test_supervisord_conf_create.py b/conf/init/test/test_supervisord_conf_create.py new file mode 100644 index 000000000..8972b2e39 --- /dev/null +++ b/conf/init/test/test_supervisord_conf_create.py @@ -0,0 +1,778 @@ +import os +import pytest +import json +import yaml +import jinja2 + +from ..supervisord_conf_create import QUAYCONF_DIR, default_services, limit_services + +def render_supervisord_conf(config): + with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../supervisord.conf.jnj")) as f: + template = jinja2.Template(f.read()) + return template.render(config=config) + +def test_supervisord_conf_create_defaults(): + config = default_services() + limit_services(config, []) + rendered = render_supervisord_conf(config) + + expected = """[supervisord] +nodaemon=true + +[unix_http_server] +file=%(ENV_QUAYCONF)s/supervisord.sock +user=root + +[supervisorctl] +serverurl=unix:///%(ENV_QUAYCONF)s/supervisord.sock + +[rpcinterface:supervisor] +supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface + +[eventlistener:stdout] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command = supervisor_stdout +buffer_size = 1024 +events = PROCESS_LOG +result_handler = supervisor_stdout:event_handler + +;;; Run batch scripts +[program:blobuploadcleanupworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.blobuploadcleanupworker.blobuploadcleanupworker +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:buildlogsarchiver] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.buildlogsarchiver.buildlogsarchiver +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + 
+[program:builder] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m buildman.builder +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:chunkcleanupworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.chunkcleanupworker +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:expiredappspecifictokenworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.expiredappspecifictokenworker +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:exportactionlogsworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.exportactionlogsworker +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:gcworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.gc.gcworker +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:globalpromstats] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.globalpromstats.globalpromstats +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:labelbackfillworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.labelbackfillworker +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:logrotateworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.logrotateworker +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:namespacegcworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.namespacegcworker +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:notificationworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.notificationworker.notificationworker +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:queuecleanupworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.queuecleanupworker +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:repositoryactioncounter] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.repositoryactioncounter +autostart 
= true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:security_notification_worker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.security_notification_worker +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:securityworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.securityworker.securityworker +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:storagereplication] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.storagereplication +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:tagbackfillworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.tagbackfillworker +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:teamsyncworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.teamsyncworker.teamsyncworker +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +;;; Run interactive scripts +[program:dnsmasq] +command=/usr/sbin/dnsmasq --no-daemon --user=root --listen-address=127.0.0.1 --port=8053 +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:gunicorn-registry] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s, + DB_CONNECTION_POOLING=%(ENV_DB_CONNECTION_POOLING_REGISTRY)s +command=nice -n 10 gunicorn -c %(ENV_QUAYCONF)s/gunicorn_registry.py registry:application +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:gunicorn-secscan] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=gunicorn -c %(ENV_QUAYCONF)s/gunicorn_secscan.py secscan:application +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:gunicorn-verbs] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=nice -n 10 gunicorn -c %(ENV_QUAYCONF)s/gunicorn_verbs.py verbs:application +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:gunicorn-web] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=gunicorn -c %(ENV_QUAYCONF)s/gunicorn_web.py web:application +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:jwtproxy] 
+command=/usr/local/bin/jwtproxy --config %(ENV_QUAYCONF)s/jwtproxy_conf.yaml +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:memcache] +command=memcached -u memcached -m 64 -l 127.0.0.1 -p 18080 +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:nginx] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=nginx -c %(ENV_QUAYCONF)s/nginx/nginx.conf +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:prometheus-aggregator] +command=/usr/local/bin/prometheus-aggregator +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:servicekey] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.servicekeyworker.servicekeyworker +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:repomirrorworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.repomirrorworker.repomirrorworker +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true +# EOF NO NEWLINE""" + assert rendered == expected + +def test_supervisord_conf_create_all_overrides(): + config = default_services() + limit_services(config, "servicekey,prometheus-aggregator") + rendered = render_supervisord_conf(config) + + expected = """[supervisord] +nodaemon=true + +[unix_http_server] +file=%(ENV_QUAYCONF)s/supervisord.sock +user=root + +[supervisorctl] +serverurl=unix:///%(ENV_QUAYCONF)s/supervisord.sock + +[rpcinterface:supervisor] +supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface + +[eventlistener:stdout] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command = supervisor_stdout +buffer_size = 1024 +events = PROCESS_LOG +result_handler = supervisor_stdout:event_handler + +;;; Run batch scripts +[program:blobuploadcleanupworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.blobuploadcleanupworker.blobuploadcleanupworker +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:buildlogsarchiver] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.buildlogsarchiver.buildlogsarchiver +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:builder] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m buildman.builder +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:chunkcleanupworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m 
workers.chunkcleanupworker +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:expiredappspecifictokenworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.expiredappspecifictokenworker +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:exportactionlogsworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.exportactionlogsworker +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:gcworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.gc.gcworker +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:globalpromstats] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.globalpromstats.globalpromstats +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:labelbackfillworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.labelbackfillworker +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:logrotateworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.logrotateworker +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:namespacegcworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.namespacegcworker +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:notificationworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.notificationworker.notificationworker +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:queuecleanupworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.queuecleanupworker +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:repositoryactioncounter] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.repositoryactioncounter +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:security_notification_worker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.security_notification_worker +autostart = false +stdout_logfile=/dev/stdout 
+stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:securityworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.securityworker.securityworker +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:storagereplication] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.storagereplication +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:tagbackfillworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.tagbackfillworker +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:teamsyncworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.teamsyncworker.teamsyncworker +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +;;; Run interactive scripts +[program:dnsmasq] +command=/usr/sbin/dnsmasq --no-daemon --user=root --listen-address=127.0.0.1 --port=8053 +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:gunicorn-registry] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s, + DB_CONNECTION_POOLING=%(ENV_DB_CONNECTION_POOLING_REGISTRY)s +command=nice -n 10 gunicorn -c %(ENV_QUAYCONF)s/gunicorn_registry.py registry:application +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:gunicorn-secscan] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=gunicorn -c %(ENV_QUAYCONF)s/gunicorn_secscan.py secscan:application +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:gunicorn-verbs] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=nice -n 10 gunicorn -c %(ENV_QUAYCONF)s/gunicorn_verbs.py verbs:application +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:gunicorn-web] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=gunicorn -c %(ENV_QUAYCONF)s/gunicorn_web.py web:application +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:jwtproxy] +command=/usr/local/bin/jwtproxy --config %(ENV_QUAYCONF)s/jwtproxy_conf.yaml +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:memcache] +command=memcached -u memcached -m 64 -l 127.0.0.1 -p 18080 +autostart = false 
+stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:nginx] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=nginx -c %(ENV_QUAYCONF)s/nginx/nginx.conf +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:prometheus-aggregator] +command=/usr/local/bin/prometheus-aggregator +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:servicekey] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.servicekeyworker.servicekeyworker +autostart = true +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:repomirrorworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.repomirrorworker.repomirrorworker +autostart = false +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true +# EOF NO NEWLINE""" + assert rendered == expected diff --git a/conf/init/v3_migration.py b/conf/init/v3_migration.py new file mode 100644 index 000000000..89fe72770 --- /dev/null +++ b/conf/init/v3_migration.py @@ -0,0 +1,18 @@ +from app import app +from active_migration import ActiveDataMigration + +if not app.config.get('SETUP_COMPLETE', False): + print 'head' +else: + v3_upgrade_mode = app.config.get('V3_UPGRADE_MODE') + if v3_upgrade_mode == 'background': + raise Exception('V3_UPGRADE_MODE must be "complete". This requires a full upgrade to Quay:v3.0. 
See https://access.qa.redhat.com/documentation/en-us/red_hat_quay/3/html/upgrade_quay/index') + elif v3_upgrade_mode == 'production-transition': + print '481623ba00ba' + elif v3_upgrade_mode == 'post-oci-rollout' or v3_upgrade_mode == 'post-oci-roll-back-compat' or v3_upgrade_mode == 'complete': + if ActiveDataMigration is not None: + print ActiveDataMigration.alembic_migration_revision + else: + print 'head' + else: + raise Exception('Unknown V3_UPGRADE_MODE: %s' % v3_upgrade_mode) diff --git a/conf/init/zz_boot.sh b/conf/init/zz_boot.sh index ab760266b..9fa68190a 100755 --- a/conf/init/zz_boot.sh +++ b/conf/init/zz_boot.sh @@ -1,3 +1,4 @@ #!/bin/bash +cd ${QUAYDIR:-"/"} -/venv/bin/python /boot.py +python ${QUAYPATH:-"."}/boot.py diff --git a/conf/jwtproxy_conf.yaml.jnj b/conf/jwtproxy_conf.yaml.jnj index 05f162400..7864f9706 100644 --- a/conf/jwtproxy_conf.yaml.jnj +++ b/conf/jwtproxy_conf.yaml.jnj @@ -1,9 +1,9 @@ jwtproxy: signer_proxy: enabled: true - listen_addr: :8080 - ca_key_file: /conf/mitm.key - ca_crt_file: /conf/mitm.cert + listen_addr: :8081 + ca_key_file: {{ conf_dir }}/mitm.key + ca_crt_file: {{ conf_dir }}/mitm.cert signer: issuer: quay @@ -13,10 +13,11 @@ jwtproxy: type: preshared options: key_id: {{ key_id }} - private_key_path: /conf/quay.pem + private_key_path: {{ service_key_location }} verifier_proxies: - enabled: true listen_addr: unix:/tmp/jwtproxy_secscan.sock + socket_permission: 0777 verifier: upstream: unix:/tmp/gunicorn_secscan.sock audience: {{ audience }} diff --git a/conf/logging.conf b/conf/logging.conf index 317803a24..e38521b66 100644 --- a/conf/logging.conf +++ b/conf/logging.conf @@ -1,11 +1,11 @@ [loggers] -keys=root +keys=root,gunicorn.error,gunicorn.access [handlers] keys=console [formatters] -keys=generic +keys=generic,json [logger_root] level=INFO @@ -19,3 +19,18 @@ args=(sys.stdout, ) [formatter_generic] format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s class=logging.Formatter + +[formatter_json] +class=loghandler.JsonFormatter + +[logger_gunicorn.error] +level=ERROR +handlers=console +propagate=0 +qualname=gunicorn.error + +[logger_gunicorn.access] +handlers=console +propagate=0 +qualname=gunicorn.access +level=DEBUG diff --git a/conf/logging_debug.conf b/conf/logging_debug.conf index 3413f3035..0609a8e58 100644 --- a/conf/logging_debug.conf +++ b/conf/logging_debug.conf @@ -1,11 +1,11 @@ [loggers] -keys=root,boto +keys=root,boto,gunicorn.error,gunicorn.access [handlers] keys=console [formatters] -keys=generic +keys=generic,json [logger_root] level=DEBUG @@ -16,11 +16,26 @@ level=INFO handlers=console qualname=boto +[logger_gunicorn.access] +handlers=console +propagate=0 +qualname=gunicorn.access +level=DEBUG + [handler_console] class=StreamHandler formatter=generic args=(sys.stdout, ) +[logger_gunicorn.error] +level=ERROR +handlers=console +propagate=0 +qualname=gunicorn.error + [formatter_generic] format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s class=logging.Formatter + +[formatter_json] +class=loghandler.JsonFormatter diff --git a/conf/logging_debug_json.conf b/conf/logging_debug_json.conf new file mode 100644 index 000000000..8f0d51c64 --- /dev/null +++ b/conf/logging_debug_json.conf @@ -0,0 +1,41 @@ +[loggers] +keys=root,boto,gunicorn.error,gunicorn.access + +[handlers] +keys=console + +[formatters] +keys=generic,json + +[logger_root] +level=DEBUG +handlers=console + +[logger_boto] +level=INFO +handlers=console +qualname=boto + +[logger_gunicorn.access] +handlers=console +propagate=0 
+qualname=gunicorn.access +level=DEBUG + +[handler_console] +class=StreamHandler +formatter=json +args=(sys.stdout, ) + +[logger_gunicorn.error] +level=ERROR +handlers=console +propagate=0 +qualname=gunicorn.error + +[formatter_generic] +format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s +class=logging.Formatter + +[formatter_json] +class=loghandler.JsonFormatter diff --git a/conf/logging_json.conf b/conf/logging_json.conf new file mode 100644 index 000000000..4d9536380 --- /dev/null +++ b/conf/logging_json.conf @@ -0,0 +1,36 @@ +[loggers] +keys=root,gunicorn.error,gunicorn.access + +[handlers] +keys=console + +[formatters] +keys=json,generic + +[logger_root] +level=INFO +handlers=console + +[handler_console] +class=StreamHandler +formatter=json +args=(sys.stdout, ) + +[formatter_generic] +format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s +class=logging.Formatter + +[formatter_json] +class=loghandler.JsonFormatter + +[logger_gunicorn.error] +level=ERROR +handlers=console +propagate=0 +qualname=gunicorn.error + +[logger_gunicorn.access] +handlers=console +propagate=0 +qualname=gunicorn.access +level=DEBUG diff --git a/conf/nginx-nossl.conf b/conf/nginx-nossl.conf deleted file mode 100644 index 549f4b4e8..000000000 --- a/conf/nginx-nossl.conf +++ /dev/null @@ -1,14 +0,0 @@ -# vim: ft=nginx - -include root-base.conf; - -http { - include http-base.conf; - include rate-limiting.conf; - - server { - include server-base.conf; - - listen 80 default; - } -} diff --git a/conf/dhparams.pem b/conf/nginx/dhparams.pem similarity index 100% rename from conf/dhparams.pem rename to conf/nginx/dhparams.pem diff --git a/conf/hosted-http-base.conf b/conf/nginx/hosted-http-base.conf similarity index 75% rename from conf/hosted-http-base.conf rename to conf/nginx/hosted-http-base.conf index fa5994e6f..b9a895e5d 100644 --- a/conf/hosted-http-base.conf +++ b/conf/nginx/hosted-http-base.conf @@ -1,7 +1,7 @@ # vim: ft=nginx server { - listen 80 default_server; + listen 8080 default_server; server_name _; rewrite ^ https://$host$request_uri? 
permanent; } diff --git a/conf/http-base.conf b/conf/nginx/http-base.conf similarity index 55% rename from conf/http-base.conf rename to conf/nginx/http-base.conf index 22f01aa52..672118bef 100644 --- a/conf/http-base.conf +++ b/conf/nginx/http-base.conf @@ -2,17 +2,25 @@ set_real_ip_from 0.0.0.0/0; real_ip_recursive on; -log_format lb_pp '$remote_addr ($proxy_protocol_addr) ' - '- $remote_user [$time_local] ' - '"$request" $status $body_bytes_sent ' - '"$http_referer" "$http_user_agent" ' - '($request_time $request_length $upstream_response_time)'; +log_format lb_logs '$remote_addr ($proxy_protocol_addr) ' + '- $remote_user [$time_local] ' + '"$request" $status $body_bytes_sent ' + '"$http_referer" "$http_user_agent" ' + '($request_time $request_length $upstream_response_time)'; types_hash_max_size 2048; -include /usr/local/nginx/conf/mime.types.default; +include /etc/opt/rh/rh-nginx112/nginx/mime.types; default_type application/octet-stream; -access_log /dev/stdout; + +access_log /var/log/nginx/access.log; +error_log /var/log/nginx/error.log; +client_body_temp_path /tmp/nginx 1 2; +proxy_temp_path /tmp/nginx-proxy; +fastcgi_temp_path /tmp/nginx-fastcgi; +uwsgi_temp_path /tmp/nginx-uwsgi; +scgi_temp_path /tmp/nginx-scgi; + sendfile on; gzip on; @@ -22,6 +30,7 @@ gzip_min_length 500; gzip_disable "MSIE [1-6]\."; gzip_types text/plain text/xml text/css text/javascript application/x-javascript + application/javascript image/svg+xml application/octet-stream; map $proxy_protocol_addr $proper_forwarded_for { @@ -47,6 +56,13 @@ upstream registry_app_server { server unix:/tmp/gunicorn_registry.sock fail_timeout=0; } +# NOTE: Exposed for the _internal_ping *only*. All other secscan routes *MUST* go through +# the jwtproxy. +upstream secscan_app_server { + server unix:/tmp/gunicorn_secscan.sock fail_timeout=0; +} + + upstream build_manager_controller_server { server localhost:8686; } diff --git a/conf/nginx/nginx.conf.jnj b/conf/nginx/nginx.conf.jnj new file mode 100644 index 000000000..c8dc1bb15 --- /dev/null +++ b/conf/nginx/nginx.conf.jnj @@ -0,0 +1,129 @@ +# vim: ft=nginx + +include root-base.conf; + +{% if use_https %} + +http { + include http-base.conf; + include hosted-http-base.conf; + include rate-limiting.conf; + + server_names_hash_bucket_size 64; + + resolver 127.0.0.1:8053 valid=10s; + + ssl_ciphers '{{ ssl_ciphers }}'; + ssl_protocols {% for ssl_protocol in ssl_protocols %}{{ ssl_protocol }} {% endfor %}; + ssl_session_cache shared:SSL:60m; + ssl_session_timeout 2h; + ssl_session_tickets on; + ssl_prefer_server_ciphers on; + ssl_dhparam dhparams.pem; + + server { + server_name _; + + ssl_certificate ../stack/ssl.cert; + ssl_certificate_key ../stack/ssl.key; + + include server-base.conf; + + listen 8443 ssl http2 default; + + ssl on; + + # This header must be set only for HTTPS + add_header Strict-Transport-Security "max-age=63072000; preload"; + + access_log /var/log/nginx/access.log lb_logs; + } + + server { + server_name _; + + ssl_certificate ../stack/ssl.cert; + ssl_certificate_key ../stack/ssl.key; + + include server-base.conf; + + listen 7443 ssl http2 default proxy_protocol; + ssl on; + + # This header must be set only for HTTPS + add_header Strict-Transport-Security "max-age=63072000; preload"; + + real_ip_header proxy_protocol; + + access_log /var/log/nginx/access.log lb_logs; + } + +{% if v1_only_domain %} + server { + include server-base.conf; + + server_name {{ v1_only_domain }}; + +{% if use_old_certs %} + ssl_certificate ../stack/ssl.old.cert; + ssl_certificate_key 
../stack/ssl.old.key; +{% else %} + ssl_certificate ../stack/ssl.cert; + ssl_certificate_key ../stack/ssl.key; +{% endif %} + + listen 8443 ssl; + + ssl on; + + # This header must be set only for HTTPS + add_header Strict-Transport-Security "max-age=63072000; preload"; + + access_log /var/log/nginx/access.log lb_logs; + } + + server { + server_name {{ v1_only_domain }}; + +{% if use_old_certs %} + ssl_certificate ../stack/ssl.old.cert; + ssl_certificate_key ../stack/ssl.old.key; +{% else %} + ssl_certificate ../stack/ssl.cert; + ssl_certificate_key ../stack/ssl.key; +{% endif %} + + include server-base.conf; + + listen 7443 ssl proxy_protocol; + ssl on; + + # This header must be set only for HTTPS + add_header Strict-Transport-Security "max-age=63072000; preload"; + + real_ip_header proxy_protocol; + + access_log /var/log/nginx/access.log lb_logs; + } +{% endif %} + +} + +{% else %} + +http { + include http-base.conf; + include rate-limiting.conf; + + resolver 127.0.0.1:8053 valid=10s; + + server { + include server-base.conf; + + listen 8080 default; + + access_log /var/log/nginx/access.log lb_logs; + } +} + +{% endif %} diff --git a/conf/nginx/rate-limiting.conf.jnj b/conf/nginx/rate-limiting.conf.jnj new file mode 100644 index 000000000..eb7dd5951 --- /dev/null +++ b/conf/nginx/rate-limiting.conf.jnj @@ -0,0 +1,66 @@ +# vim: ft=nginx + +# Define two buckets: Once for http1 connections (which we force to shard across our fleet) and +# one for http2 connections (which will all hit the same node). +map $http2 $http1_bucket { + "" $proxy_protocol_addr; # HTTP1 case: use the IP address, since shared across nodes. + default $request_id; # HTTP2 case: use request ID to "disable" check. +} + +map $http2 $http2_bucket { + "" $request_id; # HTTP1 case: use the request ID to "disable" check. + default $connection; # HTTP2 case: use the connection serial number to limit. +} + +# Define two additional buckets that fall to $request_id (thus no effective rate limiting) if +# a specific set of namespaces is matched. This allows us to turn off rate limiting selectively +# for special internal namespaces. 
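The map blocks that follow implement the bucket selection described in the comment above; a rough Python rendering of the same decision, with illustrative names only, is:

def rate_limit_key(is_http2, client_addr, connection_id, request_id,
                   namespace=None, exempt_namespaces=(), limits_enabled=True):
    # Exempt namespaces (and deployments with limits disabled) key on the unique
    # request id, so the shared counter never accumulates for them.
    if not limits_enabled or namespace in exempt_namespaces:
        return request_id
    # HTTP/2 requests all ride one connection to a single node, so the connection
    # serial number is the bucket; HTTP/1 requests shard across the fleet by
    # client address.
    return connection_id if is_http2 else client_addr

In the actual config this is split into separate http1/http2 zones, with $request_id used to neutralize whichever zone does not apply to a given request.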
+map $namespace $namespaced_http1_bucket { + {% for namespace in non_rate_limited_namespaces %} + "{{ namespace }}" $request_id; + {% endfor %} + {% if enable_rate_limits %} + default $http1_bucket; + {% else %} + default $request_id; + {% endif %} +} + +map $namespace $namespaced_http2_bucket { + {% for namespace in non_rate_limited_namespaces %} + "{{ namespace }}" $request_id; + {% endfor %} + {% if enable_rate_limits %} + default $http2_bucket; + {% else %} + default $request_id; + {% endif %} +} + +{% if enable_rate_limits %} +limit_req_zone $http_authorization zone=staticauth:10m rate=30r/s; +{% else %} +limit_req_zone $request_id zone=staticauth:10m rate=300r/s; +{% endif %} + +limit_req_zone $http1_bucket zone=dynamicauth_very_light_http1:10m rate=30r/s; +limit_req_zone $http2_bucket zone=dynamicauth_very_light_http2:10m rate=600r/s; +limit_req_zone $namespaced_http1_bucket zone=namespaced_dynamicauth_very_light_http1:10m rate=30r/s; +limit_req_zone $namespaced_http2_bucket zone=namespaced_dynamicauth_very_light_http2:10m rate=600r/s; + +limit_req_zone $http1_bucket zone=dynamicauth_light_http1:10m rate=20r/s; +limit_req_zone $http2_bucket zone=dynamicauth_light_http2:10m rate=400r/s; +limit_req_zone $namespaced_http1_bucket zone=namespaced_dynamicauth_light_http1:10m rate=20r/s; +limit_req_zone $namespaced_http2_bucket zone=namespaced_dynamicauth_light_http2:10m rate=400r/s; + +# This zone should always be used with burst= (nodelay|delay) as the +# limit is very low on purpose but should allow for the burst of traffic +# required for a registry operation. The burst number should also vary per +# endpoint. +limit_req_zone $http1_bucket zone=dynamicauth_heavy_http1:10m rate=1r/s; +limit_req_zone $http2_bucket zone=dynamicauth_heavy_http2:10m rate=20r/s; +limit_req_zone $namespaced_http1_bucket zone=namespaced_dynamicauth_heavy_http1:10m rate=1r/s; +limit_req_zone $namespaced_http2_bucket zone=namespaced_dynamicauth_heavy_http2:10m rate=20r/s; + +limit_req_status 429; +limit_req_log_level warn; \ No newline at end of file diff --git a/conf/nginx/resolver.conf b/conf/nginx/resolver.conf new file mode 100644 index 000000000..175afbe7f --- /dev/null +++ b/conf/nginx/resolver.conf @@ -0,0 +1 @@ +resolver 127.0.0.1:8053 valid=10s; diff --git a/conf/root-base.conf b/conf/nginx/root-base.conf similarity index 72% rename from conf/root-base.conf rename to conf/nginx/root-base.conf index 357e6ed03..972f883dd 100644 --- a/conf/root-base.conf +++ b/conf/nginx/root-base.conf @@ -1,14 +1,12 @@ # vim: ft=nginx pid /tmp/nginx.pid; -error_log /dev/stdout; +error_log /var/log/nginx/error.log; -worker_processes 2; +worker_processes auto; worker_priority -10; worker_rlimit_nofile 10240; -user root nogroup; - daemon off; events { diff --git a/conf/nginx/server-base.conf.jnj b/conf/nginx/server-base.conf.jnj new file mode 100644 index 000000000..a1d44b542 --- /dev/null +++ b/conf/nginx/server-base.conf.jnj @@ -0,0 +1,338 @@ +# vim: ft=nginx + +keepalive_timeout 5; + +if ($host = "www.quay.io") { + return 301 $proper_scheme://quay.io$request_uri; +} + +# Disable the ability to be embedded into iframes +add_header X-Frame-Options DENY; + + +# Proxy Headers +proxy_set_header X-Forwarded-For $proper_forwarded_for; +proxy_set_header X-Forwarded-Proto $proper_scheme; +proxy_set_header Host $host; +proxy_redirect off; + +proxy_set_header Transfer-Encoding $http_transfer_encoding; + +location / { + proxy_pass http://web_app_server; +} + +location /push { + proxy_pass http://web_app_server; + client_max_body_size 
5M; +} + +location /realtime { + proxy_pass http://web_app_server; + proxy_buffering off; + proxy_request_buffering off; +} + +location ~ ^/_storage_proxy/([^/]+)/([^/]+)/([^/]+)/(.+) { + include resolver.conf; + + auth_request /_storage_proxy_auth; + + proxy_pass $2://$3/$4$is_args$args; + + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Host $3; + proxy_set_header Authorization ""; + + add_header Host $3; + + proxy_buffering off; + proxy_request_buffering off; + + proxy_read_timeout 60s; +} + +location = /_storage_proxy_auth { + proxy_pass http://web_app_server; + proxy_pass_request_body off; + proxy_set_header Content-Length ""; + + proxy_set_header X-Original-URI $request_uri; + + proxy_read_timeout 10; +} + +location ~ ^/v2/_catalog(.*)$ { + proxy_pass http://registry_app_server; + proxy_read_timeout 10; + keepalive_timeout 0; # Disables HTTP 1.1 keep-alive and forces round-robin. + + {% if enable_rate_limits %} + limit_req zone=dynamicauth_heavy_http1 burst=1 nodelay; + limit_req zone=dynamicauth_heavy_http2 burst=5 nodelay; + {% endif %} +} + +location /secscan/ { + proxy_pass http://jwtproxy_secscan; +} + +location /secscan/_internal_ping { + proxy_pass http://secscan_app_server; +} + +{% if signing_enabled %} +location ~ ^/v2/(.+)/_trust/tuf/(.*)$ { + set $upstream_tuf {{ tuf_server }}; + proxy_pass $upstream_tuf$uri; + proxy_set_header Host "{{ tuf_host }}"; +} +{% endif %} + +location /cnr { + proxy_buffering off; + + proxy_request_buffering off; + + proxy_pass http://registry_app_server; + proxy_read_timeout 120; + proxy_temp_path /tmp 1 2; + + {% if enable_rate_limits %} + limit_req zone=staticauth burst=5 nodelay; + {% endif %} +} + +location /api/ { + proxy_pass http://web_app_server; + + {% if enable_rate_limits %} + limit_req zone=dynamicauth_heavy_http1 burst=25 nodelay; + limit_req zone=dynamicauth_heavy_http2 burst=100 nodelay; + {% endif %} + + keepalive_timeout 0; # Disables HTTP 1.1 keep-alive and forces round-robin. +} + +location /api/suconfig { + proxy_pass http://web_app_server; + + # For suconfig, set our read timeout as super large for both DB migrations + # and awaiting for secrets to be updated. + proxy_read_timeout 2000; +} + +# This block handles blob requests, and will receive a high volume of traffic, so we set the burst +# much higher. +location ~ /v2/([^/]+)\/[^/]+/blobs/ { + # If we're being accessed via v1.quay.io, pretend we don't support v2. + if ($host = "v1.quay.io") { + return 404; + } + + # NOTE: We disable gzip for HEAD requests because Docker issues them to determine the Content + # Length of a blob. Unfortunately, nginx, seeing an empty body, overwrites the header with + # a length of 0, which breaks this functionality. 
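The if ($request_method = HEAD) guard that follows exists because Docker sizes a blob from the Content-Length of a HEAD response; per the note above, gzip applied to the empty HEAD body makes nginx report a length of 0. A hypothetical spot check, with a placeholder host and digest and requests assumed to be available:

import requests  # assumed to be available

url = ("https://quay.example.com/v2/myorg/myrepo/blobs/"
       "sha256:0000000000000000000000000000000000000000000000000000000000000000")
resp = requests.head(url, timeout=10)
# Should report the real blob size; a value of 0 would indicate the header was rewritten.
print(resp.status_code, resp.headers.get("Content-Length"))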
+ if ($request_method = HEAD) { + gzip off; + } + + proxy_buffering off; + proxy_request_buffering off; + proxy_read_timeout 2000; + proxy_temp_path /tmp 1 2; + + client_max_body_size {{ maximum_layer_size }}; + + # Setting ANY header clears all inherited proxy_set_header directives + proxy_set_header X-Forwarded-For $proper_forwarded_for; + proxy_set_header X-Forwarded-Proto $proper_scheme; + proxy_set_header Host $host; + + proxy_http_version 1.1; + + proxy_pass http://registry_app_server; + + set $namespace $1; + + {% if enable_rate_limits %} + limit_req zone=namespaced_dynamicauth_light_http1 burst=50 nodelay; + limit_req zone=namespaced_dynamicauth_light_http2 burst=100 nodelay; + {% endif %} + + keepalive_timeout 0; # Disables HTTP 1.1 keep-alive and forces round-robin. +} + +# This block handles tags endpoint requests, for which we want to restrict traffic due to how +# heavy an operation it can be +location ~ /v2/([^/]+)\/[^/]+/tags/ { + # If we're being accessed via v1.quay.io, pretend we don't support v2. + if ($host = "v1.quay.io") { + return 404; + } + + # Setting ANY header clears all inherited proxy_set_header directives + proxy_set_header X-Forwarded-For $proper_forwarded_for; + proxy_set_header X-Forwarded-Proto $proper_scheme; + proxy_set_header Host $host; + + proxy_http_version 1.1; + + proxy_pass http://registry_app_server; + + set $namespace $1; + + {% if enable_rate_limits %} + limit_req zone=namespaced_dynamicauth_heavy_http1 burst=2 nodelay; + limit_req zone=namespaced_dynamicauth_heavy_http2 burst=2 nodelay; + {% endif %} + + keepalive_timeout 0; # Disables HTTP 1.1 keep-alive and forces round-robin. +} + +# This block handles manifests endpoint requests, for which we want to restrict traffic heavier than +# the generic V2 operations, as it handles pushes and pulls. +location ~ /v2/([^/]+)\/[^/]+/manifests/ { + # If we're being accessed via v1.quay.io, pretend we don't support v2. + if ($host = "v1.quay.io") { + return 404; + } + + # Setting ANY header clears all inherited proxy_set_header directives + proxy_set_header X-Forwarded-For $proper_forwarded_for; + proxy_set_header X-Forwarded-Proto $proper_scheme; + proxy_set_header Host $host; + + proxy_http_version 1.1; + + proxy_pass http://registry_app_server; + + set $namespace $1; + + {% if enable_rate_limits %} + limit_req zone=namespaced_dynamicauth_light_http1 burst=10 nodelay; + limit_req zone=namespaced_dynamicauth_light_http2 burst=50 nodelay; + {% endif %} + + keepalive_timeout 0; # Disables HTTP 1.1 keep-alive and forces round-robin. +} + +# This block applies to the beginning of a push or pull +location = /v2/auth { + # If we're being accessed via v1.quay.io, pretend we don't support v2. + if ($host = "v1.quay.io") { + return 404; + } + + # Setting ANY header clears all inherited proxy_set_header directives + proxy_set_header X-Forwarded-For $proper_forwarded_for; + proxy_set_header X-Forwarded-Proto $proper_scheme; + proxy_set_header Host $host; + + proxy_http_version 1.1; + + proxy_pass http://registry_app_server; + + {% if enable_rate_limits %} + limit_req zone=staticauth burst=2 nodelay; + {% endif %} + + keepalive_timeout 0; # Disables HTTP 1.1 keep-alive and forces round-robin. +} + +# This block handles all other V2 requests, for which we can use a higher rate limit. +location ~ ^/v2 { + # If we're being accessed via v1.quay.io, pretend we don't support v2. 
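This /v2/auth block sees the first request of every push or pull: the Docker client pings /v2/, receives a 401 with a Bearer challenge whose realm points back at this endpoint, fetches a token, and only then proceeds to manifests and blobs, which is why the small staticauth burst is sufficient here. A hypothetical walk-through of that handshake, with placeholder host and repository names and requests assumed to be available:

import requests  # assumed to be available

base = "https://quay.example.com"                      # placeholder registry host
ping = requests.get(base + "/v2/", timeout=10)         # expect 401 plus a Bearer challenge
print(ping.status_code, ping.headers.get("WWW-Authenticate"))

# The challenge's realm points back at this /v2/auth endpoint.
token = requests.get(base + "/v2/auth",
                     params={"service": "quay.example.com",
                             "scope": "repository:myorg/myrepo:pull"},
                     timeout=10).json().get("token")

# Only now do manifest and blob requests begin, carrying the bearer token.
manifest = requests.get(base + "/v2/myorg/myrepo/manifests/latest",
                        headers={"Authorization": "Bearer %s" % token},
                        timeout=10)
print(manifest.status_code)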
+ if ($host = "v1.quay.io") { + return 404; + } + + # NOTE: We disable gzip for HEAD requests because Docker issues them to determine the Content + # Length of a blob. Unfortunately, nginx, seeing an empty body, overwrites the header with + # a length of 0, which breaks this functionality. Included here for completeness. + if ($request_method = HEAD) { + gzip off; + } + + # Setting ANY header clears all inherited proxy_set_header directives + proxy_set_header X-Forwarded-For $proper_forwarded_for; + proxy_set_header X-Forwarded-Proto $proper_scheme; + proxy_set_header Host $host; + + proxy_http_version 1.1; + + proxy_pass http://registry_app_server; + + {% if enable_rate_limits %} + limit_req zone=dynamicauth_very_light_http1 burst=20 nodelay; + limit_req zone=dynamicauth_very_light_http2 burst=80 nodelay; + {% endif %} + + keepalive_timeout 0; # Disables HTTP 1.1 keep-alive and forces round-robin. +} + +location /v1/ { + # Setting ANY header clears all inherited proxy_set_header directives + proxy_set_header X-Forwarded-For $proper_forwarded_for; + proxy_set_header X-Forwarded-Proto $proper_scheme; + proxy_set_header Host $host; + + proxy_buffering off; + + proxy_request_buffering off; + + proxy_http_version 1.1; + + proxy_pass http://registry_app_server; + proxy_temp_path /tmp 1 2; + + client_max_body_size {{ maximum_layer_size }}; + + {% if enable_rate_limits %} + limit_req zone=dynamicauth_heavy_http1 burst=5 nodelay; + limit_req zone=dynamicauth_heavy_http2 burst=25 nodelay; + {% endif %} + + keepalive_timeout 0; # Disables HTTP 1.1 keep-alive and forces round-robin. +} + +location = /v1/_ping { + add_header Content-Type text/plain; + add_header X-Docker-Registry-Version 0.6.0; + add_header X-Docker-Registry-Standalone 0; + return 200 'true'; +} + +location /c1/ { + proxy_buffering off; + + proxy_request_buffering off; + + proxy_pass http://verbs_app_server; + proxy_temp_path /tmp 1 2; + + {% if enable_rate_limits %} + limit_req zone=staticauth burst=5 nodelay; + {% endif %} +} + +location /static/ { + # checks for static file, if not found proxy to app + alias {{static_dir}}/; + error_page 404 /404; +} + +error_page 502 {{static_dir}}/502.html; + +location ~ ^/b1/controller(/?)(.*) { + proxy_pass http://build_manager_controller_server/$2; +} + +location ~ ^/b1/socket(/?)(.*) { + proxy_pass http://build_manager_websocket_server/$2; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_read_timeout 300; +} diff --git a/conf/rate-limiting.conf b/conf/rate-limiting.conf deleted file mode 100644 index d7e80c67d..000000000 --- a/conf/rate-limiting.conf +++ /dev/null @@ -1,15 +0,0 @@ -# vim: ft=nginx - -# Check the Authorization header and, if it is empty, use their proxy protocol -# IP, else use the header as their unique identifier for rate limiting. -# Enterprise users will never be using proxy protocol, thus the value will be -# empty string. This means they will not get rate limited. 
-map $http_authorization $registry_bucket { - "" $proxy_protocol_addr; - default $http_authorization; -} - -limit_req_zone $proxy_protocol_addr zone=verbs:10m rate=2r/s; -limit_req_zone $registry_bucket zone=repositories:10m rate=2r/s; -limit_req_status 429; -limit_req_log_level warn; diff --git a/conf/selfsigned/jwt.crt b/conf/selfsigned/jwt.crt deleted file mode 100644 index 131bdc9c0..000000000 --- a/conf/selfsigned/jwt.crt +++ /dev/null @@ -1,31 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFXDCCA0agAwIBAgIBAjALBgkqhkiG9w0BAQswLTEMMAoGA1UEBhMDVVNBMRAw -DgYDVQQKEwdldGNkLWNhMQswCQYDVQQLEwJDQTAeFw0xNTA3MTYxOTQzMTdaFw0y -NTA3MTYxOTQzMTlaMEYxDDAKBgNVBAYTA1VTQTEQMA4GA1UEChMHZXRjZC1jYTEQ -MA4GA1UECxMHand0YXV0aDESMBAGA1UEAxMJMTI3LjAuMC4xMIICIjANBgkqhkiG -9w0BAQEFAAOCAg8AMIICCgKCAgEAs5RxPVfO7iPZnFIP0DPiiMMMykDEG0OV6O1x -QycVReI2ELIPiWqfDFVcn6XXI/0kpvNeLGr2dDXaQFZYz+rNVDYBjM3djvibFhwa -30URmfHI9iZM703zdMZwc07+TIteIj1Q4MYhbPB4f6oERtLO29RffN9KH2FQvtzx -CF/GFb6vcHOeCeKZEGjxbQ2vfhMJh+UiO6woBooAJULBaM9hxErszqWqu0QKcV2h -NaW6fSf6aVUbFTu9hhYfkujDBR5EmwVFcKxUF+AHDrAshR/VdTHb0SJ3OtKz0vGv -NCc844J8nhUg7SeeO6ONeAq6cDRN65eJ7nJC1Nhhq2DpOgNxu+j0Dz7F+EEtNWpE -ezGjbRjmM4Ekhvsa/SUdzubInrnyHFYcbMZZIZzbgAJfruZHVKWWXjbxyG74xix+ -+KzBs9jkCHSNNWnXTx3dev4dp4QltZ048crA1lioim8/W5GzYjvkfNwx6OohC4yD -5UoblQsY5vDdJ+S8g4feTmJMoNHdS/4ar/sVojUDX3KOF3bCZ6w4Ufx09EBXeUlQ -9gzs63xAvFhGk8anFSQbRoQgoKoivHpzlANquhWvRZCDtW5P4RLaHcOLjhq6nwe6 -WW+vtDgEEKzdSj1We6grDPoT1kTagJ0gvpX+jcesu5d0e8MHt+qu0WTJwvCxcI+r -8zhXX/MCAwEAAaNyMHAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0G -A1UdDgQWBBQqTEeoqfZjPwzZYdkktdV+3Pl6SDAfBgNVHSMEGDAWgBRXeieifbQ2 -jgPj8wwrrixidWU9tzAPBgNVHREECDAGhwR/AAABMAsGCSqGSIb3DQEBCwOCAgEA -KIFrN5mDOvbBEWYZSnyrGkF4bsXCbmLtg3FJkgdn5vJ916/2WtgceJMQKu1KE90V -giaRaSm4HEb63VBrCNdvoU0ZSRO/xQI2SqbERdFa36w31W71D16qDchRsFz+dEt4 -7jm1gIdl/UYCMqFye7t+6H/w4J8p1M90RlSXw8wHyFEPOjEfBOM5uSpO9xsXW04+ -DpfJvsLmvhaaE+OUrPft+VTtf0Wc7vV8jfS21D9nB/CJVaoS71m3FEHD8BlTZIqB -qcU67UJc7qhUJ3HyKbpJgFQcvEQ8GL+PJnsCO7Y/zCCbYLwjV1GffvHMGQ2JAJbB -2qnUxPqVmP87X3YDMXPVubW+CtoRPz7BIYsX2/HejlYOtlT25+SrHwpXRT5lcgbt -a9dcHhUmNNpfTgZpbPrPfdzqw+ze+HcbJAECWgm8v10quGbP5NZCnySM7LIJ8p7C -dLOGGuZnUaruqA3FRYS3147bdhGF1gLwGuM+BwzzvoppMf5kZuBWq6j6Feg1I68z -n1qhlEJSMoS1qUEq/8oXYgSs2ttvMAhZ4CqKPZztp3oZLPzZgL/eKb4JEjhpgitJ -TrgLFwAytHGZIWke/lR+Ca9qo/uMebduLu6akqZ5yrxl/DuHcBV8KGq+rXJIvxxj -O9hZBNQ+WDPvQlSN2z/An17zZePLgxspjZXIkkgSg1Y= ------END CERTIFICATE----- diff --git a/conf/selfsigned/jwt.key.insecure b/conf/selfsigned/jwt.key.insecure deleted file mode 100644 index 00111a443..000000000 --- a/conf/selfsigned/jwt.key.insecure +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEAs5RxPVfO7iPZnFIP0DPiiMMMykDEG0OV6O1xQycVReI2ELIP -iWqfDFVcn6XXI/0kpvNeLGr2dDXaQFZYz+rNVDYBjM3djvibFhwa30URmfHI9iZM -703zdMZwc07+TIteIj1Q4MYhbPB4f6oERtLO29RffN9KH2FQvtzxCF/GFb6vcHOe -CeKZEGjxbQ2vfhMJh+UiO6woBooAJULBaM9hxErszqWqu0QKcV2hNaW6fSf6aVUb -FTu9hhYfkujDBR5EmwVFcKxUF+AHDrAshR/VdTHb0SJ3OtKz0vGvNCc844J8nhUg -7SeeO6ONeAq6cDRN65eJ7nJC1Nhhq2DpOgNxu+j0Dz7F+EEtNWpEezGjbRjmM4Ek -hvsa/SUdzubInrnyHFYcbMZZIZzbgAJfruZHVKWWXjbxyG74xix++KzBs9jkCHSN -NWnXTx3dev4dp4QltZ048crA1lioim8/W5GzYjvkfNwx6OohC4yD5UoblQsY5vDd -J+S8g4feTmJMoNHdS/4ar/sVojUDX3KOF3bCZ6w4Ufx09EBXeUlQ9gzs63xAvFhG -k8anFSQbRoQgoKoivHpzlANquhWvRZCDtW5P4RLaHcOLjhq6nwe6WW+vtDgEEKzd -Sj1We6grDPoT1kTagJ0gvpX+jcesu5d0e8MHt+qu0WTJwvCxcI+r8zhXX/MCAwEA -AQKCAgEAhhD5ZYGLhDARgumk0pwZsF5lyw0FGxGe9lFl8GtaL10NXfOBM+b8rHmB -99IYxs5zMYyZLvH/4oxdzxBnp3m1JvxWtebvVJB3P89lpG/tDw/6JwI7B6Ebc3++ 
-bed4ZG7brRY3rkdcpvb0DuM/5Bv3wRhQ3WnZ7Yl6fbN24viVaqB8W6iFQP4BpcWj -D/ZaoPXXdLP0lbYV/6PBLhAjUnsYkzIYjsIRr1LBtRbghqueiVdyVHbsDDMYb+VO -VyAckFKjh1QtHkwZT+W5fxa5df1pH+BEKmLfvnOVOpOiaH4ur+8319EQTtz3/bBB -qm/f9mqmDY+JsxFsoXiVmht0oxH1MsHV7jSpwxVj0nN6uV61zlgTgj/kXIASbuRO -swFM1o6+KNuFuqI4w5+Nkw5o+PbtP5UMTVTpUSQBQumUbM+xPClRP/k7LZeK0ikv -36BQ2xaLIzECKXyYgK6b1rypTnJv6hAqJcNozUHnKPcworCNK1xB+n+pycrVzPwZ -32WNXdLSquTeXNmc4vHZxVrFFjGzeWmWESYt6huFWn6xb9IdfhrzpuH5LS7rTIhj -kvZCAiN4n+cuRwjBPaxxkSg/Lh8IyFOchwI6CcWWucGFMxJZpqtCS14B27LNrrJt -bCdO/AQr9h3hvDR5vrvLnxOnNusumIZ3tpvfWeObIdOhkiFoPykCggEBAOtEnCIz -RH2M7bo9IE2C4zE6T40fJvTRrSRju78dFW7mpfe9F2nr/2/FVjbeAw6FJaEFDQLW -OSc3Amr0ACLOpSIuoHHb6uT3XzNnXl6xdsJ1swi1gZJgyge6FUYjMNFjko98qI4O -aqYBZzoDBw+K7bpUXEMwYPZcU0A6P/9+98wkJLHp3DfqqfBH7PiMtAJY6+ZQ2mfs -UFGI6ygVONwPhHQ9kWwtGvBfb+4AgUD0lu9UR3Yij07cze1aVJcVXQJopBvFnEnG -qEsm2oDwnWquG4A7ASCUpHJk+A1K4p7q6opM3Y1Lv8OYzR7dHsAEH/NN0mSn1tyE -dFBrzSAdDr9mI8UCggEBAMNnkXy2EuLlDHzrOiEn0l4ULbtHZjT0FtjCczg/K9Nn -ousAr5I09eNuQDXLheNqOe5LXrpWqc5psf5FsCj5WkkzMbJm/7GPwQN8ufThBneY -4oAO/xrOvwDZP9ihzIo/+chQQMXXA8Dysn6lIOHCGrdvEYF8nIvf95gCbaXfPR8G -Jecsxg3Nc0Pi1bGN5X5q/AwlJDUrd7JjIuTWYxEuhczPcoiEskgjGHGO96EWIjLX -cGB4xav6K8X4BJyxN6Ewek/HT4TjMqd1bIH6020JNZ0Z1rVFtr9DUXf5xkI3gbjI -7X3uNu0yjw31rEfVA6vokfFUZ9TogNsxUw2s/WTX2FcCggEBAIXphJF26vonmDzU -hCl6YcToctVZsPvSySGYXwoWDNgWEsvjZotU2A0PntYWIllGcjenv1MpClkG16p2 -/gjR5G6DabHFQntXTmnc4Xs2uelPwzsmzPy7eONTCL7mUugsLATeKLbK/+tDizUa -+g7fvha749QemmJABObfAQR1iag5vmVCPqXZPSdWWUzUEbXwVT3AMcDLYqA2NduX -0Mh5UKQ1UyvmtJmzSOuIgAmv7qWFLDPS0g1KYzBBpTpl3436b8abAS2BFNPJ5r9T -tdY+CctASpD36m5uiD5QrJNWFW/o9oZxYlJ8C+0QYWtcLa94UVQXsJXOEsKfyZ8I -yxcolR0CggEACrKs4GsHdayclmo6cH5BoizwpAQwsE18wrlCnZ23xIc0ADG1awfD -PoRWt5tA5UZ3jXhK42DDQy2+NPGTx2p/auqGmgHRleMM6if19lYriHiyTdiEVu9i -vaUnPbD+BcOi5TifkzVGW1XuN8jKmBGMbOaDytcLqwzD/WqEnkQukHhBsvpcjXzm -Bp1wnZvrKJSq3+9YoCCVGQscafLi0Zn+cUwaNScuq4xgVjdBj2wqyyXIXT+/cr7r -jpcZiYqaRRTmXV/IFrppl4lyO1uEH8AVU1iKzLnYW3hQCYV/OTjYvUki13YnQ600 -78q3d+dNoCfHdbLtTFa+V0HIDkOeS9sVWQKCAQBoZIeAkKePec19TL5xvqe0MSDC -dZwW/rVPfIraMuETFXlH1dsyUcZu578T7Uvuc/ZAOf7cSedKtxuEtbd0eJ8YtQJ3 -LWuL+JX5TsU0qsPvhQIKpLkznhTinH8/TVi8yxJzsOd56Ta2068U+ad9oRiI14Ne -pSzqQavGp5s1anSD769xKNNHKZkYPHYJ/5Te7hhdpBwQ3kn8AiUuemJ5MNfJO+8e -LCQL/LjuwgKAis0PQbWAHs2d9HJxQLlR62j754ooTDe6FfSoH2zKgdzSTteqHXue -ga/+6pwc/LoLS1TAAv9ChJFIERClNi6Bq/OpcECiVN6eFav6r5UR+w3+mCQk ------END RSA PRIVATE KEY----- diff --git a/conf/server-base.conf b/conf/server-base.conf deleted file mode 100644 index 04513c122..000000000 --- a/conf/server-base.conf +++ /dev/null @@ -1,134 +0,0 @@ -# vim: ft=nginx - -server_name _; - -keepalive_timeout 5; - -if ($host = "www.quay.io") { - return 301 $proper_scheme://quay.io$request_uri; -} - -if ($args ~ "_escaped_fragment_") { - rewrite ^ /snapshot$uri; -} - -# Disable the ability to be embedded into iframes -add_header X-Frame-Options DENY; - - -# Proxy Headers -proxy_set_header X-Forwarded-For $proper_forwarded_for; -proxy_set_header X-Forwarded-Proto $proper_scheme; -proxy_set_header Host $host; -proxy_redirect off; - -proxy_set_header Transfer-Encoding $http_transfer_encoding; - -location / { - proxy_pass http://web_app_server; -} - -location /realtime { - proxy_pass http://web_app_server; - proxy_buffering off; - proxy_request_buffering off; -} - -# At the begining and end of a push/pull, (/v1/repositories|/v2/auth/) is hit by the Docker -# client. By rate-limiting just this endpoint, we can avoid accidentally -# blocking pulls/pushes for images with many layers. 
-location ~ ^/(v1/repositories|v2/auth)/ { - proxy_buffering off; - - proxy_request_buffering off; - - proxy_pass http://registry_app_server; - proxy_read_timeout 2000; - proxy_temp_path /tmp 1 2; - - limit_req zone=repositories burst=10; -} - -location /secscan/ { - proxy_pass http://jwtproxy_secscan; -} - -location ~ ^/v2 { - # If we're being accessed via v1.quay.io, pretend we don't support v2. - if ($host = "v1.quay.io") { - return 404; - } - - # Setting ANY header clears all inherited proxy_set_header directives - proxy_set_header X-Forwarded-For $proper_forwarded_for; - proxy_set_header X-Forwarded-Proto $proper_scheme; - proxy_set_header Host $host; - - proxy_buffering off; - - proxy_request_buffering off; - - proxy_read_timeout 2000; - - proxy_http_version 1.1; - - proxy_pass http://registry_app_server; - proxy_temp_path /tmp 1 2; - - client_max_body_size 20G; -} - -location ~ ^/v1 { - # Setting ANY header clears all inherited proxy_set_header directives - proxy_set_header X-Forwarded-For $proper_forwarded_for; - proxy_set_header X-Forwarded-Proto $proper_scheme; - proxy_set_header Host $host; - - proxy_buffering off; - - proxy_request_buffering off; - - proxy_http_version 1.1; - - proxy_pass http://registry_app_server; - proxy_temp_path /tmp 1 2; - - client_max_body_size 20G; -} - -location /v1/_ping { - add_header Content-Type text/plain; - add_header X-Docker-Registry-Version 0.6.0; - add_header X-Docker-Registry-Standalone 0; - return 200 'true'; -} - -location /c1/ { - proxy_buffering off; - - proxy_request_buffering off; - - proxy_pass http://verbs_app_server; - proxy_temp_path /tmp 1 2; - - limit_req zone=verbs burst=10; -} - -location /static/ { - # checks for static file, if not found proxy to app - alias /static/; - error_page 404 /404; -} - -error_page 502 /static/502.html; - -location ~ ^/b1/controller(/?)(.*) { - proxy_pass http://build_manager_controller_server/$2; -} - -location ~ ^/b1/socket(/?)(.*) { - proxy_pass http://build_manager_websocket_server/$2; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "upgrade"; -} diff --git a/conf/supervisord.conf.jnj b/conf/supervisord.conf.jnj new file mode 100644 index 000000000..b5224250d --- /dev/null +++ b/conf/supervisord.conf.jnj @@ -0,0 +1,376 @@ +[supervisord] +nodaemon=true + +[unix_http_server] +file=%(ENV_QUAYCONF)s/supervisord.sock +user=root + +[supervisorctl] +serverurl=unix:///%(ENV_QUAYCONF)s/supervisord.sock + +[rpcinterface:supervisor] +supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface + +[eventlistener:stdout] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command = supervisor_stdout +buffer_size = 1024 +events = PROCESS_LOG +result_handler = supervisor_stdout:event_handler + +;;; Run batch scripts +[program:blobuploadcleanupworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.blobuploadcleanupworker.blobuploadcleanupworker +autostart = {{ config['blobuploadcleanupworker']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:buildlogsarchiver] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.buildlogsarchiver.buildlogsarchiver +autostart = {{ config['buildlogsarchiver']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled 
= true + +[program:builder] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m buildman.builder +autostart = {{ config['builder']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:chunkcleanupworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.chunkcleanupworker +autostart = {{ config['chunkcleanupworker']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:expiredappspecifictokenworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.expiredappspecifictokenworker +autostart = {{ config['expiredappspecifictokenworker']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:exportactionlogsworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.exportactionlogsworker +autostart = {{ config['exportactionlogsworker']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:gcworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.gc.gcworker +autostart = {{ config['gcworker']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:globalpromstats] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.globalpromstats.globalpromstats +autostart = {{ config['globalpromstats']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:labelbackfillworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.labelbackfillworker +autostart = {{ config['labelbackfillworker']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:logrotateworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.logrotateworker +autostart = {{ config['logrotateworker']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:namespacegcworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.namespacegcworker +autostart = {{ config['namespacegcworker']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:notificationworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.notificationworker.notificationworker +autostart = {{ config['notificationworker']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + 
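Each `[program:...]` section in this new `supervisord.conf.jnj` is a Jinja template: the `autostart` flag is filled in from a `config` dict at render time via expressions like `{{ config['notificationworker']['autostart'] }}`. The code that performs that rendering is not part of this hunk, so the following is only a minimal, self-contained sketch of how such an expression gets resolved; the template string, function name, and the single service shown are illustrative assumptions, not the actual Quay startup code.

```python
# Minimal sketch (assumption, not Quay's actual startup code) of how the
# {{ config['<worker>']['autostart'] }} expressions in supervisord.conf.jnj
# are resolved: render the template against a per-service autostart mapping.
import jinja2

PROGRAM_TEMPLATE = """\
[program:notificationworker]
command=python -m workers.notificationworker.notificationworker
autostart = {{ config['notificationworker']['autostart'] }}
"""

def render_program(enabled):
    template = jinja2.Environment().from_string(PROGRAM_TEMPLATE)
    config = {'notificationworker': {'autostart': 'true' if enabled else 'false'}}
    return template.render(config=config)

if __name__ == '__main__':
    # Prints the section with "autostart = true" substituted in.
    print(render_program(enabled=True))
```

Because supervisord only consumes the rendered file, flipping a worker's `autostart` flag in that mapping is enough to disable it without touching the template itself.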
+[program:queuecleanupworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.queuecleanupworker +autostart = {{ config['queuecleanupworker']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:repositoryactioncounter] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.repositoryactioncounter +autostart = {{ config['repositoryactioncounter']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:security_notification_worker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.security_notification_worker +autostart = {{ config['security_notification_worker']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:securityworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.securityworker.securityworker +autostart = {{ config['securityworker']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:storagereplication] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.storagereplication +autostart = {{ config['storagereplication']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:tagbackfillworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.tagbackfillworker +autostart = {{ config['tagbackfillworker']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:teamsyncworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.teamsyncworker.teamsyncworker +autostart = {{ config['teamsyncworker']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +;;; Run interactive scripts +[program:dnsmasq] +command=/usr/sbin/dnsmasq --no-daemon --user=root --listen-address=127.0.0.1 --port=8053 +autostart = {{ config['dnsmasq']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:gunicorn-registry] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s, + DB_CONNECTION_POOLING=%(ENV_DB_CONNECTION_POOLING_REGISTRY)s +command=nice -n 10 gunicorn -c %(ENV_QUAYCONF)s/gunicorn_registry.py registry:application +autostart = {{ config['gunicorn-registry']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:gunicorn-secscan] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=gunicorn -c %(ENV_QUAYCONF)s/gunicorn_secscan.py secscan:application +autostart = {{ config['gunicorn-secscan']['autostart'] }} 
+stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:gunicorn-verbs] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=nice -n 10 gunicorn -c %(ENV_QUAYCONF)s/gunicorn_verbs.py verbs:application +autostart = {{ config['gunicorn-verbs']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:gunicorn-web] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=gunicorn -c %(ENV_QUAYCONF)s/gunicorn_web.py web:application +autostart = {{ config['gunicorn-web']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:jwtproxy] +command=/usr/local/bin/jwtproxy --config %(ENV_QUAYCONF)s/jwtproxy_conf.yaml +autostart = {{ config['jwtproxy']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:memcache] +command=memcached -u memcached -m 64 -l 127.0.0.1 -p 18080 +autostart = {{ config['memcache']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:nginx] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=nginx -c %(ENV_QUAYCONF)s/nginx/nginx.conf +autostart = {{ config['nginx']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:prometheus-aggregator] +command=/usr/local/bin/prometheus-aggregator +autostart = {{ config['prometheus-aggregator']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:servicekey] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.servicekeyworker.servicekeyworker +autostart = {{ config['servicekey']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:repomirrorworker] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=python -m workers.repomirrorworker.repomirrorworker +autostart = {{ config['repomirrorworker']['autostart'] }} +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true +# EOF NO NEWLINE \ No newline at end of file diff --git a/config.py b/config.py index ba11fc48c..ae742ece8 100644 --- a/config.py +++ b/config.py @@ -1,7 +1,10 @@ -import os.path +from uuid import uuid4 +import os.path import requests +from _init import ROOT_DIR, CONF_DIR + def build_requests_session(): sess = requests.Session() @@ -17,8 +20,11 @@ def build_requests_session(): CLIENT_WHITELIST = ['SERVER_HOSTNAME', 'PREFERRED_URL_SCHEME', 'MIXPANEL_KEY', 'STRIPE_PUBLISHABLE_KEY', 'ENTERPRISE_LOGO_URL', 'SENTRY_PUBLIC_DSN', 'AUTHENTICATION_TYPE', 'REGISTRY_TITLE', 'REGISTRY_TITLE_SHORT', - 'CONTACT_INFO', 'AVATAR_KIND', 'LOCAL_OAUTH_HANDLER', 
'DOCUMENTATION_LOCATION', - 'DOCUMENTATION_METADATA', 'SETUP_COMPLETE'] + 'CONTACT_INFO', 'AVATAR_KIND', 'LOCAL_OAUTH_HANDLER', + 'SETUP_COMPLETE', 'DEBUG', 'MARKETO_MUNCHKIN_ID', + 'STATIC_SITE_BUCKET', 'RECAPTCHA_SITE_KEY', 'CHANNEL_COLORS', + 'TAG_EXPIRATION_OPTIONS', 'INTERNAL_OIDC_SERVICE_ID', + 'SEARCH_RESULTS_PER_PAGE', 'SEARCH_MAX_RESULT_PAGE_COUNT', 'BRANDING'] def frontend_visible_config(config_dict): @@ -29,29 +35,68 @@ def frontend_visible_config(config_dict): if name in config_dict: visible_dict[name] = config_dict.get(name, None) + if 'ENTERPRISE_LOGO_URL' in config_dict: + visible_dict['BRANDING'] = visible_dict.get('BRANDING', {}) + visible_dict['BRANDING']['logo'] = config_dict['ENTERPRISE_LOGO_URL'] return visible_dict -class DefaultConfig(object): +# Configuration that should not be changed by end users +class ImmutableConfig(object): + + # Requests based HTTP client with a large request pool + HTTPCLIENT = build_requests_session() + + # Status tag config + STATUS_TAGS = {} + for tag_name in ['building', 'failed', 'none', 'ready', 'cancelled']: + tag_path = os.path.join(ROOT_DIR, 'buildstatus', tag_name + '.svg') + with open(tag_path) as tag_svg: + STATUS_TAGS[tag_name] = tag_svg.read() + + # Reverse DNS prefixes that are reserved for internal use on labels and should not be allowable + # to be set via the API. + DEFAULT_LABEL_KEY_RESERVED_PREFIXES = ['com.docker.', 'io.docker.', 'org.dockerproject.', + 'org.opencontainers.', 'io.cncf.', + 'io.kubernetes.', 'io.k8s.', + 'io.quay', 'com.coreos', 'com.tectonic', + 'internal', 'quay'] + + # Colors for local avatars. + AVATAR_COLORS = ['#969696', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728', + '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2', + '#7f7f7f', '#c7c7c7', '#bcbd22', '#1f77b4', '#17becf', '#9edae5', '#393b79', + '#5254a3', '#6b6ecf', '#9c9ede', '#9ecae1', '#31a354', '#b5cf6b', '#a1d99b', + '#8c6d31', '#ad494a', '#e7ba52', '#a55194'] + + # Colors for channels. 
+ CHANNEL_COLORS = ['#969696', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728', + '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2', + '#7f7f7f', '#c7c7c7', '#bcbd22', '#1f77b4', '#17becf', '#9edae5', '#393b79', + '#5254a3', '#6b6ecf', '#9c9ede', '#9ecae1', '#31a354', '#b5cf6b', '#a1d99b', + '#8c6d31', '#ad494a', '#e7ba52', '#a55194'] + + PROPAGATE_EXCEPTIONS = True + + +class DefaultConfig(ImmutableConfig): # Flask config JSONIFY_PRETTYPRINT_REGULAR = False SESSION_COOKIE_SECURE = False + SESSION_COOKIE_HTTPONLY = True + SESSION_COOKIE_SAMESITE = 'Lax' + LOGGING_LEVEL = 'DEBUG' SEND_FILE_MAX_AGE_DEFAULT = 0 PREFERRED_URL_SCHEME = 'http' SERVER_HOSTNAME = 'localhost:5000' - REGISTRY_TITLE = 'Quay Enterprise' - REGISTRY_TITLE_SHORT = 'Quay Enterprise' + REGISTRY_TITLE = 'Project Quay' + REGISTRY_TITLE_SHORT = 'Project Quay' - CONTACT_INFO = [ - 'mailto:support@quay.io', - 'irc://chat.freenode.net:6665/quay', - 'tel:+1-888-930-3475', - 'https://twitter.com/quayio', - ] + CONTACT_INFO = [] # Mail config MAIL_SERVER = '' @@ -59,7 +104,7 @@ class DefaultConfig(object): MAIL_PORT = 587 MAIL_USERNAME = None MAIL_PASSWORD = None - MAIL_DEFAULT_SENDER = 'support@quay.io' + MAIL_DEFAULT_SENDER = 'example@projectquay.io' MAIL_FAIL_SILENTLY = False TESTING = True @@ -76,9 +121,18 @@ class DefaultConfig(object): DB_TRANSACTION_FACTORY = create_transaction + # If set to 'readonly', the entire registry is placed into read only mode and no write operations + # may be performed against it. + REGISTRY_STATE = 'normal' + + # If set to true, TLS is used, but is terminated by an external service (such as a load balancer). + # Note that PREFERRED_URL_SCHEME must be `https` when this flag is set or it can lead to undefined + # behavior. + EXTERNAL_TLS_TERMINATION = False + # If true, CDN URLs will be used for our external dependencies, rather than the local # copies. - USE_CDN = True + USE_CDN = False # Authentication AUTHENTICATION_TYPE = 'Database' @@ -98,6 +152,7 @@ class DefaultConfig(object): # Build Queue Metrics QUEUE_METRICS_TYPE = 'Null' + QUEUE_WORKER_METRICS_REFRESH_SECONDS = 300 # Exception logging EXCEPTION_LOG_TYPE = 'FakeSentry' @@ -117,24 +172,20 @@ class DefaultConfig(object): # Gitlab Config. GITLAB_TRIGGER_CONFIG = None - # Requests based HTTP client with a large request pool - HTTPCLIENT = build_requests_session() - - # Status tag config - STATUS_TAGS = {} - for tag_name in ['building', 'failed', 'none', 'ready']: - tag_path = os.path.join('buildstatus', tag_name + '.svg') - with open(tag_path) as tag_svg: - STATUS_TAGS[tag_name] = tag_svg.read() - NOTIFICATION_QUEUE_NAME = 'notification' DOCKERFILE_BUILD_QUEUE_NAME = 'dockerfilebuild' REPLICATION_QUEUE_NAME = 'imagestoragereplication' SECSCAN_NOTIFICATION_QUEUE_NAME = 'security_notification' + CHUNK_CLEANUP_QUEUE_NAME = 'chunk_cleanup' + NAMESPACE_GC_QUEUE_NAME = 'namespacegc' + EXPORT_ACTION_LOGS_QUEUE_NAME = 'exportactionlogs' # Super user config. Note: This MUST BE an empty list for the default config. SUPER_USERS = [] + # Feature Flag: Whether sessions are permanent. + FEATURE_PERMANENT_SESSIONS = True + # Feature Flag: Whether super users are supported. FEATURE_SUPER_USERS = True @@ -153,12 +204,6 @@ class DefaultConfig(object): # Feature Flag: Whether Google login is supported. FEATURE_GOOGLE_LOGIN = False - # Feature Flag: Whther Dex login is supported. 
- FEATURE_DEX_LOGIN = False - - # Feature flag, whether to enable olark chat - FEATURE_OLARK_CHAT = False - # Feature Flag: Whether to support GitHub build triggers. FEATURE_GITHUB_BUILD = False @@ -177,6 +222,10 @@ class DefaultConfig(object): # Feature Flag: Whether users can be created (by non-super users). FEATURE_USER_CREATION = True + # Feature Flag: Whether users being created must be invited by another user. + # If FEATURE_USER_CREATION is off, this flag has no effect. + FEATURE_INVITE_ONLY_USER_CREATION = False + # Feature Flag: Whether users can be renamed FEATURE_USER_RENAME = False @@ -197,6 +246,10 @@ class DefaultConfig(object): # Documentation: http://pythonhosted.org/semantic_version/reference.html#semantic_version.Spec BLACKLIST_V2_SPEC = '<1.6.0' + # Feature Flag: Whether to restrict V1 pushes to the whitelist. + FEATURE_RESTRICTED_V1_PUSH = False + V1_PUSH_WHITELIST = [] + # Feature Flag: Whether or not to rotate old action logs to storage. FEATURE_ACTION_LOG_ROTATION = False @@ -207,12 +260,50 @@ class DefaultConfig(object): # Docker. FEATURE_LIBRARY_SUPPORT = True + # Feature Flag: Whether to require invitations when adding a user to a team. + FEATURE_REQUIRE_TEAM_INVITE = True + + # Feature Flag: Whether to proxy all direct download URLs in storage via the registry's nginx. + FEATURE_PROXY_STORAGE = False + + # Feature Flag: Whether to collect and support user metadata. + FEATURE_USER_METADATA = False + + # Feature Flag: Whether to support signing + FEATURE_SIGNING = False + + # Feature Flag: Whether to enable support for App repositories. + FEATURE_APP_REGISTRY = False + + # Feature Flag: Whether app registry is in a read-only mode. + FEATURE_READONLY_APP_REGISTRY = False + + # Feature Flag: If set to true, the _catalog endpoint returns public repositories. Otherwise, + # only private repositories can be returned. + FEATURE_PUBLIC_CATALOG = False + + # Feature Flag: If set to true, build logs may be read by those with read access to the repo, + # rather than only write access or admin access. + FEATURE_READER_BUILD_LOGS = False + + # Feature Flag: If set to true, autocompletion will apply to partial usernames. + FEATURE_PARTIAL_USER_AUTOCOMPLETE = True + + # Feature Flag: If set to true, users can confirm (and modify) their initial usernames when + # logging in via OIDC or a non-database internal auth provider. + FEATURE_USERNAME_CONFIRMATION = True + + # If a namespace is defined in the public namespace list, then it will appear on *all* + # user's repository list pages, regardless of whether that user is a member of the namespace. + # Typically, this is used by an enterprise customer in configuring a set of "well-known" + # namespaces. + PUBLIC_NAMESPACES = [] + # The namespace to use for library repositories. # Note: This must remain 'library' until Docker removes their hard-coded namespace for libraries. # See: https://github.com/docker/docker/blob/master/registry/session.go#L320 LIBRARY_NAMESPACE = 'library' - BUILD_MANAGER = ('enterprise', {}) DISTRIBUTED_STORAGE_CONFIG = { @@ -237,17 +328,10 @@ class DefaultConfig(object): # Action logs archive ACTION_LOG_ARCHIVE_LOCATION = 'local_us' ACTION_LOG_ARCHIVE_PATH = 'actionlogarchive/' + ACTION_LOG_ROTATION_THRESHOLD = '30d' - # For enterprise: - MAXIMUM_REPOSITORY_USAGE = 20 - - # System logs. - SYSTEM_LOGS_PATH = "/var/log/" - SYSTEM_LOGS_FILE = "/var/log/syslog" - SYSTEM_SERVICES_PATH = "conf/init/service/" - - # Services that should not be shown in the logs view. 
- SYSTEM_SERVICE_BLACKLIST = [] + # Allow registry pulls when unable to write to the audit log + ALLOW_PULLS_WITHOUT_STRICT_LOGGING = False # Temporary tag expiration in seconds, this may actually be longer based on GC policy PUSH_TEMP_TAG_EXPIRATION_SEC = 60 * 60 # One hour per layer @@ -256,29 +340,27 @@ class DefaultConfig(object): SIGNED_GRANT_EXPIRATION_SEC = 60 * 60 * 24 # One day to complete a push/pull # Registry v2 JWT Auth config - JWT_AUTH_MAX_FRESH_S = 60 * 60 + 60 # At most signed for one hour, accounting for clock skew - JWT_AUTH_TOKEN_ISSUER = 'quay-test-issuer' - JWT_AUTH_CERTIFICATE_PATH = 'conf/selfsigned/jwt.crt' - JWT_AUTH_PRIVATE_KEY_PATH = 'conf/selfsigned/jwt.key.insecure' + REGISTRY_JWT_AUTH_MAX_FRESH_S = 60 * 60 + 60 # At most signed one hour, accounting for clock skew # The URL endpoint to which we redirect OAuth when generating a token locally. LOCAL_OAUTH_HANDLER = '/oauth/localapp' # The various avatar background colors. AVATAR_KIND = 'local' - AVATAR_COLORS = ['#969696', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728', - '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2', - '#7f7f7f', '#c7c7c7', '#bcbd22', '#1f77b4', '#17becf', '#9edae5', '#393b79', - '#5254a3', '#6b6ecf', '#9c9ede', '#9ecae1', '#31a354', '#b5cf6b', '#a1d99b', - '#8c6d31', '#ad494a', '#e7ba52', '#a55194'] - # The location of the Quay documentation. - DOCUMENTATION_LOCATION = 'http://docs.quay.io' - DOCUMENTATION_METADATA = 'https://coreos.github.io/quay-docs/search.json' + # Custom branding + BRANDING = { + 'logo': '/static/img/quay-horizontal-color.svg', + 'footer_img': None, + 'footer_url': None, + } # How often the Garbage Collection worker runs. GARBAGE_COLLECTION_FREQUENCY = 30 # seconds + # How long notifications will try to send before timing out. + NOTIFICATION_SEND_TIMEOUT = 10 + # Security scanner FEATURE_SECURITY_SCANNER = False FEATURE_SECURITY_NOTIFICATIONS = False @@ -286,11 +368,20 @@ class DefaultConfig(object): # The endpoint for the security scanner. SECURITY_SCANNER_ENDPOINT = 'http://192.168.99.101:6060' + # The number of seconds between indexing intervals in the security scanner + SECURITY_SCANNER_INDEXING_INTERVAL = 30 + + # If specified, the security scanner will only index images newer than the provided ID. + SECURITY_SCANNER_INDEXING_MIN_ID = None + # If specified, the endpoint to be used for all POST calls to the security scanner. SECURITY_SCANNER_ENDPOINT_BATCH = None + # If specified, GET requests that return non-200 will be retried at the following instances. + SECURITY_SCANNER_READONLY_FAILOVER_ENDPOINTS = [] + # The indexing engine version running inside the security scanner. - SECURITY_SCANNER_ENGINE_VERSION_TARGET = 2 + SECURITY_SCANNER_ENGINE_VERSION_TARGET = 3 # The version of the API to use for the security scanner. SECURITY_SCANNER_API_VERSION = 'v1' @@ -304,10 +395,22 @@ class DefaultConfig(object): # The issuer name for the security scanner. SECURITY_SCANNER_ISSUER_NAME = 'security_scanner' + # Repository mirror + FEATURE_REPO_MIRROR = False + + # The number of seconds between indexing intervals in the repository mirror + REPO_MIRROR_INTERVAL = 30 + + # Require HTTPS and verify certificates of Quay registry during mirror. + REPO_MIRROR_TLS_VERIFY = True + + # Replaces the SERVER_HOSTNAME as the destination for mirroring. 
+ REPO_MIRROR_SERVER_HOSTNAME = None + # JWTProxy Settings # The address (sans schema) to proxy outgoing requests through the jwtproxy # to be signed - JWTPROXY_SIGNER = 'localhost:8080' + JWTPROXY_SIGNER = 'localhost:8081' # The audience that jwtproxy should verify on incoming requests # If None, will be calculated off of the SERVER_HOSTNAME (default) @@ -317,13 +420,14 @@ class DefaultConfig(object): FEATURE_BITTORRENT = False BITTORRENT_PIECE_SIZE = 512 * 1024 BITTORRENT_ANNOUNCE_URL = 'https://localhost:6881/announce' - BITTORRENT_FILENAME_PEPPER = '3ae93fef-c30a-427e-9ba0-eea0fd710419' + BITTORRENT_FILENAME_PEPPER = str(uuid4()) BITTORRENT_WEBSEED_LIFETIME = 3600 # "Secret" key for generating encrypted paging tokens. Only needed to be secret to # hide the ID range for production (in which this value is overridden). Should *not* # be relied upon for secure encryption otherwise. - PAGE_TOKEN_KEY = 'um=/?Kqgp)2yQaS/A6C{NL=dXE&>C:}(' + # This value is a Fernet key and should be 32bytes URL-safe base64 encoded. + PAGE_TOKEN_KEY = '0OYrc16oBuksR8T3JGB-xxYSlZ2-7I_zzqrLzggBJ58=' # The timeout for service key approval. UNAPPROVED_SERVICE_KEY_TTL_SEC = 60 * 60 * 24 # One day @@ -335,11 +439,171 @@ class DefaultConfig(object): # lowest user in the database will be used. SERVICE_LOG_ACCOUNT_ID = None - # The location of the private key generated for this instance - INSTANCE_SERVICE_KEY_LOCATION = 'conf/quay.pem' + # The service key ID for the instance service. + # NOTE: If changed, jwtproxy_conf.yaml.jnj must also be updated. + INSTANCE_SERVICE_KEY_SERVICE = 'quay' - # This instance's service key expiration in minutes + # The location of the key ID file generated for this instance. + INSTANCE_SERVICE_KEY_KID_LOCATION = os.path.join(CONF_DIR, 'quay.kid') + + # The location of the private key generated for this instance. + # NOTE: If changed, jwtproxy_conf.yaml.jnj must also be updated. + INSTANCE_SERVICE_KEY_LOCATION = os.path.join(CONF_DIR, 'quay.pem') + + # This instance's service key expiration in minutes. INSTANCE_SERVICE_KEY_EXPIRATION = 120 - # Number of minutes between expiration refresh in minutes - INSTANCE_SERVICE_KEY_REFRESH = 60 + # Number of minutes between expiration refresh in minutes. Should be the expiration / 2 minus + # some additional window time. + INSTANCE_SERVICE_KEY_REFRESH = 55 + + # The whitelist of client IDs for OAuth applications that allow for direct login. + DIRECT_OAUTH_CLIENTID_WHITELIST = [] + + # URL that specifies the location of the prometheus stats aggregator. + PROMETHEUS_AGGREGATOR_URL = 'http://localhost:9092' + + # Namespace prefix for all prometheus metrics. + PROMETHEUS_NAMESPACE = 'quay' + + # Overridable list of reverse DNS prefixes that are reserved for internal use on labels. + LABEL_KEY_RESERVED_PREFIXES = [] + + # Delays workers from starting until a random point in time between 0 and their regular interval. + STAGGER_WORKERS = True + + # Location of the static marketing site. + STATIC_SITE_BUCKET = None + + # Site key and secret key for using recaptcha. + FEATURE_RECAPTCHA = False + RECAPTCHA_SITE_KEY = None + RECAPTCHA_SECRET_KEY = None + + # Server where TUF metadata can be found + TUF_SERVER = None + + # Prefix to add to metadata e.g. // + TUF_GUN_PREFIX = None + + # Maximum size allowed for layers in the registry. + MAXIMUM_LAYER_SIZE = '20G' + + # Feature Flag: Whether team syncing from the backing auth is enabled. 
+ FEATURE_TEAM_SYNCING = False + TEAM_RESYNC_STALE_TIME = '30m' + TEAM_SYNC_WORKER_FREQUENCY = 60 # seconds + + # Feature Flag: If enabled, non-superusers can setup team syncing. + FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP = False + + # The default configurable tag expiration time for time machine. + DEFAULT_TAG_EXPIRATION = '2w' + + # The options to present in namespace settings for the tag expiration. If empty, no option + # will be given and the default will be displayed read-only. + TAG_EXPIRATION_OPTIONS = ['0s', '1d', '1w', '2w', '4w'] + + # Feature Flag: Whether users can view and change their tag expiration. + FEATURE_CHANGE_TAG_EXPIRATION = True + + # Defines a secret for enabling the health-check endpoint's debug information. + ENABLE_HEALTH_DEBUG_SECRET = None + + # The lifetime for a user recovery token before it becomes invalid. + USER_RECOVERY_TOKEN_LIFETIME = '30m' + + # If specified, when app specific passwords expire by default. + APP_SPECIFIC_TOKEN_EXPIRATION = None + + # Feature Flag: If enabled, users can create and use app specific tokens to login via the CLI. + FEATURE_APP_SPECIFIC_TOKENS = True + + # How long expired app specific tokens should remain visible to users before being automatically + # deleted. Set to None to turn off garbage collection. + EXPIRED_APP_SPECIFIC_TOKEN_GC = '1d' + + # The size of pages returned by the Docker V2 API. + V2_PAGINATION_SIZE = 50 + + # If enabled, ensures that API calls are made with the X-Requested-With header + # when called from a browser. + BROWSER_API_CALLS_XHR_ONLY = True + + # If set to a non-None integer value, the default number of maximum builds for a namespace. + DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT = None + + # If set to a non-None integer value, the default number of maximum builds for a namespace whose + # creator IP is deemed a threat. + THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT = None + + # The API Key to use when requesting IP information. + IP_DATA_API_KEY = None + + # For Billing Support Only: The number of allowed builds on a namespace that has been billed + # successfully. + BILLED_NAMESPACE_MAXIMUM_BUILD_COUNT = None + + # Configuration for the data model cache. + DATA_MODEL_CACHE_CONFIG = { + 'engine': 'memcached', + 'endpoint': ('127.0.0.1', 18080), + } + + # Defines the number of successive failures of a build trigger's build before the trigger is + # automatically disabled. + SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD = 100 + + # Defines the number of successive internal errors of a build trigger's build before the + # trigger is automatically disabled. + SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD = 5 + + # Defines the delay required (in seconds) before the last_accessed field of a user/robot or access + # token will be updated after the previous update. + LAST_ACCESSED_UPDATE_THRESHOLD_S = 60 + + # Defines the number of results per page used to show search results + SEARCH_RESULTS_PER_PAGE = 10 + + # Defines the maximum number of pages the user can paginate before they are limited + SEARCH_MAX_RESULT_PAGE_COUNT = 10 + + # Feature Flag: Whether to record when users were last accessed. + FEATURE_USER_LAST_ACCESSED = True + + # Feature Flag: Whether to allow users to retrieve aggregated log counts. + FEATURE_AGGREGATED_LOG_COUNT_RETRIEVAL = True + + # Feature Flag: Whether rate limiting is enabled. + FEATURE_RATE_LIMITS = False + + # Feature Flag: Whether to support log exporting. + FEATURE_LOG_EXPORT = True + + # Maximum number of action logs pages that can be returned via the API. 
+ ACTION_LOG_MAX_PAGE = None + + # Log model + LOGS_MODEL = 'database' + LOGS_MODEL_CONFIG = {} + + # Namespace in which all audit logging is disabled. + DISABLED_FOR_AUDIT_LOGS = [] + + # Namespace in which pull audit logging is disabled. + DISABLED_FOR_PULL_LOGS = [] + + # Feature Flag: Whether pull logs are disabled for free namespace. + FEATURE_DISABLE_PULL_LOGS_FOR_FREE_NAMESPACES = False + + # Feature Flag: If set to true, no account using blacklisted email addresses will be allowed + # to be created. + FEATURE_BLACKLISTED_EMAILS = False + + # The list of domains, including subdomains, for which any *new* User with a matching + # email address will be denied creation. This option is only used if + # FEATURE_BLACKLISTED_EMAILS is enabled. + BLACKLISTED_EMAIL_DOMAINS = [] + + # Feature Flag: Whether garbage collection is enabled. + FEATURE_GARBAGE_COLLECTION = True diff --git a/config_app/Procfile b/config_app/Procfile new file mode 100644 index 000000000..16b3fb8a4 --- /dev/null +++ b/config_app/Procfile @@ -0,0 +1,3 @@ +app: PYTHONPATH="../" gunicorn -c conf/gunicorn_local.py config_application:application +webpack: npm run watch-config-app + diff --git a/config_app/README.md b/config_app/README.md new file mode 100644 index 000000000..7f6ba22b7 --- /dev/null +++ b/config_app/README.md @@ -0,0 +1,106 @@ +# Quay config tool + +The Quay config tool is a project to ease the setup, modification, and deployment of Red Hat Quay (sometimes referred to as Red Hat Quay). + +The project was built by [Sam Chow] in the summer of 2018. + +[Sam Chow]: https://github.com/blueish + +## Project Features +* Isolated setup tool for creating the config +* Ability to download config as a tarball +* Ability to load your config from a tarball and modify it +* When running on Kubernetes, allows you to deploy your changes to the current cluster and cycles all pods +* When running on Kubernetes, allows you to modify the existing configuration + + +## Project Layout +- `conf/` - nginx/gunicorn configuration + +- `config_endpoints/` - backend flask endpoints for serving web and all other API endpoints + +- `config_util/` - utils used by api endpoints for accessing k8s, etc. + +- `config_util/config` - config providers used to manipulate the local config directory before being tarred, uploaded, etc. + +- `docs/` - some updated documentation on how to run the app in both a docker container and on kubernetes + +- `init/` - initial scripts/services started by the docker image + +- `js/` - all frontend javascript + +- `js/components` - mix of the old components ported over, and new components written for the config tool. +(config-setup-app is the entrypoint of the frontend) + +- `js/core-config-setup` - The main component responsible for modification of the config. +Holds most of the components that modify the configuration + +- `js/setup` - The modal component that covers the setup of the DB and SuperUser + +## Running the config tool +Currently, the config tool is still being built alongside the regular Quay container, and is started with the `config` argument to the image. A password is required to be +specified, which will then need to be entered with the username `quayconfig` in the browser. 
+
+```bash
+docker run {quay-image} config {password}
+```
+
+The password can also be specified via the `CONFIG_APP_PASSWORD` environment variable:
+
+```bash
+docker run -e CONFIG_APP_PASSWORD={password} {quay-image} config
+```
+
+
+## Local development
+If you wish to work on it locally, there's a script in the base dir of quay:
+```bash
+./local-config-app.sh
+```
+Webpack is set up for hot reloading, so the JS will be rebuilt as you work on it.
+
+
+## Local development on kubernetes
+Assuming you're running on minikube, you can build the docker image with the minikube docker daemon:
+```bash
+eval $(minikube docker-env)
+docker build -t config-app . # run in quay dir, not quay/config_app
+```
+
+You'll now have to create the namespace and config secret (and optionally the quay-enterprise app and nodeport):
+- [quay-enterprise-namespace.yml](files/quay-enterprise-namespace.yml)
+- [quay-enterprise-config-secret.yml](files/quay-enterprise-config-secret.yml)
+- [quay-enterprise-redis.yml](files/quay-enterprise-redis.yml)
+
+(Optional, use if you're testing the deployment feature on kube)
+- [quay-enterprise-app-rc.yml](files/quay-enterprise-app-rc.yml)
+- [quay-enterprise-service-nodeport.yml](files/quay-enterprise-service-nodeport.yml)
+
+And the following for the config tool:
+- [config-tool-service-nodeport.yml](docs/k8s_templates/config-tool-service-nodeport.yml)
+- [config-tool-serviceaccount.yml](docs/k8s_templates/config-tool-serviceaccount.yml)
+- [config-tool-servicetoken-role.yml](docs/k8s_templates/config-tool-servicetoken-role.yml)
+- [config-tool-servicetoken-role-binding.yml](docs/k8s_templates/config-tool-servicetoken-role-binding.yml)
+- [qe-config-tool.yml](docs/k8s_templates/qe-config-tool.yml)
+
+(Note: right now the config tool template uses the tag `config-tool:latest`, which is the image you built against the minikube docker daemon above.)
+
+Apply all of these to the cluster:
+```bash
+kubectl apply -f <path-to-yml>
+```
+
+You can get minikube to route you to the services:
+```bash
+minikube service quay-enterprise-config-tool -n quay-enterprise
+```
+
+It should open in your default browser.
+
+(Note: The config tool is only served over SSL and self-signs its certificates on startup, so you'll have to use https:// and pass through the warning in your browser to access it.)
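Once the config tool is reachable, it sits behind HTTP basic auth with the `quayconfig` user and the self-signed certificate mentioned in the note above; the nginx config added in this PR listens on 8443 inside the container. A quick way to check that it is up is to probe it from Python. This is only a hedged sketch: the URL and password below are placeholders, and skipping TLS verification is acceptable here solely because the certificate is self-signed for local development.

```python
# Hedged sketch: sanity-check a locally running config tool. The URL and password
# are placeholders (8443 is the `listen` port from config_app/conf/nginx.conf, and
# `quayconfig` is the basic-auth user from config_app/conf/htpasswd).
import requests
import urllib3

# The config tool self-signs its certificate, so suppress the verify=False warning.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

CONFIG_TOOL_URL = 'https://localhost:8443/'      # placeholder host/port
PASSWORD = 'the-password-passed-to-docker-run'   # placeholder

resp = requests.get(CONFIG_TOOL_URL, auth=('quayconfig', PASSWORD), verify=False)
print(resp.status_code)  # expect 200 once the container is up, 401 on a bad password
```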
+ +When you make changes to the app, you'll have to rebuild the image and cycle the deployment: +```bash +kubectl scale deploy --replicas=0 quay-enterprise-config-tool -n quay-enterprise +kubectl scale deploy --replicas=1 quay-enterprise-config-tool -n quay-enterprise +``` diff --git a/config_app/__init__.py b/config_app/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/config_app/_init_config.py b/config_app/_init_config.py new file mode 100644 index 000000000..8b0533570 --- /dev/null +++ b/config_app/_init_config.py @@ -0,0 +1,38 @@ +import os +import re +import subprocess + +# Note: this currently points to the directory above, since we're in the quay config_app dir +# TODO(config_extract): revert to root directory rather than the one above +ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +CONF_DIR = os.getenv("QUAYCONF", os.path.join(ROOT_DIR, "conf/")) +STATIC_DIR = os.path.join(ROOT_DIR, 'static/') +STATIC_LDN_DIR = os.path.join(STATIC_DIR, 'ldn/') +STATIC_FONTS_DIR = os.path.join(STATIC_DIR, 'fonts/') +TEMPLATE_DIR = os.path.join(ROOT_DIR, 'templates/') +IS_KUBERNETES = 'KUBERNETES_SERVICE_HOST' in os.environ + + +def _get_version_number_changelog(): + try: + with open(os.path.join(ROOT_DIR, 'CHANGELOG.md')) as f: + return re.search(r'(v[0-9]+\.[0-9]+\.[0-9]+)', f.readline()).group(0) + except IOError: + return '' + + +def _get_git_sha(): + if os.path.exists("GIT_HEAD"): + with open(os.path.join(ROOT_DIR, "GIT_HEAD")) as f: + return f.read() + else: + try: + return subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()[0:8] + except (OSError, subprocess.CalledProcessError): + pass + return "unknown" + + +__version__ = _get_version_number_changelog() +__gitrev__ = _get_git_sha() diff --git a/config_app/c_app.py b/config_app/c_app.py new file mode 100644 index 000000000..0df198dd1 --- /dev/null +++ b/config_app/c_app.py @@ -0,0 +1,47 @@ +import os +import logging + +from flask import Flask + +from data import database, model +from util.config.superusermanager import SuperUserManager +from util.ipresolver import NoopIPResolver + +from config_app._init_config import ROOT_DIR, IS_KUBERNETES +from config_app.config_util.config import get_config_provider +from util.security.instancekeys import InstanceKeys + +app = Flask(__name__) + +logger = logging.getLogger(__name__) + +OVERRIDE_CONFIG_DIRECTORY = os.path.join(ROOT_DIR, 'config_app/conf/stack') +INIT_SCRIPTS_LOCATION = '/conf/init/' + +is_testing = 'TEST' in os.environ +is_kubernetes = IS_KUBERNETES + +logger.debug('Configuration is on a kubernetes deployment: %s' % IS_KUBERNETES) + +config_provider = get_config_provider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py', + testing=is_testing) + +if is_testing: + from test.testconfig import TestConfig + + logger.debug('Loading test config.') + app.config.from_object(TestConfig()) +else: + from config import DefaultConfig + + logger.debug('Loading default config.') + app.config.from_object(DefaultConfig()) + app.teardown_request(database.close_db_filter) + +# Load the override config via the provider. 
+config_provider.update_app_config(app.config) +superusers = SuperUserManager(app) +ip_resolver = NoopIPResolver() +instance_keys = InstanceKeys(app) + +model.config.app_config = app.config diff --git a/config_app/conf/__init__.py b/config_app/conf/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/config_app/conf/dhparams.pem b/config_app/conf/dhparams.pem new file mode 100644 index 000000000..130ed84e3 --- /dev/null +++ b/config_app/conf/dhparams.pem @@ -0,0 +1,8 @@ +-----BEGIN DH PARAMETERS----- +MIIBCAKCAQEAk7fEh4MFr446aU61ZGxCl8VHvcJhDGcdd+3zaNxdWF7Wvr5QE8zX +QswoM5K2szlK7klcJOXer2IToHHQQn00nuWO3m6quZGV6EPbRmRKfRGa8pzSwH+R +Ph0OUpEQPh7zvegeVwEbrblD7i53ookbHlYGtxsPb28Y06OP5/xpks9C815Zy4gy +tx2yHi4FkFo52yErBF9jD/glsZYVHCo42LFrVGa5/7V0g++fG8yXCrBnqmz2d8FF +uU6/KJcmDCUn1m3mDfcf5HgeXSIsukW/XMZ3l9w1fdluJRwdEE9W2ePgqMiG3eC0 +2T1sPfXCdXPQ7/5Gzf1eMtRZ/McipxVbgwIBAg== +-----END DH PARAMETERS----- diff --git a/config_app/conf/gunicorn_local.py b/config_app/conf/gunicorn_local.py new file mode 100644 index 000000000..d0ea0a758 --- /dev/null +++ b/config_app/conf/gunicorn_local.py @@ -0,0 +1,26 @@ +import sys +import os +sys.path.append(os.path.join(os.path.dirname(__file__), "../")) + +import logging + +from Crypto import Random +from config_app.config_util.log import logfile_path + + +logconfig = logfile_path(debug=True) +bind = '0.0.0.0:5000' +workers = 1 +worker_class = 'gevent' +daemon = False +pythonpath = '.' +preload_app = True + +def post_fork(server, worker): + # Reset the Random library to ensure it won't raise the "PID check failed." error after + # gunicorn forks. + Random.atfork() + +def when_ready(server): + logger = logging.getLogger(__name__) + logger.debug('Starting local gunicorn with %s workers and %s worker class', workers, worker_class) diff --git a/config_app/conf/gunicorn_web.py b/config_app/conf/gunicorn_web.py new file mode 100644 index 000000000..14225fe72 --- /dev/null +++ b/config_app/conf/gunicorn_web.py @@ -0,0 +1,26 @@ +import sys +import os +sys.path.append(os.path.join(os.path.dirname(__file__), "../")) + +import logging + +from Crypto import Random +from config_app.config_util.log import logfile_path + + +logconfig = logfile_path(debug=True) + +bind = 'unix:/tmp/gunicorn_web.sock' +workers = 1 +worker_class = 'gevent' +pythonpath = '.' +preload_app = True + +def post_fork(server, worker): + # Reset the Random library to ensure it won't raise the "PID check failed." error after + # gunicorn forks. 
+ Random.atfork() + +def when_ready(server): + logger = logging.getLogger(__name__) + logger.debug('Starting local gunicorn with %s workers and %s worker class', workers, worker_class) diff --git a/config_app/conf/htpasswd b/config_app/conf/htpasswd new file mode 100644 index 000000000..eb49fca13 --- /dev/null +++ b/config_app/conf/htpasswd @@ -0,0 +1 @@ +quayconfig: \ No newline at end of file diff --git a/config_app/conf/http-base.conf b/config_app/conf/http-base.conf new file mode 100644 index 000000000..ad7409008 --- /dev/null +++ b/config_app/conf/http-base.conf @@ -0,0 +1,49 @@ +# vim: ft=nginx + +set_real_ip_from 0.0.0.0/0; +real_ip_recursive on; +log_format lb_logs '$remote_addr ($proxy_protocol_addr) ' + '- $remote_user [$time_local] ' + '"$request" $status $body_bytes_sent ' + '"$http_referer" "$http_user_agent" ' + '($request_time $request_length $upstream_response_time)'; + +types_hash_max_size 2048; +include /etc/opt/rh/rh-nginx112/nginx/mime.types; + +default_type application/octet-stream; + +access_log /var/log/nginx/access.log; +error_log /var/log/nginx/error.log; +client_body_temp_path /tmp/nginx 1 2; +proxy_temp_path /tmp/nginx-proxy; +fastcgi_temp_path /tmp/nginx-fastcgi; +uwsgi_temp_path /tmp/nginx-uwsgi; +scgi_temp_path /tmp/nginx-scgi; + +sendfile on; + +gzip on; +gzip_http_version 1.0; +gzip_proxied any; +gzip_min_length 500; +gzip_disable "MSIE [1-6]\."; +gzip_types text/plain text/xml text/css + text/javascript application/x-javascript + application/javascript image/svg+xml + application/octet-stream; + +map $proxy_protocol_addr $proper_forwarded_for { + "" $proxy_add_x_forwarded_for; + default $proxy_protocol_addr; +} + +map $http_x_forwarded_proto $proper_scheme { + default $scheme; + https https; +} + +upstream web_app_server { + server unix:/tmp/gunicorn_web.sock fail_timeout=0; +} + diff --git a/config_app/conf/logging.conf b/config_app/conf/logging.conf new file mode 100644 index 000000000..3f1d3a33f --- /dev/null +++ b/config_app/conf/logging.conf @@ -0,0 +1,33 @@ +[loggers] +keys=root,gunicorn.error,gunicorn.access + +[handlers] +keys=console + +[formatters] +keys=generic,json + +[logger_root] +level=INFO +handlers=console + +[handler_console] +class=StreamHandler +formatter=generic +args=(sys.stdout, ) + +[formatter_generic] +format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s +class=logging.Formatter + +[logger_gunicorn.error] +level=ERROR +handlers=console +propagate=0 +qualname=gunicorn.error + +[logger_gunicorn.access] +handlers=console +propagate=0 +qualname=gunicorn.access +level=DEBUG diff --git a/config_app/conf/logging_debug.conf b/config_app/conf/logging_debug.conf new file mode 100644 index 000000000..b57ff1519 --- /dev/null +++ b/config_app/conf/logging_debug.conf @@ -0,0 +1,38 @@ +[loggers] +keys=root,boto,gunicorn.error,gunicorn.access + +[handlers] +keys=console + +[formatters] +keys=generic,json + +[logger_root] +level=DEBUG +handlers=console + +[logger_boto] +level=INFO +handlers=console +qualname=boto + +[logger_gunicorn.access] +handlers=console +propagate=0 +qualname=gunicorn.access +level=DEBUG + +[handler_console] +class=StreamHandler +formatter=generic +args=(sys.stdout, ) + +[logger_gunicorn.error] +level=ERROR +handlers=console +propagate=0 +qualname=gunicorn.error + +[formatter_generic] +format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s +class=logging.Formatter diff --git a/config_app/conf/logging_debug_json.conf b/config_app/conf/logging_debug_json.conf new file mode 100644 index 
000000000..21eb994a8 --- /dev/null +++ b/config_app/conf/logging_debug_json.conf @@ -0,0 +1,38 @@ +[loggers] +keys=root,boto,gunicorn.error,gunicorn.access + +[handlers] +keys=console + +[formatters] +keys=generic,json + +[logger_root] +level=DEBUG +handlers=console + +[logger_boto] +level=INFO +handlers=console +qualname=boto + +[logger_gunicorn.access] +handlers=console +propagate=0 +qualname=gunicorn.access +level=DEBUG + +[handler_console] +class=StreamHandler +formatter=json +args=(sys.stdout, ) + +[logger_gunicorn.error] +level=ERROR +handlers=console +propagate=0 +qualname=gunicorn.error + +[formatter_generic] +format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s +class=logging.Formatter diff --git a/config_app/conf/logging_json.conf b/config_app/conf/logging_json.conf new file mode 100644 index 000000000..05d4c5dde --- /dev/null +++ b/config_app/conf/logging_json.conf @@ -0,0 +1,33 @@ +[loggers] +keys=root,gunicorn.error,gunicorn.access + +[handlers] +keys=console + +[formatters] +keys=json,generic + +[logger_root] +level=INFO +handlers=console + +[handler_console] +class=StreamHandler +formatter=json +args=(sys.stdout, ) + +[formatter_generic] +format=%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s +class=logging.Formatter + +[logger_gunicorn.error] +level=ERROR +handlers=console +propagate=0 +qualname=gunicorn.error + +[logger_gunicorn.access] +handlers=console +propagate=0 +qualname=gunicorn.access +level=DEBUG diff --git a/conf/nginx.conf b/config_app/conf/nginx.conf similarity index 50% rename from conf/nginx.conf rename to config_app/conf/nginx.conf index adfe42f1c..99812a602 100644 --- a/conf/nginx.conf +++ b/config_app/conf/nginx.conf @@ -4,45 +4,23 @@ include root-base.conf; http { include http-base.conf; - include hosted-http-base.conf; - include rate-limiting.conf; - ssl_certificate ./stack/ssl.cert; - ssl_certificate_key ./stack/ssl.key; + ssl_certificate /quay-registry/config_app/quay-config.cert; + ssl_certificate_key /quay-registry/config_app/quay-config.key; ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA'; - ssl_protocols TLSv1 TLSv1.1 TLSv1.2; - ssl_session_cache shared:SSL:10m; - ssl_session_timeout 5m; + ssl_protocols TLSv1.1 TLSv1.2; + ssl_session_cache shared:SSL:60m; + ssl_session_timeout 2h; + ssl_session_tickets on; ssl_prefer_server_ciphers on; ssl_dhparam dhparams.pem; - # TODO: learn wtf OCSP stapling is and do it the right way - #ssl_stapling on; - #ssl_stapling_verify off; - #ssl_trusted_certificate ./stack/ssl.key; - server { include server-base.conf; - listen 443 ssl http2 default; + listen 8443 ssl http2 default; - ssl on; - - # This header must be set only for HTTPS - add_header Strict-Transport-Security "max-age=63072000; preload"; - } - - server { - include server-base.conf; - - listen 8443 ssl http2 default proxy_protocol; - ssl on; - - # This 
header must be set only for HTTPS - add_header Strict-Transport-Security "max-age=63072000; preload"; - - real_ip_header proxy_protocol; - - access_log /dev/stdout lb_pp; + access_log /var/log/nginx/access.log lb_logs; + error_log /var/log/nginx/error.log warn; } } diff --git a/config_app/conf/root-base.conf b/config_app/conf/root-base.conf new file mode 100644 index 000000000..972f883dd --- /dev/null +++ b/config_app/conf/root-base.conf @@ -0,0 +1,15 @@ +# vim: ft=nginx + +pid /tmp/nginx.pid; +error_log /var/log/nginx/error.log; + +worker_processes auto; +worker_priority -10; +worker_rlimit_nofile 10240; + +daemon off; + +events { + worker_connections 10240; + accept_mutex off; +} diff --git a/config_app/conf/server-base.conf b/config_app/conf/server-base.conf new file mode 100644 index 000000000..b71f49c9e --- /dev/null +++ b/config_app/conf/server-base.conf @@ -0,0 +1,21 @@ +# vim: ft=nginx + +server_name _; + +# Proxy Headers +proxy_set_header X-Forwarded-For $proper_forwarded_for; +proxy_set_header X-Forwarded-Proto $proper_scheme; +proxy_set_header Host $host; +proxy_redirect off; + +proxy_set_header Transfer-Encoding $http_transfer_encoding; + +# The DB migrations sometimes take a while, so increase timeout so we don't report an error. +proxy_read_timeout 500s; + +location / { + auth_basic "Quay config tool"; + auth_basic_user_file htpasswd; + proxy_pass http://web_app_server; +} + diff --git a/config_app/conf/supervisord.conf b/config_app/conf/supervisord.conf new file mode 100644 index 000000000..2cd19367a --- /dev/null +++ b/config_app/conf/supervisord.conf @@ -0,0 +1,44 @@ +; TODO: Dockerfile - pip install supervisor supervisor-stdout + +[supervisord] +nodaemon=true + +[unix_http_server] +file=%(ENV_QUAYDIR)s/config_app/conf/supervisord.sock +user=root + +[supervisorctl] +serverurl=unix:///%(ENV_QUAYDIR)s/config_app/conf/supervisord.sock + +[rpcinterface:supervisor] +supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface + +[eventlistener:stdout] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command = supervisor_stdout +buffer_size = 1024 +events = PROCESS_LOG +result_handler = supervisor_stdout:event_handler + +[program:gunicorn-config] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=gunicorn -c %(ENV_QUAYDIR)s/config_app/conf/gunicorn_web.py config_application:application +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true + +[program:nginx] +environment= + PYTHONPATH=%(ENV_QUAYDIR)s +command=nginx -c %(ENV_QUAYDIR)s/config_app/conf/nginx.conf +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_events_enabled = true +stderr_events_enabled = true diff --git a/config_app/config_application.py b/config_app/config_application.py new file mode 100644 index 000000000..43676e354 --- /dev/null +++ b/config_app/config_application.py @@ -0,0 +1,8 @@ +from config_app.c_app import app as application + +# Bind all of the blueprints +import config_web + +if __name__ == '__main__': + logging.config.fileConfig(logfile_path(debug=True), disable_existing_loggers=False) + application.run(port=5000, debug=True, threaded=True, host='0.0.0.0') diff --git a/config_app/config_endpoints/__init__.py b/config_app/config_endpoints/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/config_app/config_endpoints/api/__init__.py b/config_app/config_endpoints/api/__init__.py new 
file mode 100644 index 000000000..c80fc1c9c --- /dev/null +++ b/config_app/config_endpoints/api/__init__.py @@ -0,0 +1,158 @@ +import logging + +from flask import Blueprint, request, abort +from flask_restful import Resource, Api +from flask_restful.utils.cors import crossdomain +from data import model +from email.utils import formatdate +from calendar import timegm +from functools import partial, wraps +from jsonschema import validate, ValidationError + +from config_app.c_app import app, IS_KUBERNETES +from config_app.config_endpoints.exception import InvalidResponse, InvalidRequest + +logger = logging.getLogger(__name__) +api_bp = Blueprint('api', __name__) + +CROSS_DOMAIN_HEADERS = ['Authorization', 'Content-Type', 'X-Requested-With'] + + +class ApiExceptionHandlingApi(Api): + pass + + @crossdomain(origin='*', headers=CROSS_DOMAIN_HEADERS) + def handle_error(self, error): + return super(ApiExceptionHandlingApi, self).handle_error(error) + + +api = ApiExceptionHandlingApi() + +api.init_app(api_bp) + +def log_action(kind, user_or_orgname, metadata=None, repo=None, repo_name=None): + if not metadata: + metadata = {} + + if repo: + repo_name = repo.name + + model.log.log_action(kind, user_or_orgname, repo_name, user_or_orgname, request.remote_addr, metadata) + +def format_date(date): + """ Output an RFC822 date format. """ + if date is None: + return None + return formatdate(timegm(date.utctimetuple())) + + + +def resource(*urls, **kwargs): + def wrapper(api_resource): + if not api_resource: + return None + + api_resource.registered = True + api.add_resource(api_resource, *urls, **kwargs) + return api_resource + + return wrapper + + +class ApiResource(Resource): + registered = False + method_decorators = [] + + def options(self): + return None, 200 + + +def add_method_metadata(name, value): + def modifier(func): + if func is None: + return None + + if '__api_metadata' not in dir(func): + func.__api_metadata = {} + func.__api_metadata[name] = value + return func + + return modifier + + +def method_metadata(func, name): + if func is None: + return None + + if '__api_metadata' in dir(func): + return func.__api_metadata.get(name, None) + return None + + +def no_cache(f): + @wraps(f) + def add_no_cache(*args, **kwargs): + response = f(*args, **kwargs) + if response is not None: + response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate' + return response + return add_no_cache + + +def define_json_response(schema_name): + def wrapper(func): + @add_method_metadata('response_schema', schema_name) + @wraps(func) + def wrapped(self, *args, **kwargs): + schema = self.schemas[schema_name] + resp = func(self, *args, **kwargs) + + if app.config['TESTING']: + try: + validate(resp, schema) + except ValidationError as ex: + raise InvalidResponse(ex.message) + + return resp + return wrapped + return wrapper + + +def validate_json_request(schema_name, optional=False): + def wrapper(func): + @add_method_metadata('request_schema', schema_name) + @wraps(func) + def wrapped(self, *args, **kwargs): + schema = self.schemas[schema_name] + try: + json_data = request.get_json() + if json_data is None: + if not optional: + raise InvalidRequest('Missing JSON body') + else: + validate(json_data, schema) + return func(self, *args, **kwargs) + except ValidationError as ex: + raise InvalidRequest(ex.message) + return wrapped + return wrapper + +def kubernetes_only(f): + """ Aborts the request with a 400 if the app is not running on kubernetes """ + @wraps(f) + def abort_if_not_kube(*args, **kwargs): + if not 
IS_KUBERNETES: + abort(400) + + return f(*args, **kwargs) + return abort_if_not_kube + +nickname = partial(add_method_metadata, 'nickname') + + +import config_app.config_endpoints.api.discovery +import config_app.config_endpoints.api.kube_endpoints +import config_app.config_endpoints.api.suconfig +import config_app.config_endpoints.api.superuser +import config_app.config_endpoints.api.tar_config_loader +import config_app.config_endpoints.api.user diff --git a/config_app/config_endpoints/api/discovery.py b/config_app/config_endpoints/api/discovery.py new file mode 100644 index 000000000..183963ea3 --- /dev/null +++ b/config_app/config_endpoints/api/discovery.py @@ -0,0 +1,254 @@ +# TODO to extract the discovery stuff into a util at the top level and then use it both here and old discovery.py +import logging +import sys +from collections import OrderedDict + +from config_app.c_app import app +from config_app.config_endpoints.api import method_metadata +from config_app.config_endpoints.common import fully_qualified_name, PARAM_REGEX, TYPE_CONVERTER + +logger = logging.getLogger(__name__) + + +def generate_route_data(): + include_internal = True + compact = True + + def swagger_parameter(name, description, kind='path', param_type='string', required=True, + enum=None, schema=None): + # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#parameterObject + parameter_info = { + 'name': name, + 'in': kind, + 'required': required + } + + if schema: + parameter_info['schema'] = { + '$ref': '#/definitions/%s' % schema + } + else: + parameter_info['type'] = param_type + + if enum is not None and len(list(enum)) > 0: + parameter_info['enum'] = list(enum) + + return parameter_info + + paths = {} + models = {} + tags = [] + tags_added = set() + operation_ids = set() + + for rule in app.url_map.iter_rules(): + endpoint_method = app.view_functions[rule.endpoint] + + # Verify that we have a view class for this API method. + if not 'view_class' in dir(endpoint_method): + continue + + view_class = endpoint_method.view_class + + # Hide the class if it is internal. + internal = method_metadata(view_class, 'internal') + if not include_internal and internal: + continue + + # Build the tag. + parts = fully_qualified_name(view_class).split('.') + tag_name = parts[-2] + if not tag_name in tags_added: + tags_added.add(tag_name) + tags.append({ + 'name': tag_name, + 'description': (sys.modules[view_class.__module__].__doc__ or '').strip() + }) + + # Build the Swagger data for the path. + swagger_path = PARAM_REGEX.sub(r'{\2}', rule.rule) + full_name = fully_qualified_name(view_class) + path_swagger = { + 'x-name': full_name, + 'x-path': swagger_path, + 'x-tag': tag_name + } + + related_user_res = method_metadata(view_class, 'related_user_resource') + if related_user_res is not None: + path_swagger['x-user-related'] = fully_qualified_name(related_user_res) + + paths[swagger_path] = path_swagger + + # Add any global path parameters. + param_data_map = view_class.__api_path_params if '__api_path_params' in dir( + view_class) else {} + if param_data_map: + path_parameters_swagger = [] + for path_parameter in param_data_map: + description = param_data_map[path_parameter].get('description') + path_parameters_swagger.append(swagger_parameter(path_parameter, description)) + + path_swagger['parameters'] = path_parameters_swagger + + # Add the individual HTTP operations. 
+ method_names = list(rule.methods.difference(['HEAD', 'OPTIONS'])) + for method_name in method_names: + # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#operation-object + method = getattr(view_class, method_name.lower(), None) + if method is None: + logger.debug('Unable to find method for %s in class %s', method_name, view_class) + continue + + operationId = method_metadata(method, 'nickname') + operation_swagger = { + 'operationId': operationId, + 'parameters': [], + } + + if operationId is None: + continue + + if operationId in operation_ids: + raise Exception('Duplicate operation Id: %s' % operationId) + + operation_ids.add(operationId) + + # Mark the method as internal. + internal = method_metadata(method, 'internal') + if internal is not None: + operation_swagger['x-internal'] = True + + if include_internal: + requires_fresh_login = method_metadata(method, 'requires_fresh_login') + if requires_fresh_login is not None: + operation_swagger['x-requires-fresh-login'] = True + + # Add the path parameters. + if rule.arguments: + for path_parameter in rule.arguments: + description = param_data_map.get(path_parameter, {}).get('description') + operation_swagger['parameters'].append( + swagger_parameter(path_parameter, description)) + + # Add the query parameters. + if '__api_query_params' in dir(method): + for query_parameter_info in method.__api_query_params: + name = query_parameter_info['name'] + description = query_parameter_info['help'] + param_type = TYPE_CONVERTER[query_parameter_info['type']] + required = query_parameter_info['required'] + + operation_swagger['parameters'].append( + swagger_parameter(name, description, kind='query', + param_type=param_type, + required=required, + enum=query_parameter_info['choices'])) + + # Add the OAuth security block. + # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#securityRequirementObject + scope = method_metadata(method, 'oauth2_scope') + if scope and not compact: + operation_swagger['security'] = [{'oauth2_implicit': [scope.scope]}] + + # Add the responses block. + # https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#responsesObject + response_schema_name = method_metadata(method, 'response_schema') + if not compact: + if response_schema_name: + models[response_schema_name] = view_class.schemas[response_schema_name] + + models['ApiError'] = { + 'type': 'object', + 'properties': { + 'status': { + 'type': 'integer', + 'description': 'Status code of the response.' + }, + 'type': { + 'type': 'string', + 'description': 'Reference to the type of the error.' + }, + 'detail': { + 'type': 'string', + 'description': 'Details about the specific instance of the error.' + }, + 'title': { + 'type': 'string', + 'description': 'Unique error code to identify the type of error.' 
+ }, + 'error_message': { + 'type': 'string', + 'description': 'Deprecated; alias for detail' + }, + 'error_type': { + 'type': 'string', + 'description': 'Deprecated; alias for detail' + } + }, + 'required': [ + 'status', + 'type', + 'title', + ] + } + + responses = { + '400': { + 'description': 'Bad Request', + }, + + '401': { + 'description': 'Session required', + }, + + '403': { + 'description': 'Unauthorized access', + }, + + '404': { + 'description': 'Not found', + }, + } + + for _, body in responses.items(): + body['schema'] = {'$ref': '#/definitions/ApiError'} + + if method_name == 'DELETE': + responses['204'] = { + 'description': 'Deleted' + } + elif method_name == 'POST': + responses['201'] = { + 'description': 'Successful creation' + } + else: + responses['200'] = { + 'description': 'Successful invocation' + } + + if response_schema_name: + responses['200']['schema'] = { + '$ref': '#/definitions/%s' % response_schema_name + } + + operation_swagger['responses'] = responses + + # Add the request block. + request_schema_name = method_metadata(method, 'request_schema') + if request_schema_name and not compact: + models[request_schema_name] = view_class.schemas[request_schema_name] + + operation_swagger['parameters'].append( + swagger_parameter('body', 'Request body contents.', kind='body', + schema=request_schema_name)) + + # Add the operation to the parent path. + if not internal or (internal and include_internal): + path_swagger[method_name.lower()] = operation_swagger + + tags.sort(key=lambda t: t['name']) + paths = OrderedDict(sorted(paths.items(), key=lambda p: p[1]['x-tag'])) + + if compact: + return {'paths': paths} diff --git a/config_app/config_endpoints/api/kube_endpoints.py b/config_app/config_endpoints/api/kube_endpoints.py new file mode 100644 index 000000000..a7143412d --- /dev/null +++ b/config_app/config_endpoints/api/kube_endpoints.py @@ -0,0 +1,143 @@ +import logging + +from flask import request, make_response + +from config_app.config_util.config import get_config_as_kube_secret +from data.database import configure + +from config_app.c_app import app, config_provider +from config_app.config_endpoints.api import resource, ApiResource, nickname, kubernetes_only, validate_json_request +from config_app.config_util.k8saccessor import KubernetesAccessorSingleton, K8sApiException + +logger = logging.getLogger(__name__) + +@resource('/v1/kubernetes/deployments/') +class SuperUserKubernetesDeployment(ApiResource): + """ Resource for the getting the status of Red Hat Quay deployments and cycling them """ + schemas = { + 'ValidateDeploymentNames': { + 'type': 'object', + 'description': 'Validates deployment names for cycling', + 'required': [ + 'deploymentNames' + ], + 'properties': { + 'deploymentNames': { + 'type': 'array', + 'description': 'The names of the deployments to cycle' + }, + }, + } + } + + @kubernetes_only + @nickname('scGetNumDeployments') + def get(self): + return KubernetesAccessorSingleton.get_instance().get_qe_deployments() + + @kubernetes_only + @validate_json_request('ValidateDeploymentNames') + @nickname('scCycleQEDeployments') + def put(self): + deployment_names = request.get_json()['deploymentNames'] + return KubernetesAccessorSingleton.get_instance().cycle_qe_deployments(deployment_names) + + +@resource('/v1/kubernetes/deployment//status') +class QEDeploymentRolloutStatus(ApiResource): + @kubernetes_only + @nickname('scGetDeploymentRolloutStatus') + def get(self, deployment): + deployment_rollout_status = 
KubernetesAccessorSingleton.get_instance().get_deployment_rollout_status(deployment) + return { + 'status': deployment_rollout_status.status, + 'message': deployment_rollout_status.message, + } + + +@resource('/v1/kubernetes/deployments/rollback') +class QEDeploymentRollback(ApiResource): + """ Resource for rolling back deployments """ + schemas = { + 'ValidateDeploymentNames': { + 'type': 'object', + 'description': 'Validates deployment names for rolling back', + 'required': [ + 'deploymentNames' + ], + 'properties': { + 'deploymentNames': { + 'type': 'array', + 'description': 'The names of the deployments to rollback' + }, + }, + } + } + + @kubernetes_only + @nickname('scRollbackDeployments') + @validate_json_request('ValidateDeploymentNames') + def post(self): + """ + Returns the config to its original state and rolls back deployments + :return: + """ + deployment_names = request.get_json()['deploymentNames'] + + # To roll back a deployment, we must do 2 things: + # 1. Roll back the config secret to its old value (discarding changes we made in this session) + # 2. Trigger a rollback to the previous revision, so that the pods will be restarted with + # the old config + old_secret = get_config_as_kube_secret(config_provider.get_old_config_dir()) + kube_accessor = KubernetesAccessorSingleton.get_instance() + kube_accessor.replace_qe_secret(old_secret) + + try: + for name in deployment_names: + kube_accessor.rollback_deployment(name) + except K8sApiException as e: + logger.exception('Failed to rollback deployment.') + return make_response(e.message, 503) + + return make_response('Ok', 204) + + +@resource('/v1/kubernetes/config') +class SuperUserKubernetesConfiguration(ApiResource): + """ Resource for saving the config files to kubernetes secrets. """ + + @kubernetes_only + @nickname('scDeployConfiguration') + def post(self): + try: + new_secret = get_config_as_kube_secret(config_provider.get_config_dir_path()) + KubernetesAccessorSingleton.get_instance().replace_qe_secret(new_secret) + except K8sApiException as e: + logger.exception('Failed to deploy qe config secret to kubernetes.') + return make_response(e.message, 503) + + return make_response('Ok', 201) + + +@resource('/v1/kubernetes/config/populate') +class KubernetesConfigurationPopulator(ApiResource): + """ Resource for populating the local configuration from the cluster's kubernetes secrets. 
""" + + @kubernetes_only + @nickname('scKubePopulateConfig') + def post(self): + # Get a clean transient directory to write the config into + config_provider.new_config_dir() + + kube_accessor = KubernetesAccessorSingleton.get_instance() + kube_accessor.save_secret_to_directory(config_provider.get_config_dir_path()) + config_provider.create_copy_of_config_dir() + + # We update the db configuration to connect to their specified one + # (Note, even if this DB isn't valid, it won't affect much in the config app, since we'll report an error, + # and all of the options create a new clean dir, so we'll never pollute configs) + combined = dict(**app.config) + combined.update(config_provider.get_config()) + configure(combined) + + return 200 diff --git a/config_app/config_endpoints/api/suconfig.py b/config_app/config_endpoints/api/suconfig.py new file mode 100644 index 000000000..810d4a229 --- /dev/null +++ b/config_app/config_endpoints/api/suconfig.py @@ -0,0 +1,302 @@ +import logging + +from flask import abort, request + +from config_app.config_endpoints.api.suconfig_models_pre_oci import pre_oci_model as model +from config_app.config_endpoints.api import resource, ApiResource, nickname, validate_json_request +from config_app.c_app import (app, config_provider, superusers, ip_resolver, + instance_keys, INIT_SCRIPTS_LOCATION) + +from data.database import configure +from data.runmigration import run_alembic_migration +from util.config.configutil import add_enterprise_config_defaults +from util.config.validator import validate_service_for_config, ValidatorContext, \ + is_valid_config_upload_filename + +logger = logging.getLogger(__name__) + + +def database_is_valid(): + """ Returns whether the database, as configured, is valid. """ + return model.is_valid() + + +def database_has_users(): + """ Returns whether the database has any users defined. """ + return model.has_users() + + +@resource('/v1/superuser/config') +class SuperUserConfig(ApiResource): + """ Resource for fetching and updating the current configuration, if any. """ + schemas = { + 'UpdateConfig': { + 'type': 'object', + 'description': 'Updates the YAML config file', + 'required': [ + 'config', + ], + 'properties': { + 'config': { + 'type': 'object' + }, + 'password': { + 'type': 'string' + }, + }, + }, + } + + @nickname('scGetConfig') + def get(self): + """ Returns the currently defined configuration, if any. """ + config_object = config_provider.get_config() + return { + 'config': config_object + } + + @nickname('scUpdateConfig') + @validate_json_request('UpdateConfig') + def put(self): + """ Updates the config override file. """ + # Note: This method is called to set the database configuration before super users exists, + # so we also allow it to be called if there is no valid registry configuration setup. + config_object = request.get_json()['config'] + + # Add any enterprise defaults missing from the config. + add_enterprise_config_defaults(config_object, app.config['SECRET_KEY']) + + # Write the configuration changes to the config override file. 
+ config_provider.save_config(config_object) + + # now try to connect to the db provided in their config to validate it works + combined = dict(**app.config) + combined.update(config_provider.get_config()) + configure(combined, testing=app.config['TESTING']) + + return { + 'exists': True, + 'config': config_object + } + + +@resource('/v1/superuser/registrystatus') +class SuperUserRegistryStatus(ApiResource): + """ Resource for determining the status of the registry, such as if config exists, + if a database is configured, and if it has any defined users. + """ + + @nickname('scRegistryStatus') + def get(self): + """ Returns the status of the registry. """ + # If there is no config file, we need to setup the database. + if not config_provider.config_exists(): + return { + 'status': 'config-db' + } + + # If the database isn't yet valid, then we need to set it up. + if not database_is_valid(): + return { + 'status': 'setup-db' + } + + config = config_provider.get_config() + if config and config.get('SETUP_COMPLETE'): + return { + 'status': 'config' + } + + return { + 'status': 'create-superuser' if not database_has_users() else 'config' + } + + +class _AlembicLogHandler(logging.Handler): + def __init__(self): + super(_AlembicLogHandler, self).__init__() + self.records = [] + + def emit(self, record): + self.records.append({ + 'level': record.levelname, + 'message': record.getMessage() + }) + + +def _reload_config(): + combined = dict(**app.config) + combined.update(config_provider.get_config()) + configure(combined) + return combined + + +@resource('/v1/superuser/setupdb') +class SuperUserSetupDatabase(ApiResource): + """ Resource for invoking alembic to setup the database. """ + + @nickname('scSetupDatabase') + def get(self): + """ Invokes the alembic upgrade process. """ + # Note: This method is called after the database configured is saved, but before the + # database has any tables. Therefore, we only allow it to be run in that unique case. + if config_provider.config_exists() and not database_is_valid(): + combined = _reload_config() + + app.config['DB_URI'] = combined['DB_URI'] + db_uri = app.config['DB_URI'] + escaped_db_uri = db_uri.replace('%', '%%') + + log_handler = _AlembicLogHandler() + + try: + run_alembic_migration(escaped_db_uri, log_handler, setup_app=False) + except Exception as ex: + return { + 'error': str(ex) + } + + return { + 'logs': log_handler.records + } + + abort(403) + + +@resource('/v1/superuser/config/createsuperuser') +class SuperUserCreateInitialSuperUser(ApiResource): + """ Resource for creating the initial super user. """ + schemas = { + 'CreateSuperUser': { + 'type': 'object', + 'description': 'Information for creating the initial super user', + 'required': [ + 'username', + 'password', + 'email' + ], + 'properties': { + 'username': { + 'type': 'string', + 'description': 'The username for the superuser' + }, + 'password': { + 'type': 'string', + 'description': 'The password for the superuser' + }, + 'email': { + 'type': 'string', + 'description': 'The e-mail address for the superuser' + }, + }, + }, + } + + @nickname('scCreateInitialSuperuser') + @validate_json_request('CreateSuperUser') + def post(self): + """ Creates the initial super user, updates the underlying configuration and + sets the current session to have that super user. """ + + _reload_config() + + # Special security check: This method is only accessible when: + # - There is a valid config YAML file. 
+ # - There are currently no users in the database (clean install) + # + # We do this special security check because at the point this method is called, the database + # is clean but does not (yet) have any super users for our permissions code to check against. + if config_provider.config_exists() and not database_has_users(): + data = request.get_json() + username = data['username'] + password = data['password'] + email = data['email'] + + # Create the user in the database. + superuser_uuid = model.create_superuser(username, password, email) + + # Add the user to the config. + config_object = config_provider.get_config() + config_object['SUPER_USERS'] = [username] + config_provider.save_config(config_object) + + # Update the in-memory config for the new superuser. + superusers.register_superuser(username) + + return { + 'status': True + } + + abort(403) + + +@resource('/v1/superuser/config/validate/') +class SuperUserConfigValidate(ApiResource): + """ Resource for validating a block of configuration against an external service. """ + schemas = { + 'ValidateConfig': { + 'type': 'object', + 'description': 'Validates configuration', + 'required': [ + 'config' + ], + 'properties': { + 'config': { + 'type': 'object' + }, + 'password': { + 'type': 'string', + 'description': 'The users password, used for auth validation' + } + }, + }, + } + + @nickname('scValidateConfig') + @validate_json_request('ValidateConfig') + def post(self, service): + """ Validates the given config for the given service. """ + # Note: This method is called to validate the database configuration before super users exists, + # so we also allow it to be called if there is no valid registry configuration setup. Note that + # this is also safe since this method does not access any information not given in the request. + config = request.get_json()['config'] + validator_context = ValidatorContext.from_app(app, config, + request.get_json().get('password', ''), + instance_keys=instance_keys, + ip_resolver=ip_resolver, + config_provider=config_provider, + init_scripts_location=INIT_SCRIPTS_LOCATION) + + return validate_service_for_config(service, validator_context) + + +@resource('/v1/superuser/config/file/') +class SuperUserConfigFile(ApiResource): + """ Resource for fetching the status of config files and overriding them. """ + + @nickname('scConfigFileExists') + def get(self, filename): + """ Returns whether the configuration file with the given name exists. """ + if not is_valid_config_upload_filename(filename): + abort(404) + + return { + 'exists': config_provider.volume_file_exists(filename) + } + + @nickname('scUpdateConfigFile') + def post(self, filename): + """ Updates the configuration file with the given name. """ + if not is_valid_config_upload_filename(filename): + abort(404) + + # Note: This method can be called before the configuration exists + # to upload the database SSL cert. 
+ uploaded_file = request.files['file'] + if not uploaded_file: + abort(400) + + config_provider.save_volume_file(filename, uploaded_file) + return { + 'status': True + } diff --git a/config_app/config_endpoints/api/suconfig_models_interface.py b/config_app/config_endpoints/api/suconfig_models_interface.py new file mode 100644 index 000000000..9f8cbd0cb --- /dev/null +++ b/config_app/config_endpoints/api/suconfig_models_interface.py @@ -0,0 +1,39 @@ +from abc import ABCMeta, abstractmethod +from six import add_metaclass + + +@add_metaclass(ABCMeta) +class SuperuserConfigDataInterface(object): + """ + Interface that represents all data store interactions required by the superuser config API. + """ + + @abstractmethod + def is_valid(self): + """ + Returns true if the configured database is valid. + """ + + @abstractmethod + def has_users(self): + """ + Returns true if there are any users defined. + """ + + @abstractmethod + def create_superuser(self, username, password, email): + """ + Creates a new superuser with the given username, password and email. Returns the user's UUID. + """ + + @abstractmethod + def has_federated_login(self, username, service_name): + """ + Returns true if the matching user has a federated login under the matching service. + """ + + @abstractmethod + def attach_federated_login(self, username, service_name, federated_username): + """ + Attaches a federatated login to the matching user, under the given service. + """ diff --git a/config_app/config_endpoints/api/suconfig_models_pre_oci.py b/config_app/config_endpoints/api/suconfig_models_pre_oci.py new file mode 100644 index 000000000..fbc238078 --- /dev/null +++ b/config_app/config_endpoints/api/suconfig_models_pre_oci.py @@ -0,0 +1,37 @@ +from data import model +from data.database import User +from config_app.config_endpoints.api.suconfig_models_interface import SuperuserConfigDataInterface + + +class PreOCIModel(SuperuserConfigDataInterface): + # Note: this method is different than has_users: the user select will throw if the user + # table does not exist, whereas has_users assumes the table is valid + def is_valid(self): + try: + list(User.select().limit(1)) + return True + except: + return False + + def has_users(self): + return bool(list(User.select().limit(1))) + + def create_superuser(self, username, password, email): + return model.user.create_user(username, password, email, auto_verify=True).uuid + + def has_federated_login(self, username, service_name): + user = model.user.get_user(username) + if user is None: + return False + + return bool(model.user.lookup_federated_login(user, service_name)) + + def attach_federated_login(self, username, service_name, federated_username): + user = model.user.get_user(username) + if user is None: + return False + + model.user.attach_federated_login(user, service_name, federated_username) + + +pre_oci_model = PreOCIModel() diff --git a/config_app/config_endpoints/api/superuser.py b/config_app/config_endpoints/api/superuser.py new file mode 100644 index 000000000..7e5adccb5 --- /dev/null +++ b/config_app/config_endpoints/api/superuser.py @@ -0,0 +1,248 @@ +import logging +import pathvalidate +import os +import subprocess +from datetime import datetime + +from flask import request, jsonify, make_response + +from endpoints.exception import NotFound +from data.database import ServiceKeyApprovalType +from data.model import ServiceKeyDoesNotExist +from util.config.validator import EXTRA_CA_DIRECTORY + +from config_app.config_endpoints.exception import InvalidRequest +from 
config_app.config_endpoints.api import resource, ApiResource, nickname, log_action, validate_json_request +from config_app.config_endpoints.api.superuser_models_pre_oci import pre_oci_model +from config_app.config_util.ssl import load_certificate, CertInvalidException +from config_app.c_app import app, config_provider, INIT_SCRIPTS_LOCATION + + +logger = logging.getLogger(__name__) + + +@resource('/v1/superuser/customcerts/') +class SuperUserCustomCertificate(ApiResource): + """ Resource for managing a custom certificate. """ + + @nickname('uploadCustomCertificate') + def post(self, certpath): + uploaded_file = request.files['file'] + if not uploaded_file: + raise InvalidRequest('Missing certificate file') + + # Save the certificate. + certpath = pathvalidate.sanitize_filename(certpath) + if not certpath.endswith('.crt'): + raise InvalidRequest('Invalid certificate file: must have suffix `.crt`') + + logger.debug('Saving custom certificate %s', certpath) + cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, certpath) + config_provider.save_volume_file(cert_full_path, uploaded_file) + logger.debug('Saved custom certificate %s', certpath) + + # Validate the certificate. + try: + logger.debug('Loading custom certificate %s', certpath) + with config_provider.get_volume_file(cert_full_path) as f: + load_certificate(f.read()) + except CertInvalidException: + logger.exception('Got certificate invalid error for cert %s', certpath) + return '', 204 + except IOError: + logger.exception('Got IO error for cert %s', certpath) + return '', 204 + + # Call the update script with config dir location to install the certificate immediately. + if not app.config['TESTING']: + cert_dir = os.path.join(config_provider.get_config_dir_path(), EXTRA_CA_DIRECTORY) + if subprocess.call([os.path.join(INIT_SCRIPTS_LOCATION, 'certs_install.sh')], env={ 'CERTDIR': cert_dir }) != 0: + raise Exception('Could not install certificates') + + return '', 204 + + @nickname('deleteCustomCertificate') + def delete(self, certpath): + cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, certpath) + config_provider.remove_volume_file(cert_full_path) + return '', 204 + + +@resource('/v1/superuser/customcerts') +class SuperUserCustomCertificates(ApiResource): + """ Resource for managing custom certificates. 
""" + + @nickname('getCustomCertificates') + def get(self): + has_extra_certs_path = config_provider.volume_file_exists(EXTRA_CA_DIRECTORY) + extra_certs_found = config_provider.list_volume_directory(EXTRA_CA_DIRECTORY) + if extra_certs_found is None: + return { + 'status': 'file' if has_extra_certs_path else 'none', + } + + cert_views = [] + for extra_cert_path in extra_certs_found: + try: + cert_full_path = config_provider.get_volume_path(EXTRA_CA_DIRECTORY, extra_cert_path) + with config_provider.get_volume_file(cert_full_path) as f: + certificate = load_certificate(f.read()) + cert_views.append({ + 'path': extra_cert_path, + 'names': list(certificate.names), + 'expired': certificate.expired, + }) + except CertInvalidException as cie: + cert_views.append({ + 'path': extra_cert_path, + 'error': cie.message, + }) + except IOError as ioe: + cert_views.append({ + 'path': extra_cert_path, + 'error': ioe.message, + }) + + return { + 'status': 'directory', + 'certs': cert_views, + } + + +@resource('/v1/superuser/keys') +class SuperUserServiceKeyManagement(ApiResource): + """ Resource for managing service keys.""" + schemas = { + 'CreateServiceKey': { + 'id': 'CreateServiceKey', + 'type': 'object', + 'description': 'Description of creation of a service key', + 'required': ['service', 'expiration'], + 'properties': { + 'service': { + 'type': 'string', + 'description': 'The service authenticating with this key', + }, + 'name': { + 'type': 'string', + 'description': 'The friendly name of a service key', + }, + 'metadata': { + 'type': 'object', + 'description': 'The key/value pairs of this key\'s metadata', + }, + 'notes': { + 'type': 'string', + 'description': 'If specified, the extra notes for the key', + }, + 'expiration': { + 'description': 'The expiration date as a unix timestamp', + 'anyOf': [{'type': 'number'}, {'type': 'null'}], + }, + }, + }, + } + + @nickname('listServiceKeys') + def get(self): + keys = pre_oci_model.list_all_service_keys() + + return jsonify({ + 'keys': [key.to_dict() for key in keys], + }) + + @nickname('createServiceKey') + @validate_json_request('CreateServiceKey') + def post(self): + body = request.get_json() + + # Ensure we have a valid expiration date if specified. + expiration_date = body.get('expiration', None) + if expiration_date is not None: + try: + expiration_date = datetime.utcfromtimestamp(float(expiration_date)) + except ValueError as ve: + raise InvalidRequest('Invalid expiration date: %s' % ve) + + if expiration_date <= datetime.now(): + raise InvalidRequest('Expiration date cannot be in the past') + + # Create the metadata for the key. + metadata = body.get('metadata', {}) + metadata.update({ + 'created_by': 'Quay Superuser Panel', + 'ip': request.remote_addr, + }) + + # Generate a key with a private key that we *never save*. + (private_key, key_id) = pre_oci_model.generate_service_key(body['service'], expiration_date, + metadata=metadata, + name=body.get('name', '')) + # Auto-approve the service key. + pre_oci_model.approve_service_key(key_id, ServiceKeyApprovalType.SUPERUSER, + notes=body.get('notes', '')) + + # Log the creation and auto-approval of the service key. 
+ key_log_metadata = { + 'kid': key_id, + 'preshared': True, + 'service': body['service'], + 'name': body.get('name', ''), + 'expiration_date': expiration_date, + 'auto_approved': True, + } + + log_action('service_key_create', None, key_log_metadata) + log_action('service_key_approve', None, key_log_metadata) + + return jsonify({ + 'kid': key_id, + 'name': body.get('name', ''), + 'service': body['service'], + 'public_key': private_key.publickey().exportKey('PEM'), + 'private_key': private_key.exportKey('PEM'), + }) + +@resource('/v1/superuser/approvedkeys/') +class SuperUserServiceKeyApproval(ApiResource): + """ Resource for approving service keys. """ + + schemas = { + 'ApproveServiceKey': { + 'id': 'ApproveServiceKey', + 'type': 'object', + 'description': 'Information for approving service keys', + 'properties': { + 'notes': { + 'type': 'string', + 'description': 'Optional approval notes', + }, + }, + }, + } + + @nickname('approveServiceKey') + @validate_json_request('ApproveServiceKey') + def post(self, kid): + notes = request.get_json().get('notes', '') + try: + key = pre_oci_model.approve_service_key(kid, ServiceKeyApprovalType.SUPERUSER, notes=notes) + + # Log the approval of the service key. + key_log_metadata = { + 'kid': kid, + 'service': key.service, + 'name': key.name, + 'expiration_date': key.expiration_date, + } + + # Note: this may not actually be the current person modifying the config, but if they're in the config tool, + # they have full access to the DB and could pretend to be any user, so pulling any superuser is likely fine + super_user = app.config.get('SUPER_USERS', [None])[0] + log_action('service_key_approve', super_user, key_log_metadata) + except ServiceKeyDoesNotExist: + raise NotFound() + except ServiceKeyAlreadyApproved: + pass + + return make_response('', 201) diff --git a/config_app/config_endpoints/api/superuser_models_interface.py b/config_app/config_endpoints/api/superuser_models_interface.py new file mode 100644 index 000000000..53efc9aec --- /dev/null +++ b/config_app/config_endpoints/api/superuser_models_interface.py @@ -0,0 +1,173 @@ +from abc import ABCMeta, abstractmethod +from collections import namedtuple +from six import add_metaclass + +from config_app.config_endpoints.api import format_date + + +def user_view(user): + return { + 'name': user.username, + 'kind': 'user', + 'is_robot': user.robot, + } + + +class RepositoryBuild(namedtuple('RepositoryBuild', + ['uuid', 'logs_archived', 'repository_namespace_user_username', + 'repository_name', + 'can_write', 'can_read', 'pull_robot', 'resource_key', 'trigger', + 'display_name', + 'started', 'job_config', 'phase', 'status', 'error', + 'archive_url'])): + """ + RepositoryBuild represents a build associated with a repostiory + :type uuid: string + :type logs_archived: boolean + :type repository_namespace_user_username: string + :type repository_name: string + :type can_write: boolean + :type can_write: boolean + :type pull_robot: User + :type resource_key: string + :type trigger: Trigger + :type display_name: string + :type started: boolean + :type job_config: {Any -> Any} + :type phase: string + :type status: string + :type error: string + :type archive_url: string + """ + + def to_dict(self): + + resp = { + 'id': self.uuid, + 'phase': self.phase, + 'started': format_date(self.started), + 'display_name': self.display_name, + 'status': self.status or {}, + 'subdirectory': self.job_config.get('build_subdir', ''), + 'dockerfile_path': self.job_config.get('build_subdir', ''), + 'context': 
self.job_config.get('context', ''), + 'tags': self.job_config.get('docker_tags', []), + 'manual_user': self.job_config.get('manual_user', None), + 'is_writer': self.can_write, + 'trigger': self.trigger.to_dict(), + 'trigger_metadata': self.job_config.get('trigger_metadata', None) if self.can_read else None, + 'resource_key': self.resource_key, + 'pull_robot': user_view(self.pull_robot) if self.pull_robot else None, + 'repository': { + 'namespace': self.repository_namespace_user_username, + 'name': self.repository_name + }, + 'error': self.error, + } + + if self.can_write: + if self.resource_key is not None: + resp['archive_url'] = self.archive_url + elif self.job_config.get('archive_url', None): + resp['archive_url'] = self.job_config['archive_url'] + + return resp + + +class Approval(namedtuple('Approval', ['approver', 'approval_type', 'approved_date', 'notes'])): + """ + Approval represents whether a key has been approved or not + :type approver: User + :type approval_type: string + :type approved_date: Date + :type notes: string + """ + + def to_dict(self): + return { + 'approver': self.approver.to_dict() if self.approver else None, + 'approval_type': self.approval_type, + 'approved_date': self.approved_date, + 'notes': self.notes, + } + + +class ServiceKey( + namedtuple('ServiceKey', ['name', 'kid', 'service', 'jwk', 'metadata', 'created_date', + 'expiration_date', 'rotation_duration', 'approval'])): + """ + ServiceKey is an apostille signing key + :type name: string + :type kid: int + :type service: string + :type jwk: string + :type metadata: string + :type created_date: Date + :type expiration_date: Date + :type rotation_duration: Date + :type approval: Approval + + """ + + def to_dict(self): + return { + 'name': self.name, + 'kid': self.kid, + 'service': self.service, + 'jwk': self.jwk, + 'metadata': self.metadata, + 'created_date': self.created_date, + 'expiration_date': self.expiration_date, + 'rotation_duration': self.rotation_duration, + 'approval': self.approval.to_dict() if self.approval is not None else None, + } + + +class User(namedtuple('User', ['username', 'email', 'verified', 'enabled', 'robot'])): + """ + User represents a single user. + :type username: string + :type email: string + :type verified: boolean + :type enabled: boolean + :type robot: User + """ + + def to_dict(self): + user_data = { + 'kind': 'user', + 'name': self.username, + 'username': self.username, + 'email': self.email, + 'verified': self.verified, + 'enabled': self.enabled, + } + + return user_data + + +class Organization(namedtuple('Organization', ['username', 'email'])): + """ + Organization represents a single org. + :type username: string + :type email: string + """ + + def to_dict(self): + return { + 'name': self.username, + 'email': self.email, + } + + +@add_metaclass(ABCMeta) +class SuperuserDataInterface(object): + """ + Interface that represents all data store interactions required by a superuser api. 
+ """ + + @abstractmethod + def list_all_service_keys(self): + """ + Returns a list of service keys + """ diff --git a/config_app/config_endpoints/api/superuser_models_pre_oci.py b/config_app/config_endpoints/api/superuser_models_pre_oci.py new file mode 100644 index 000000000..c35b94243 --- /dev/null +++ b/config_app/config_endpoints/api/superuser_models_pre_oci.py @@ -0,0 +1,60 @@ +from data import model + +from config_app.config_endpoints.api.superuser_models_interface import (SuperuserDataInterface, User, ServiceKey, + Approval) + + +def _create_user(user): + if user is None: + return None + return User(user.username, user.email, user.verified, user.enabled, user.robot) + + +def _create_key(key): + approval = None + if key.approval is not None: + approval = Approval(_create_user(key.approval.approver), key.approval.approval_type, + key.approval.approved_date, + key.approval.notes) + + return ServiceKey(key.name, key.kid, key.service, key.jwk, key.metadata, key.created_date, + key.expiration_date, + key.rotation_duration, approval) + + +class ServiceKeyDoesNotExist(Exception): + pass + + +class ServiceKeyAlreadyApproved(Exception): + pass + + +class PreOCIModel(SuperuserDataInterface): + """ + PreOCIModel implements the data model for the SuperUser using a database schema + before it was changed to support the OCI specification. + """ + + def list_all_service_keys(self): + keys = model.service_keys.list_all_keys() + return [_create_key(key) for key in keys] + + def approve_service_key(self, kid, approval_type, notes=''): + try: + key = model.service_keys.approve_service_key(kid, approval_type, notes=notes) + return _create_key(key) + except model.ServiceKeyDoesNotExist: + raise ServiceKeyDoesNotExist + except model.ServiceKeyAlreadyApproved: + raise ServiceKeyAlreadyApproved + + def generate_service_key(self, service, expiration_date, kid=None, name='', metadata=None, + rotation_duration=None): + (private_key, key) = model.service_keys.generate_service_key(service, expiration_date, + metadata=metadata, name=name) + + return private_key, key.kid + + +pre_oci_model = PreOCIModel() diff --git a/config_app/config_endpoints/api/tar_config_loader.py b/config_app/config_endpoints/api/tar_config_loader.py new file mode 100644 index 000000000..8944d9092 --- /dev/null +++ b/config_app/config_endpoints/api/tar_config_loader.py @@ -0,0 +1,62 @@ +import os +import tempfile +import tarfile + +from contextlib import closing + +from flask import request, make_response, send_file + +from data.database import configure + +from config_app.c_app import app, config_provider +from config_app.config_endpoints.api import resource, ApiResource, nickname +from config_app.config_util.tar import tarinfo_filter_partial, strip_absolute_path_and_add_trailing_dir + + +@resource('/v1/configapp/initialization') +class ConfigInitialization(ApiResource): + """ + Resource for dealing with any initialization logic for the config app + """ + + @nickname('scStartNewConfig') + def post(self): + config_provider.new_config_dir() + return make_response('OK') + + +@resource('/v1/configapp/tarconfig') +class TarConfigLoader(ApiResource): + """ + Resource for dealing with configuration as a tarball, + including loading and generating functions + """ + + @nickname('scGetConfigTarball') + def get(self): + config_path = config_provider.get_config_dir_path() + tar_dir_prefix = strip_absolute_path_and_add_trailing_dir(config_path) + temp = tempfile.NamedTemporaryFile() + + with closing(tarfile.open(temp.name, mode="w|gz")) as tar: + for 
name in os.listdir(config_path): + tar.add(os.path.join(config_path, name), filter=tarinfo_filter_partial(tar_dir_prefix)) + return send_file(temp.name, mimetype='application/gzip') + + @nickname('scUploadTarballConfig') + def put(self): + """ Loads tarball config into the config provider """ + # Generate a new empty dir to load the config into + config_provider.new_config_dir() + input_stream = request.stream + with tarfile.open(mode="r|gz", fileobj=input_stream) as tar_stream: + tar_stream.extractall(config_provider.get_config_dir_path()) + + config_provider.create_copy_of_config_dir() + + # now try to connect to the db provided in their config to validate it works + combined = dict(**app.config) + combined.update(config_provider.get_config()) + configure(combined) + + return make_response('OK') diff --git a/config_app/config_endpoints/api/user.py b/config_app/config_endpoints/api/user.py new file mode 100644 index 000000000..85008c87e --- /dev/null +++ b/config_app/config_endpoints/api/user.py @@ -0,0 +1,14 @@ +from auth.auth_context import get_authenticated_user +from config_app.config_endpoints.api import resource, ApiResource, nickname +from config_app.config_endpoints.api.superuser_models_interface import user_view + + +@resource('/v1/user/') +class User(ApiResource): + """ Operations related to users. """ + + @nickname('getLoggedInUser') + def get(self): + """ Get user information for the authenticated user. """ + user = get_authenticated_user() + return user_view(user) diff --git a/config_app/config_endpoints/common.py b/config_app/config_endpoints/common.py new file mode 100644 index 000000000..c277f3b35 --- /dev/null +++ b/config_app/config_endpoints/common.py @@ -0,0 +1,73 @@ +import logging +import os +import re + +from flask import make_response, render_template +from flask_restful import reqparse + +from config import frontend_visible_config +from external_libraries import get_external_javascript, get_external_css + +from config_app.c_app import app, IS_KUBERNETES +from config_app._init_config import ROOT_DIR +from config_app.config_util.k8sconfig import get_k8s_namespace + + +def truthy_bool(param): + return param not in {False, 'false', 'False', '0', 'FALSE', '', 'null'} + + +DEFAULT_JS_BUNDLE_NAME = 'configapp' +PARAM_REGEX = re.compile(r'<([^:>]+:)*([\w]+)>') +logger = logging.getLogger(__name__) +TYPE_CONVERTER = { + truthy_bool: 'boolean', + str: 'string', + basestring: 'string', + reqparse.text_type: 'string', + int: 'integer', +} + + +def _list_files(path, extension, contains=""): + """ Returns a list of all the files with the given extension found under the given path. """ + + def matches(f): + return os.path.splitext(f)[1] == '.' + extension and contains in os.path.splitext(f)[0] + + def join_path(dp, f): + # Remove the static/ prefix. It is added in the template. + return os.path.join(dp, f)[len(ROOT_DIR) + 1 + len('config_app/static/'):] + + filepath = os.path.join(os.path.join(ROOT_DIR, 'config_app/static/'), path) + return [join_path(dp, f) for dp, _, files in os.walk(filepath) for f in files if matches(f)] + + +FONT_AWESOME_4 = 'netdna.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.css' + + +def render_page_template(name, route_data=None, js_bundle_name=DEFAULT_JS_BUNDLE_NAME, **kwargs): + """ Renders the page template with the given name as the response and returns its contents. 
""" + main_scripts = _list_files('build', 'js', js_bundle_name) + + use_cdn = os.getenv('TESTING') == 'true' + + external_styles = get_external_css(local=not use_cdn, exclude=FONT_AWESOME_4) + external_scripts = get_external_javascript(local=not use_cdn) + + contents = render_template(name, + route_data=route_data, + main_scripts=main_scripts, + external_styles=external_styles, + external_scripts=external_scripts, + config_set=frontend_visible_config(app.config), + kubernetes_namespace=IS_KUBERNETES and get_k8s_namespace(), + **kwargs) + + resp = make_response(contents) + resp.headers['X-FRAME-OPTIONS'] = 'DENY' + return resp + + +def fully_qualified_name(method_view_class): + return '%s.%s' % (method_view_class.__module__, method_view_class.__name__) diff --git a/config_app/config_endpoints/exception.py b/config_app/config_endpoints/exception.py new file mode 100644 index 000000000..7f7f75a41 --- /dev/null +++ b/config_app/config_endpoints/exception.py @@ -0,0 +1,66 @@ +from enum import Enum + +from flask import url_for +from werkzeug.exceptions import HTTPException + + +class ApiErrorType(Enum): + invalid_request = 'invalid_request' + + +class ApiException(HTTPException): + """ + Represents an error in the application/problem+json format. + + See: https://tools.ietf.org/html/rfc7807 + + - "type" (string) - A URI reference that identifies the + problem type. + + - "title" (string) - A short, human-readable summary of the problem + type. It SHOULD NOT change from occurrence to occurrence of the + problem, except for purposes of localization + + - "status" (number) - The HTTP status code + + - "detail" (string) - A human-readable explanation specific to this + occurrence of the problem. + + - "instance" (string) - A URI reference that identifies the specific + occurrence of the problem. It may or may not yield further + information if dereferenced. 
+ """ + + def __init__(self, error_type, status_code, error_description, payload=None): + Exception.__init__(self) + self.error_description = error_description + self.code = status_code + self.payload = payload + self.error_type = error_type + self.data = self.to_dict() + + super(ApiException, self).__init__(error_description, None) + + def to_dict(self): + rv = dict(self.payload or ()) + + if self.error_description is not None: + rv['detail'] = self.error_description + rv['error_message'] = self.error_description # TODO: deprecate + + rv['error_type'] = self.error_type.value # TODO: deprecate + rv['title'] = self.error_type.value + rv['type'] = url_for('api.error', error_type=self.error_type.value, _external=True) + rv['status'] = self.code + + return rv + + +class InvalidRequest(ApiException): + def __init__(self, error_description, payload=None): + ApiException.__init__(self, ApiErrorType.invalid_request, 400, error_description, payload) + + +class InvalidResponse(ApiException): + def __init__(self, error_description, payload=None): + ApiException.__init__(self, ApiErrorType.invalid_response, 400, error_description, payload) diff --git a/config_app/config_endpoints/setup_web.py b/config_app/config_endpoints/setup_web.py new file mode 100644 index 000000000..32dda15e2 --- /dev/null +++ b/config_app/config_endpoints/setup_web.py @@ -0,0 +1,23 @@ +from flask import Blueprint +from cachetools.func import lru_cache + +from config_app.config_endpoints.common import render_page_template +from config_app.config_endpoints.api.discovery import generate_route_data +from config_app.config_endpoints.api import no_cache + +setup_web = Blueprint('setup_web', __name__, template_folder='templates') + + +@lru_cache(maxsize=1) +def _get_route_data(): + return generate_route_data() + + +def render_page_template_with_routedata(name, *args, **kwargs): + return render_page_template(name, _get_route_data(), *args, **kwargs) + + +@no_cache +@setup_web.route('/', methods=['GET'], defaults={'path': ''}) +def index(path, **kwargs): + return render_page_template_with_routedata('index.html', js_bundle_name='configapp', **kwargs) diff --git a/config_app/config_test/__init__.py b/config_app/config_test/__init__.py new file mode 100644 index 000000000..bb3463e2d --- /dev/null +++ b/config_app/config_test/__init__.py @@ -0,0 +1,149 @@ +import json as py_json +import unittest +from contextlib import contextmanager +from urllib import urlencode +from urlparse import urlparse, parse_qs, urlunparse + +from config_app.c_app import app, config_provider +from config_app.config_endpoints.api import api +from initdb import setup_database_for_testing, finished_database_for_testing + + +CSRF_TOKEN_KEY = '_csrf_token' +CSRF_TOKEN = '123csrfforme' + +READ_ACCESS_USER = 'reader' +ADMIN_ACCESS_USER = 'devtable' +ADMIN_ACCESS_EMAIL = 'jschorr@devtable.com' + +# OVERRIDES FROM PORTING FROM OLD APP: +all_queues = [] # the config app doesn't have any queues + +class ApiTestCase(unittest.TestCase): + maxDiff = None + + @staticmethod + def _add_csrf(without_csrf): + parts = urlparse(without_csrf) + query = parse_qs(parts[4]) + query[CSRF_TOKEN_KEY] = CSRF_TOKEN + return urlunparse(list(parts[0:4]) + [urlencode(query)] + list(parts[5:])) + + def url_for(self, resource_name, params=None, skip_csrf=False): + params = params or {} + url = api.url_for(resource_name, **params) + if not skip_csrf: + url = ApiTestCase._add_csrf(url) + return url + + def setUp(self): + setup_database_for_testing(self) + self.app = app.test_client() + self.ctx = 
app.test_request_context() + self.ctx.__enter__() + self.setCsrfToken(CSRF_TOKEN) + + def tearDown(self): + finished_database_for_testing(self) + config_provider.clear() + self.ctx.__exit__(True, None, None) + + def setCsrfToken(self, token): + with self.app.session_transaction() as sess: + sess[CSRF_TOKEN_KEY] = token + + @contextmanager + def toggleFeature(self, name, enabled): + import features + previous_value = getattr(features, name) + setattr(features, name, enabled) + yield + setattr(features, name, previous_value) + + def getJsonResponse(self, resource_name, params={}, expected_code=200): + rv = self.app.get(api.url_for(resource_name, **params)) + self.assertEquals(expected_code, rv.status_code) + data = rv.data + parsed = py_json.loads(data) + return parsed + + def postResponse(self, resource_name, params={}, data={}, file=None, headers=None, + expected_code=200): + data = py_json.dumps(data) + + headers = headers or {} + headers.update({"Content-Type": "application/json"}) + + if file is not None: + data = {'file': file} + headers = None + + rv = self.app.post(self.url_for(resource_name, params), data=data, headers=headers) + self.assertEquals(rv.status_code, expected_code) + return rv.data + + def getResponse(self, resource_name, params={}, expected_code=200): + rv = self.app.get(api.url_for(resource_name, **params)) + self.assertEquals(rv.status_code, expected_code) + return rv.data + + def putResponse(self, resource_name, params={}, data={}, expected_code=200): + rv = self.app.put( + self.url_for(resource_name, params), data=py_json.dumps(data), + headers={"Content-Type": "application/json"}) + self.assertEquals(rv.status_code, expected_code) + return rv.data + + def deleteResponse(self, resource_name, params={}, expected_code=204): + rv = self.app.delete(self.url_for(resource_name, params)) + + if rv.status_code != expected_code: + print 'Mismatch data for resource DELETE %s: %s' % (resource_name, rv.data) + + self.assertEquals(rv.status_code, expected_code) + return rv.data + + def deleteEmptyResponse(self, resource_name, params={}, expected_code=204): + rv = self.app.delete(self.url_for(resource_name, params)) + self.assertEquals(rv.status_code, expected_code) + self.assertEquals(rv.data, '') # ensure response body empty + return + + def postJsonResponse(self, resource_name, params={}, data={}, expected_code=200): + rv = self.app.post( + self.url_for(resource_name, params), data=py_json.dumps(data), + headers={"Content-Type": "application/json"}) + + if rv.status_code != expected_code: + print 'Mismatch data for resource POST %s: %s' % (resource_name, rv.data) + + self.assertEquals(rv.status_code, expected_code) + data = rv.data + parsed = py_json.loads(data) + return parsed + + def putJsonResponse(self, resource_name, params={}, data={}, expected_code=200, skip_csrf=False): + rv = self.app.put( + self.url_for(resource_name, params, skip_csrf), data=py_json.dumps(data), + headers={"Content-Type": "application/json"}) + + if rv.status_code != expected_code: + print 'Mismatch data for resource PUT %s: %s' % (resource_name, rv.data) + + self.assertEquals(rv.status_code, expected_code) + data = rv.data + parsed = py_json.loads(data) + return parsed + + def assertNotInTeam(self, data, membername): + for memberData in data['members']: + if memberData['name'] == membername: + self.fail(membername + ' found in team: ' + data['name']) + + def assertInTeam(self, data, membername): + for member_data in data['members']: + if member_data['name'] == membername: + return + + 
self.fail(membername + ' not found in team: ' + data['name']) + diff --git a/config_app/config_test/test_api_usage.py b/config_app/config_test/test_api_usage.py new file mode 100644 index 000000000..aa34b3495 --- /dev/null +++ b/config_app/config_test/test_api_usage.py @@ -0,0 +1,208 @@ +from StringIO import StringIO +from mockldap import MockLdap + +from data import database, model +from util.security.test.test_ssl_util import generate_test_cert + +from config_app.c_app import app +from config_app.config_test import ApiTestCase, all_queues, ADMIN_ACCESS_USER, ADMIN_ACCESS_EMAIL +from config_app.config_endpoints.api import api_bp +from config_app.config_endpoints.api.superuser import SuperUserCustomCertificate, SuperUserCustomCertificates +from config_app.config_endpoints.api.suconfig import SuperUserConfig, SuperUserCreateInitialSuperUser, \ + SuperUserConfigFile, SuperUserRegistryStatus + +try: + app.register_blueprint(api_bp, url_prefix='/api') +except ValueError: + # This blueprint was already registered + pass + + +class TestSuperUserCreateInitialSuperUser(ApiTestCase): + def test_create_superuser(self): + data = { + 'username': 'newsuper', + 'password': 'password', + 'email': 'jschorr+fake@devtable.com', + } + + # Add some fake config. + fake_config = { + 'AUTHENTICATION_TYPE': 'Database', + 'SECRET_KEY': 'fakekey', + } + + self.putJsonResponse(SuperUserConfig, data=dict(config=fake_config, hostname='fakehost')) + + # Try to write with config. Should 403 since there are users in the DB. + self.postResponse(SuperUserCreateInitialSuperUser, data=data, expected_code=403) + + # Delete all users in the DB. + for user in list(database.User.select()): + model.user.delete_user(user, all_queues) + + # Create the superuser. + self.postJsonResponse(SuperUserCreateInitialSuperUser, data=data) + + # Ensure the user exists in the DB. + self.assertIsNotNone(model.user.get_user('newsuper')) + + # Ensure that the current user is a superuser in the config. + json = self.getJsonResponse(SuperUserConfig) + self.assertEquals(['newsuper'], json['config']['SUPER_USERS']) + + # Ensure that the current user is a superuser in memory by trying to call an API + # that will fail otherwise. + self.getResponse(SuperUserConfigFile, params=dict(filename='ssl.cert')) + + +class TestSuperUserConfig(ApiTestCase): + def test_get_status_update_config(self): + # With no config the status should be 'config-db'. + json = self.getJsonResponse(SuperUserRegistryStatus) + self.assertEquals('config-db', json['status']) + + # Add some fake config. + fake_config = { + 'AUTHENTICATION_TYPE': 'Database', + 'SECRET_KEY': 'fakekey', + } + + json = self.putJsonResponse(SuperUserConfig, data=dict(config=fake_config, + hostname='fakehost')) + self.assertEquals('fakekey', json['config']['SECRET_KEY']) + self.assertEquals('fakehost', json['config']['SERVER_HOSTNAME']) + self.assertEquals('Database', json['config']['AUTHENTICATION_TYPE']) + + # With config the status should be 'setup-db'. + # TODO: fix this test + # json = self.getJsonResponse(SuperUserRegistryStatus) + # self.assertEquals('setup-db', json['status']) + + def test_config_file(self): + # Try for an invalid file. Should 404. + self.getResponse(SuperUserConfigFile, params=dict(filename='foobar'), expected_code=404) + + # Try for a valid filename. Should not exist. + json = self.getJsonResponse(SuperUserConfigFile, params=dict(filename='ssl.cert')) + self.assertFalse(json['exists']) + + # Add the file. 
+ self.postResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), + file=(StringIO('my file contents'), 'ssl.cert')) + + # Should now exist. + json = self.getJsonResponse(SuperUserConfigFile, params=dict(filename='ssl.cert')) + self.assertTrue(json['exists']) + + def test_update_with_external_auth(self): + # Run a mock LDAP. + mockldap = MockLdap({ + 'dc=quay,dc=io': { + 'dc': ['quay', 'io'] + }, + 'ou=employees,dc=quay,dc=io': { + 'dc': ['quay', 'io'], + 'ou': 'employees' + }, + 'uid=' + ADMIN_ACCESS_USER + ',ou=employees,dc=quay,dc=io': { + 'dc': ['quay', 'io'], + 'ou': 'employees', + 'uid': [ADMIN_ACCESS_USER], + 'userPassword': ['password'], + 'mail': [ADMIN_ACCESS_EMAIL], + }, + }) + + config = { + 'AUTHENTICATION_TYPE': 'LDAP', + 'LDAP_BASE_DN': ['dc=quay', 'dc=io'], + 'LDAP_ADMIN_DN': 'uid=devtable,ou=employees,dc=quay,dc=io', + 'LDAP_ADMIN_PASSWD': 'password', + 'LDAP_USER_RDN': ['ou=employees'], + 'LDAP_UID_ATTR': 'uid', + 'LDAP_EMAIL_ATTR': 'mail', + } + + mockldap.start() + try: + # Write the config with the valid password. + self.putResponse(SuperUserConfig, + data={'config': config, + 'password': 'password', + 'hostname': 'foo'}, expected_code=200) + + # Ensure that the user row has been linked. + # TODO: fix this test + # self.assertEquals(ADMIN_ACCESS_USER, + # model.user.verify_federated_login('ldap', ADMIN_ACCESS_USER).username) + finally: + mockldap.stop() + +class TestSuperUserCustomCertificates(ApiTestCase): + def test_custom_certificates(self): + + # Upload a certificate. + cert_contents, _ = generate_test_cert(hostname='somecoolhost', san_list=['DNS:bar', 'DNS:baz']) + self.postResponse(SuperUserCustomCertificate, params=dict(certpath='testcert.crt'), + file=(StringIO(cert_contents), 'testcert.crt'), expected_code=204) + + # Make sure it is present. + json = self.getJsonResponse(SuperUserCustomCertificates) + self.assertEquals(1, len(json['certs'])) + + cert_info = json['certs'][0] + self.assertEquals('testcert.crt', cert_info['path']) + + self.assertEquals(set(['somecoolhost', 'bar', 'baz']), set(cert_info['names'])) + self.assertFalse(cert_info['expired']) + + # Remove the certificate. + self.deleteResponse(SuperUserCustomCertificate, params=dict(certpath='testcert.crt')) + + # Make sure it is gone. + json = self.getJsonResponse(SuperUserCustomCertificates) + self.assertEquals(0, len(json['certs'])) + + def test_expired_custom_certificate(self): + # Upload a certificate. + cert_contents, _ = generate_test_cert(hostname='somecoolhost', expires=-10) + self.postResponse(SuperUserCustomCertificate, params=dict(certpath='testcert.crt'), + file=(StringIO(cert_contents), 'testcert.crt'), expected_code=204) + + # Make sure it is present. + json = self.getJsonResponse(SuperUserCustomCertificates) + self.assertEquals(1, len(json['certs'])) + + cert_info = json['certs'][0] + self.assertEquals('testcert.crt', cert_info['path']) + + self.assertEquals(set(['somecoolhost']), set(cert_info['names'])) + self.assertTrue(cert_info['expired']) + + def test_invalid_custom_certificate(self): + # Upload an invalid certificate. + self.postResponse(SuperUserCustomCertificate, params=dict(certpath='testcert.crt'), + file=(StringIO('some contents'), 'testcert.crt'), expected_code=204) + + # Make sure it is present but invalid. 
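+    # 'no start line' is the PEM parse error reported when the uploaded bytes contain
+    # no BEGIN CERTIFICATE block, which is what we expect for this garbage input.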
+ json = self.getJsonResponse(SuperUserCustomCertificates) + self.assertEquals(1, len(json['certs'])) + + cert_info = json['certs'][0] + self.assertEquals('testcert.crt', cert_info['path']) + self.assertEquals('no start line', cert_info['error']) + + def test_path_sanitization(self): + # Upload a certificate. + cert_contents, _ = generate_test_cert(hostname='somecoolhost', expires=-10) + self.postResponse(SuperUserCustomCertificate, params=dict(certpath='testcert/../foobar.crt'), + file=(StringIO(cert_contents), 'testcert/../foobar.crt'), expected_code=204) + + # Make sure it is present. + json = self.getJsonResponse(SuperUserCustomCertificates) + self.assertEquals(1, len(json['certs'])) + + cert_info = json['certs'][0] + self.assertEquals('foobar.crt', cert_info['path']) + diff --git a/test/test_suconfig_api.py b/config_app/config_test/test_suconfig_api.py similarity index 58% rename from test/test_suconfig_api.py rename to config_app/config_test/test_suconfig_api.py index 6cbdcafa8..408b96a8b 100644 --- a/test/test_suconfig_api.py +++ b/config_app/config_test/test_suconfig_api.py @@ -1,14 +1,25 @@ -from test.test_api_usage import ApiTestCase, READ_ACCESS_USER, ADMIN_ACCESS_USER -from endpoints.api.suconfig import (SuperUserRegistryStatus, SuperUserConfig, SuperUserConfigFile, - SuperUserCreateInitialSuperUser, SuperUserConfigValidate) -from app import config_provider -from data.database import User - import unittest +import mock +from data.database import User +from data import model -class ConfigForTesting(object): +from config_app.config_endpoints.api.suconfig import SuperUserConfig, SuperUserConfigValidate, SuperUserConfigFile, \ + SuperUserRegistryStatus, SuperUserCreateInitialSuperUser +from config_app.config_endpoints.api import api_bp +from config_app.config_test import ApiTestCase, READ_ACCESS_USER, ADMIN_ACCESS_USER +from config_app.c_app import app, config_provider +try: + app.register_blueprint(api_bp, url_prefix='/api') +except ValueError: + # This blueprint was already registered + pass + +# OVERRIDES FROM PORTING FROM OLD APP: +all_queues = [] # the config app doesn't have any queues + +class FreshConfigProvider(object): def __enter__(self): config_provider.reset_for_test() return config_provider @@ -18,68 +29,74 @@ class ConfigForTesting(object): class TestSuperUserRegistryStatus(ApiTestCase): - def test_registry_status(self): - with ConfigForTesting(): + def test_registry_status_no_config(self): + with FreshConfigProvider(): json = self.getJsonResponse(SuperUserRegistryStatus) self.assertEquals('config-db', json['status']) + @mock.patch("config_app.config_endpoints.api.suconfig.database_is_valid", mock.Mock(return_value=False)) + def test_registry_status_no_database(self): + with FreshConfigProvider(): + config_provider.save_config({'key': 'value'}) + json = self.getJsonResponse(SuperUserRegistryStatus) + self.assertEquals('setup-db', json['status']) + + @mock.patch("config_app.config_endpoints.api.suconfig.database_is_valid", mock.Mock(return_value=True)) + def test_registry_status_db_has_superuser(self): + with FreshConfigProvider(): + config_provider.save_config({'key': 'value'}) + json = self.getJsonResponse(SuperUserRegistryStatus) + self.assertEquals('config', json['status']) + + @mock.patch("config_app.config_endpoints.api.suconfig.database_is_valid", mock.Mock(return_value=True)) + @mock.patch("config_app.config_endpoints.api.suconfig.database_has_users", mock.Mock(return_value=False)) + def test_registry_status_db_no_superuser(self): + with FreshConfigProvider(): 
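+      # Database is valid but holds no users, so setup should prompt for the
+      # initial superuser next.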
+ config_provider.save_config({'key': 'value'}) + json = self.getJsonResponse(SuperUserRegistryStatus) + self.assertEquals('create-superuser', json['status']) + + @mock.patch("config_app.config_endpoints.api.suconfig.database_is_valid", mock.Mock(return_value=True)) + @mock.patch("config_app.config_endpoints.api.suconfig.database_has_users", mock.Mock(return_value=True)) + def test_registry_status_setup_complete(self): + with FreshConfigProvider(): + config_provider.save_config({'key': 'value', 'SETUP_COMPLETE': True}) + json = self.getJsonResponse(SuperUserRegistryStatus) + self.assertEquals('config', json['status']) class TestSuperUserConfigFile(ApiTestCase): - def test_get_non_superuser(self): - with ConfigForTesting(): - # No user. - self.getResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=403) - - # Non-superuser. - self.login(READ_ACCESS_USER) - self.getResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=403) - def test_get_superuser_invalid_filename(self): - with ConfigForTesting(): - self.login(ADMIN_ACCESS_USER) + with FreshConfigProvider(): self.getResponse(SuperUserConfigFile, params=dict(filename='somefile'), expected_code=404) def test_get_superuser(self): - with ConfigForTesting(): - self.login(ADMIN_ACCESS_USER) + with FreshConfigProvider(): result = self.getJsonResponse(SuperUserConfigFile, params=dict(filename='ssl.cert')) self.assertFalse(result['exists']) - def test_post_non_superuser(self): - with ConfigForTesting(): - # No user, before config.yaml exists. + def test_post_no_file(self): + with FreshConfigProvider(): + # No file self.postResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=400) - # Write some config. - self.putJsonResponse(SuperUserConfig, data=dict(config={}, hostname='foobar')) - - # No user, with config.yaml. - self.postResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=403) - - # Non-superuser. - self.login(READ_ACCESS_USER) - self.postResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=403) - def test_post_superuser_invalid_filename(self): - with ConfigForTesting(): - self.login(ADMIN_ACCESS_USER) + with FreshConfigProvider(): self.postResponse(SuperUserConfigFile, params=dict(filename='somefile'), expected_code=404) def test_post_superuser(self): - with ConfigForTesting(): - self.login(ADMIN_ACCESS_USER) + with FreshConfigProvider(): self.postResponse(SuperUserConfigFile, params=dict(filename='ssl.cert'), expected_code=400) class TestSuperUserCreateInitialSuperUser(ApiTestCase): def test_no_config_file(self): - with ConfigForTesting(): + with FreshConfigProvider(): # If there is no config.yaml, then this method should security fail. data = dict(username='cooluser', password='password', email='fake@example.com') self.postResponse(SuperUserCreateInitialSuperUser, data=data, expected_code=403) def test_config_file_with_db_users(self): - with ConfigForTesting(): + with FreshConfigProvider(): # Write some config. self.putJsonResponse(SuperUserConfig, data=dict(config={}, hostname='foobar')) @@ -89,13 +106,13 @@ class TestSuperUserCreateInitialSuperUser(ApiTestCase): self.postResponse(SuperUserCreateInitialSuperUser, data=data, expected_code=403) def test_config_file_with_no_db_users(self): - with ConfigForTesting(): + with FreshConfigProvider(): # Write some config. self.putJsonResponse(SuperUserConfig, data=dict(config={}, hostname='foobar')) # Delete all the users in the DB. 
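+      # model.user.delete_user also cleans up the user's related rows; all_queues is
+      # empty here because the config app runs no worker queues.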
for user in list(User.select()): - user.delete_instance(recursive=True) + model.user.delete_user(user, all_queues) # This method should now succeed. data = dict(username='cooluser', password='password', email='fake@example.com') @@ -112,8 +129,7 @@ class TestSuperUserCreateInitialSuperUser(ApiTestCase): class TestSuperUserConfigValidate(ApiTestCase): def test_nonsuperuser_noconfig(self): - with ConfigForTesting(): - self.login(ADMIN_ACCESS_USER) + with FreshConfigProvider(): result = self.postJsonResponse(SuperUserConfigValidate, params=dict(service='someservice'), data=dict(config={})) @@ -121,18 +137,13 @@ class TestSuperUserConfigValidate(ApiTestCase): def test_nonsuperuser_config(self): - with ConfigForTesting(): + with FreshConfigProvider(): # The validate config call works if there is no config.yaml OR the user is a superuser. # Add a config, and verify it breaks when unauthenticated. json = self.putJsonResponse(SuperUserConfig, data=dict(config={}, hostname='foobar')) self.assertTrue(json['exists']) - self.postResponse(SuperUserConfigValidate, params=dict(service='someservice'), - data=dict(config={}), - expected_code=403) - # Now login as a superuser. - self.login(ADMIN_ACCESS_USER) result = self.postJsonResponse(SuperUserConfigValidate, params=dict(service='someservice'), data=dict(config={})) @@ -140,18 +151,8 @@ class TestSuperUserConfigValidate(ApiTestCase): class TestSuperUserConfig(ApiTestCase): - def test_get_non_superuser(self): - with ConfigForTesting(): - # No user. - self.getResponse(SuperUserConfig, expected_code=401) - - # Non-superuser. - self.login(READ_ACCESS_USER) - self.getResponse(SuperUserConfig, expected_code=403) - def test_get_superuser(self): - with ConfigForTesting(): - self.login(ADMIN_ACCESS_USER) + with FreshConfigProvider(): json = self.getJsonResponse(SuperUserConfig) # Note: We expect the config to be none because a config.yaml should never be checked into @@ -159,27 +160,13 @@ class TestSuperUserConfig(ApiTestCase): self.assertIsNone(json['config']) def test_put(self): - with ConfigForTesting() as config: - # The update config call works if there is no config.yaml OR the user is a superuser. First - # try writing it without a superuser present. + with FreshConfigProvider() as config: json = self.putJsonResponse(SuperUserConfig, data=dict(config={}, hostname='foobar')) self.assertTrue(json['exists']) # Verify the config file exists. self.assertTrue(config.config_exists()) - # Try writing it again. This should now fail, since the config.yaml exists. - self.putResponse(SuperUserConfig, data=dict(config={}, hostname='barbaz'), expected_code=403) - - # Login as a non-superuser. - self.login(READ_ACCESS_USER) - - # Try writing it again. This should fail. - self.putResponse(SuperUserConfig, data=dict(config={}, hostname='barbaz'), expected_code=403) - - # Login as a superuser. - self.login(ADMIN_ACCESS_USER) - # This should succeed. 
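+      # The standalone config tool is allowed to overwrite an existing config.yaml,
+      # so a second PUT with a different hostname is also expected to succeed.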
json = self.putJsonResponse(SuperUserConfig, data=dict(config={}, hostname='barbaz')) self.assertTrue(json['exists']) @@ -189,4 +176,4 @@ class TestSuperUserConfig(ApiTestCase): if __name__ == '__main__': - unittest.main() \ No newline at end of file + unittest.main() diff --git a/config_app/config_util/__init__.py b/config_app/config_util/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/config_app/config_util/config/TransientDirectoryProvider.py b/config_app/config_util/config/TransientDirectoryProvider.py new file mode 100644 index 000000000..5ac685592 --- /dev/null +++ b/config_app/config_util/config/TransientDirectoryProvider.py @@ -0,0 +1,62 @@ +import os + +from shutil import copytree +from backports.tempfile import TemporaryDirectory + +from config_app.config_util.config.fileprovider import FileConfigProvider + +OLD_CONFIG_SUBDIR = 'old/' + +class TransientDirectoryProvider(FileConfigProvider): + """ Implementation of the config provider that reads and writes the data + from/to the file system, only using temporary directories, + deleting old dirs and creating new ones as requested. + """ + + def __init__(self, config_volume, yaml_filename, py_filename): + # Create a temp directory that will be cleaned up when we change the config path + # This should ensure we have no "pollution" of different configs: + # no uploaded config should ever affect subsequent config modifications/creations + temp_dir = TemporaryDirectory() + self.temp_dir = temp_dir + self.old_config_dir = None + super(TransientDirectoryProvider, self).__init__(temp_dir.name, yaml_filename, py_filename) + + @property + def provider_id(self): + return 'transient' + + def new_config_dir(self): + """ + Update the path with a new temporary directory, deleting the old one in the process + """ + self.temp_dir.cleanup() + temp_dir = TemporaryDirectory() + + self.config_volume = temp_dir.name + self.temp_dir = temp_dir + self.yaml_path = os.path.join(temp_dir.name, self.yaml_filename) + + def create_copy_of_config_dir(self): + """ + Create a directory to store loaded/populated configuration (for rollback if necessary) + """ + if self.old_config_dir is not None: + self.old_config_dir.cleanup() + + temp_dir = TemporaryDirectory() + self.old_config_dir = temp_dir + + # Python 2.7's shutil.copy() doesn't allow for copying to existing directories, + # so when copying/reading to the old saved config, we have to talk to a subdirectory, + # and use the shutil.copytree() function + copytree(self.config_volume, os.path.join(temp_dir.name, OLD_CONFIG_SUBDIR)) + + def get_config_dir_path(self): + return self.config_volume + + def get_old_config_dir(self): + if self.old_config_dir is None: + raise Exception('Cannot return a configuration that was no old configuration') + + return os.path.join(self.old_config_dir.name, OLD_CONFIG_SUBDIR) diff --git a/config_app/config_util/config/__init__.py b/config_app/config_util/config/__init__.py new file mode 100644 index 000000000..d39d0ea1c --- /dev/null +++ b/config_app/config_util/config/__init__.py @@ -0,0 +1,39 @@ +import base64 +import os + +from config_app.config_util.config.fileprovider import FileConfigProvider +from config_app.config_util.config.testprovider import TestConfigProvider +from config_app.config_util.config.TransientDirectoryProvider import TransientDirectoryProvider +from util.config.validator import EXTRA_CA_DIRECTORY, EXTRA_CA_DIRECTORY_PREFIX + + +def get_config_provider(config_volume, yaml_filename, py_filename, testing=False): + """ Loads and returns 
the config provider for the current environment. """ + + if testing: + return TestConfigProvider() + + return TransientDirectoryProvider(config_volume, yaml_filename, py_filename) + + +def get_config_as_kube_secret(config_path): + data = {} + + # Kubernetes secrets don't have sub-directories, so for the extra_ca_certs dir + # we have to put the extra certs in with a prefix, and then one of our init scripts + # (02_get_kube_certs.sh) will expand the prefixed certs into the equivalent directory + # so that they'll be installed correctly on startup by the certs_install script + certs_dir = os.path.join(config_path, EXTRA_CA_DIRECTORY) + if os.path.exists(certs_dir): + for extra_cert in os.listdir(certs_dir): + with open(os.path.join(certs_dir, extra_cert)) as f: + data[EXTRA_CA_DIRECTORY_PREFIX + extra_cert] = base64.b64encode(f.read()) + + + for name in os.listdir(config_path): + file_path = os.path.join(config_path, name) + if not os.path.isdir(file_path): + with open(file_path) as f: + data[name] = base64.b64encode(f.read()) + + return data diff --git a/config_app/config_util/config/basefileprovider.py b/config_app/config_util/config/basefileprovider.py new file mode 100644 index 000000000..caf231321 --- /dev/null +++ b/config_app/config_util/config/basefileprovider.py @@ -0,0 +1,72 @@ +import os +import logging + +from config_app.config_util.config.baseprovider import (BaseProvider, import_yaml, export_yaml, + CannotWriteConfigException) + +logger = logging.getLogger(__name__) + + +class BaseFileProvider(BaseProvider): + """ Base implementation of the config provider that reads the data from the file system. """ + + def __init__(self, config_volume, yaml_filename, py_filename): + self.config_volume = config_volume + self.yaml_filename = yaml_filename + self.py_filename = py_filename + + self.yaml_path = os.path.join(config_volume, yaml_filename) + self.py_path = os.path.join(config_volume, py_filename) + + def update_app_config(self, app_config): + if os.path.exists(self.py_path): + logger.debug('Applying config file: %s', self.py_path) + app_config.from_pyfile(self.py_path) + + if os.path.exists(self.yaml_path): + logger.debug('Applying config file: %s', self.yaml_path) + import_yaml(app_config, self.yaml_path) + + def get_config(self): + if not self.config_exists(): + return None + + config_obj = {} + import_yaml(config_obj, self.yaml_path) + return config_obj + + def config_exists(self): + return self.volume_file_exists(self.yaml_filename) + + def volume_exists(self): + return os.path.exists(self.config_volume) + + def volume_file_exists(self, filename): + return os.path.exists(os.path.join(self.config_volume, filename)) + + def get_volume_file(self, filename, mode='r'): + return open(os.path.join(self.config_volume, filename), mode=mode) + + def get_volume_path(self, directory, filename): + return os.path.join(directory, filename) + + def list_volume_directory(self, path): + dirpath = os.path.join(self.config_volume, path) + if not os.path.exists(dirpath): + return None + + if not os.path.isdir(dirpath): + return None + + return os.listdir(dirpath) + + def requires_restart(self, app_config): + file_config = self.get_config() + if not file_config: + return False + + for key in file_config: + if app_config.get(key) != file_config[key]: + return True + + return False diff --git a/config_app/config_util/config/baseprovider.py b/config_app/config_util/config/baseprovider.py new file mode 100644 index 000000000..17ae7e86b --- /dev/null +++ b/config_app/config_util/config/baseprovider.py @@ 
-0,0 +1,128 @@ +import logging +import yaml + +from abc import ABCMeta, abstractmethod +from six import add_metaclass + +from jsonschema import validate, ValidationError + +from util.config.schema import CONFIG_SCHEMA + +logger = logging.getLogger(__name__) + + +class CannotWriteConfigException(Exception): + """ Exception raised when the config cannot be written. """ + pass + + +class SetupIncompleteException(Exception): + """ Exception raised when attempting to verify config that has not yet been setup. """ + pass + + +def import_yaml(config_obj, config_file): + with open(config_file) as f: + c = yaml.safe_load(f) + if not c: + logger.debug('Empty YAML config file') + return + + if isinstance(c, str): + raise Exception('Invalid YAML config file: ' + str(c)) + + for key in c.iterkeys(): + if key.isupper(): + config_obj[key] = c[key] + + if config_obj.get('SETUP_COMPLETE', False): + try: + validate(config_obj, CONFIG_SCHEMA) + except ValidationError: + # TODO: Change this into a real error + logger.exception('Could not validate config schema') + else: + logger.debug('Skipping config schema validation because setup is not complete') + + return config_obj + + +def get_yaml(config_obj): + return yaml.safe_dump(config_obj, encoding='utf-8', allow_unicode=True) + + +def export_yaml(config_obj, config_file): + try: + with open(config_file, 'w') as f: + f.write(get_yaml(config_obj)) + except IOError as ioe: + raise CannotWriteConfigException(str(ioe)) + + +@add_metaclass(ABCMeta) +class BaseProvider(object): + """ A configuration provider helps to load, save, and handle config override in the application. + """ + + @property + def provider_id(self): + raise NotImplementedError + + @abstractmethod + def update_app_config(self, app_config): + """ Updates the given application config object with the loaded override config. """ + + @abstractmethod + def get_config(self): + """ Returns the contents of the config override file, or None if none. """ + + @abstractmethod + def save_config(self, config_object): + """ Updates the contents of the config override file to those given. """ + + @abstractmethod + def config_exists(self): + """ Returns true if a config override file exists in the config volume. """ + + @abstractmethod + def volume_exists(self): + """ Returns whether the config override volume exists. """ + + @abstractmethod + def volume_file_exists(self, filename): + """ Returns whether the file with the given name exists under the config override volume. """ + + @abstractmethod + def get_volume_file(self, filename, mode='r'): + """ Returns a Python file referring to the given name under the config override volume. """ + + @abstractmethod + def write_volume_file(self, filename, contents): + """ Writes the given contents to the config override volumne, with the given filename. """ + + @abstractmethod + def remove_volume_file(self, filename): + """ Removes the config override volume file with the given filename. """ + + @abstractmethod + def list_volume_directory(self, path): + """ Returns a list of strings representing the names of the files found in the config override + directory under the given path. If the path doesn't exist, returns None. + """ + + @abstractmethod + def save_volume_file(self, filename, flask_file): + """ Saves the given flask file to the config override volume, with the given + filename. 
+ """ + + @abstractmethod + def requires_restart(self, app_config): + """ If true, the configuration loaded into memory for the app does not match that on disk, + indicating that this container requires a restart. + """ + + @abstractmethod + def get_volume_path(self, directory, filename): + """ Helper for constructing file paths, which may differ between providers. For example, + kubernetes can't have subfolders in configmaps """ diff --git a/config_app/config_util/config/fileprovider.py b/config_app/config_util/config/fileprovider.py new file mode 100644 index 000000000..74531e581 --- /dev/null +++ b/config_app/config_util/config/fileprovider.py @@ -0,0 +1,60 @@ +import os +import logging + +from config_app.config_util.config.baseprovider import export_yaml, CannotWriteConfigException +from config_app.config_util.config.basefileprovider import BaseFileProvider + +logger = logging.getLogger(__name__) + + +def _ensure_parent_dir(filepath): + """ Ensures that the parent directory of the given file path exists. """ + try: + parentpath = os.path.abspath(os.path.join(filepath, os.pardir)) + if not os.path.isdir(parentpath): + os.makedirs(parentpath) + except IOError as ioe: + raise CannotWriteConfigException(str(ioe)) + + +class FileConfigProvider(BaseFileProvider): + """ Implementation of the config provider that reads and writes the data + from/to the file system. """ + + def __init__(self, config_volume, yaml_filename, py_filename): + super(FileConfigProvider, self).__init__(config_volume, yaml_filename, py_filename) + + @property + def provider_id(self): + return 'file' + + def save_config(self, config_obj): + export_yaml(config_obj, self.yaml_path) + + def write_volume_file(self, filename, contents): + filepath = os.path.join(self.config_volume, filename) + _ensure_parent_dir(filepath) + + try: + with open(filepath, mode='w') as f: + f.write(contents) + except IOError as ioe: + raise CannotWriteConfigException(str(ioe)) + + return filepath + + def remove_volume_file(self, filename): + filepath = os.path.join(self.config_volume, filename) + os.remove(filepath) + + def save_volume_file(self, filename, flask_file): + filepath = os.path.join(self.config_volume, filename) + _ensure_parent_dir(filepath) + + # Write the file. 
+ try: + flask_file.save(filepath) + except IOError as ioe: + raise CannotWriteConfigException(str(ioe)) + + return filepath diff --git a/config_app/config_util/config/test/test_helpers.py b/config_app/config_util/config/test/test_helpers.py new file mode 100644 index 000000000..ceeae51ff --- /dev/null +++ b/config_app/config_util/config/test/test_helpers.py @@ -0,0 +1,75 @@ +import pytest +import os +import base64 + +from backports.tempfile import TemporaryDirectory + +from config_app.config_util.config import get_config_as_kube_secret +from util.config.validator import EXTRA_CA_DIRECTORY + + +def _create_temp_file_structure(file_structure): + temp_dir = TemporaryDirectory() + + for filename, data in file_structure.iteritems(): + if filename == EXTRA_CA_DIRECTORY: + extra_ca_dir_path = os.path.join(temp_dir.name, EXTRA_CA_DIRECTORY) + os.mkdir(extra_ca_dir_path) + + for name, cert_value in data: + with open(os.path.join(extra_ca_dir_path, name), 'w') as f: + f.write(cert_value) + else: + with open(os.path.join(temp_dir.name, filename), 'w') as f: + f.write(data) + + return temp_dir + + +@pytest.mark.parametrize('file_structure, expected_secret', [ + pytest.param({ + 'config.yaml': 'test:true', + }, + { + 'config.yaml': 'dGVzdDp0cnVl', + }, id='just a config value'), + pytest.param({ + 'config.yaml': 'test:true', + 'otherfile.ext': 'im a file' + }, + { + 'config.yaml': 'dGVzdDp0cnVl', + 'otherfile.ext': base64.b64encode('im a file') + }, id='config and another file'), + pytest.param({ + 'config.yaml': 'test:true', + 'extra_ca_certs': [ + ('cert.crt', 'im a cert!'), + ] + }, + { + 'config.yaml': 'dGVzdDp0cnVl', + 'extra_ca_certs_cert.crt': base64.b64encode('im a cert!'), + }, id='config and an extra cert'), + pytest.param({ + 'config.yaml': 'test:true', + 'otherfile.ext': 'im a file', + 'extra_ca_certs': [ + ('cert.crt', 'im a cert!'), + ('another.crt', 'im a different cert!'), + ] + }, + { + 'config.yaml': 'dGVzdDp0cnVl', + 'otherfile.ext': base64.b64encode('im a file'), + 'extra_ca_certs_cert.crt': base64.b64encode('im a cert!'), + 'extra_ca_certs_another.crt': base64.b64encode('im a different cert!'), + }, id='config, files, and extra certs!'), +]) +def test_get_config_as_kube_secret(file_structure, expected_secret): + temp_dir = _create_temp_file_structure(file_structure) + + secret = get_config_as_kube_secret(temp_dir.name) + assert secret == expected_secret + + temp_dir.cleanup() diff --git a/config_app/config_util/config/test/test_transient_dir_provider.py b/config_app/config_util/config/test/test_transient_dir_provider.py new file mode 100644 index 000000000..2d1f3f96c --- /dev/null +++ b/config_app/config_util/config/test/test_transient_dir_provider.py @@ -0,0 +1,68 @@ +import pytest +import os + +from config_app.config_util.config.TransientDirectoryProvider import TransientDirectoryProvider + + +@pytest.mark.parametrize('files_to_write, operations, expected_new_dir', [ + pytest.param({ + 'config.yaml': 'a config', + }, ([], [], []), { + 'config.yaml': 'a config', + }, id='just a config'), + pytest.param({ + 'config.yaml': 'a config', + 'oldfile': 'hmmm' + }, ([], [], ['oldfile']), { + 'config.yaml': 'a config', + }, id='delete a file'), + pytest.param({ + 'config.yaml': 'a config', + 'oldfile': 'hmmm' + }, ([('newfile', 'asdf')], [], ['oldfile']), { + 'config.yaml': 'a config', + 'newfile': 'asdf' + }, id='delete and add a file'), + pytest.param({ + 'config.yaml': 'a config', + 'somefile': 'before' + }, ([('newfile', 'asdf')], [('somefile', 'after')], []), { + 'config.yaml': 'a 
config', + 'newfile': 'asdf', + 'somefile': 'after', + }, id='add new files and change files'), +]) +def test_transient_dir_copy_config_dir(files_to_write, operations, expected_new_dir): + config_provider = TransientDirectoryProvider('', '', '') + + for name, data in files_to_write.iteritems(): + config_provider.write_volume_file(name, data) + + config_provider.create_copy_of_config_dir() + + for create in operations[0]: + (name, data) = create + config_provider.write_volume_file(name, data) + + for update in operations[1]: + (name, data) = update + config_provider.write_volume_file(name, data) + + for delete in operations[2]: + config_provider.remove_volume_file(delete) + + # check that the new directory matches expected state + for filename, data in expected_new_dir.iteritems(): + with open(os.path.join(config_provider.get_config_dir_path(), filename)) as f: + new_data = f.read() + assert new_data == data + + # Now check that the old dir matches the original state + saved = config_provider.get_old_config_dir() + + for filename, data in files_to_write.iteritems(): + with open(os.path.join(saved, filename)) as f: + new_data = f.read() + assert new_data == data + + config_provider.temp_dir.cleanup() diff --git a/config_app/config_util/config/testprovider.py b/config_app/config_util/config/testprovider.py new file mode 100644 index 000000000..63e563056 --- /dev/null +++ b/config_app/config_util/config/testprovider.py @@ -0,0 +1,83 @@ +import json +import io +import os + +from config_app.config_util.config.baseprovider import BaseProvider + +REAL_FILES = ['test/data/signing-private.gpg', 'test/data/signing-public.gpg', 'test/data/test.pem'] + + +class TestConfigProvider(BaseProvider): + """ Implementation of the config provider for testing. Everything is kept in-memory instead on + the real file system. 
""" + + def __init__(self): + self.clear() + + def clear(self): + self.files = {} + self._config = {} + + @property + def provider_id(self): + return 'test' + + def update_app_config(self, app_config): + self._config = app_config + + def get_config(self): + if not 'config.yaml' in self.files: + return None + + return json.loads(self.files.get('config.yaml', '{}')) + + def save_config(self, config_obj): + self.files['config.yaml'] = json.dumps(config_obj) + + def config_exists(self): + return 'config.yaml' in self.files + + def volume_exists(self): + return True + + def volume_file_exists(self, filename): + if filename in REAL_FILES: + return True + + return filename in self.files + + def save_volume_file(self, filename, flask_file): + self.files[filename] = flask_file.read() + + def write_volume_file(self, filename, contents): + self.files[filename] = contents + + def get_volume_file(self, filename, mode='r'): + if filename in REAL_FILES: + return open(filename, mode=mode) + + return io.BytesIO(self.files[filename]) + + def remove_volume_file(self, filename): + self.files.pop(filename, None) + + def list_volume_directory(self, path): + paths = [] + for filename in self.files: + if filename.startswith(path): + paths.append(filename[len(path) + 1:]) + + return paths + + def requires_restart(self, app_config): + return False + + def reset_for_test(self): + self._config['SUPER_USERS'] = ['devtable'] + self.files = {} + + def get_volume_path(self, directory, filename): + return os.path.join(directory, filename) + + def get_config_dir_path(self): + return '' diff --git a/config_app/config_util/k8saccessor.py b/config_app/config_util/k8saccessor.py new file mode 100644 index 000000000..dd115681b --- /dev/null +++ b/config_app/config_util/k8saccessor.py @@ -0,0 +1,306 @@ +import logging +import json +import base64 +import datetime +import os + +from requests import Request, Session +from collections import namedtuple +from util.config.validator import EXTRA_CA_DIRECTORY, EXTRA_CA_DIRECTORY_PREFIX + +from config_app.config_util.k8sconfig import KubernetesConfig + +logger = logging.getLogger(__name__) + +QE_DEPLOYMENT_LABEL = 'quay-enterprise-component' +QE_CONTAINER_NAME = 'quay-enterprise-app' + + +# Tuple containing response of the deployment rollout status method. +# status is one of: 'failed' | 'progressing' | 'available' +# message is any string describing the state. +DeploymentRolloutStatus = namedtuple('DeploymentRolloutStatus', ['status', 'message']) + +class K8sApiException(Exception): + pass + + +def _deployment_rollout_status_message(deployment, deployment_name): + """ + Gets the friendly human readable message of the current state of the deployment rollout + :param deployment: python dict matching: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#deployment-v1-apps + :param deployment_name: string + :return: DeploymentRolloutStatus + """ + # Logic for rollout status pulled from the `kubectl rollout status` command: + # https://github.com/kubernetes/kubernetes/blob/d9ba19c751709c8608e09a0537eea98973f3a796/pkg/kubectl/rollout_status.go#L62 + if deployment['metadata']['generation'] <= deployment['status']['observedGeneration']: + for cond in deployment['status']['conditions']: + if cond['type'] == 'Progressing' and cond['reason'] == 'ProgressDeadlineExceeded': + return DeploymentRolloutStatus( + status='failed', + message="Deployment %s's rollout failed. Please try again later." 
% deployment_name + ) + + desired_replicas = deployment['spec']['replicas'] + current_replicas = deployment['status'].get('replicas', 0) + if current_replicas == 0: + return DeploymentRolloutStatus( + status='available', + message='Deployment %s updated (no replicas, so nothing to roll out)' % deployment_name + ) + + # Some fields are optional in the spec, so if they're omitted, replace with defaults that won't indicate a wrong status + available_replicas = deployment['status'].get('availableReplicas', 0) + updated_replicas = deployment['status'].get('updatedReplicas', 0) + + if updated_replicas < desired_replicas: + return DeploymentRolloutStatus( + status='progressing', + message='Waiting for rollout to finish: %d out of %d new replicas have been updated...' % ( + updated_replicas, desired_replicas) + ) + + if current_replicas > updated_replicas: + return DeploymentRolloutStatus( + status='progressing', + message='Waiting for rollout to finish: %d old replicas are pending termination...' % ( + current_replicas - updated_replicas) + ) + + if available_replicas < updated_replicas: + return DeploymentRolloutStatus( + status='progressing', + message='Waiting for rollout to finish: %d of %d updated replicas are available...' % ( + available_replicas, updated_replicas) + ) + + return DeploymentRolloutStatus( + status='available', + message='Deployment %s successfully rolled out.' % deployment_name + ) + + return DeploymentRolloutStatus( + status='progressing', + message='Waiting for deployment spec to be updated...' + ) + + +class KubernetesAccessorSingleton(object): + """ Singleton allowing access to kubernetes operations """ + _instance = None + + def __init__(self, kube_config=None): + self.kube_config = kube_config + if kube_config is None: + self.kube_config = KubernetesConfig.from_env() + + KubernetesAccessorSingleton._instance = self + + @classmethod + def get_instance(cls, kube_config=None): + """ + Singleton getter implementation, returns the instance if one exists, otherwise creates the + instance and ties it to the class. + :return: KubernetesAccessorSingleton + """ + if cls._instance is None: + return cls(kube_config) + + return cls._instance + + def save_secret_to_directory(self, dir_path): + """ + Saves all files in the kubernetes secret to a local directory. + Assumes the directory is empty. + """ + secret = self._lookup_secret() + + secret_data = secret.get('data', {}) + + # Make the `extra_ca_certs` dir to ensure we can populate extra certs + extra_ca_dir_path = os.path.join(dir_path, EXTRA_CA_DIRECTORY) + os.mkdir(extra_ca_dir_path) + + for secret_filename, data in secret_data.iteritems(): + write_path = os.path.join(dir_path, secret_filename) + + if EXTRA_CA_DIRECTORY_PREFIX in secret_filename: + write_path = os.path.join(extra_ca_dir_path, secret_filename.replace(EXTRA_CA_DIRECTORY_PREFIX, '')) + + with open(write_path, 'w') as f: + f.write(base64.b64decode(data)) + + return 200 + + def save_file_as_secret(self, name, file_pointer): + value = file_pointer.read() + self._update_secret_file(name, value) + + def replace_qe_secret(self, new_secret_data): + """ + Removes the old config and replaces it with the new_secret_data as one action + """ + # Check first that the namespace for Red Hat Quay exists. If it does not, report that + # as an error, as it seems to be a common issue. 
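+    # A quick existence probe: any non-2xx response is surfaced as a missing namespace
+    # (roughly equivalent to `kubectl get namespace <qe_namespace>`) before we try to
+    # write the secret.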
+ namespace_url = 'namespaces/%s' % (self.kube_config.qe_namespace) + response = self._execute_k8s_api('GET', namespace_url) + if response.status_code // 100 != 2: + msg = 'A Kubernetes namespace with name `%s` must be created to save config' % self.kube_config.qe_namespace + raise Exception(msg) + + # Check if the secret exists. If not, then we create an empty secret and then update the file + # inside. + secret_url = 'namespaces/%s/secrets/%s' % (self.kube_config.qe_namespace, self.kube_config.qe_config_secret) + secret = self._lookup_secret() + if secret is None: + self._assert_success(self._execute_k8s_api('POST', secret_url, { + "kind": "Secret", + "apiVersion": "v1", + "metadata": { + "name": self.kube_config.qe_config_secret + }, + "data": {} + })) + + # Update the secret to reflect the file change. + secret['data'] = new_secret_data + + self._assert_success(self._execute_k8s_api('PUT', secret_url, secret)) + + def get_deployment_rollout_status(self, deployment_name): + """" + Returns the status of a rollout of a given deployment + :return _DeploymentRolloutStatus + """ + deployment_selector_url = 'namespaces/%s/deployments/%s' % ( + self.kube_config.qe_namespace, deployment_name + ) + + response = self._execute_k8s_api('GET', deployment_selector_url, api_prefix='apis/apps/v1') + if response.status_code != 200: + return DeploymentRolloutStatus('failed', 'Could not get deployment. Please check that the deployment exists') + + deployment = json.loads(response.text) + + return _deployment_rollout_status_message(deployment, deployment_name) + + def get_qe_deployments(self): + """" + Returns all deployments matching the label selector provided in the KubeConfig + """ + deployment_selector_url = 'namespaces/%s/deployments?labelSelector=%s%%3D%s' % ( + self.kube_config.qe_namespace, QE_DEPLOYMENT_LABEL, self.kube_config.qe_deployment_selector + ) + + response = self._execute_k8s_api('GET', deployment_selector_url, api_prefix='apis/extensions/v1beta1') + if response.status_code != 200: + return None + return json.loads(response.text) + + def cycle_qe_deployments(self, deployment_names): + """" + Triggers a rollout of all desired deployments in the qe namespace + """ + + for name in deployment_names: + logger.debug('Cycling deployment %s', name) + deployment_url = 'namespaces/%s/deployments/%s' % (self.kube_config.qe_namespace, name) + + # There is currently no command to simply rolling restart all the pods: https://github.com/kubernetes/kubernetes/issues/13488 + # Instead, we modify the template of the deployment with a dummy env variable to trigger a cycle of the pods + # (based off this comment: https://github.com/kubernetes/kubernetes/issues/13488#issuecomment-240393845) + self._assert_success(self._execute_k8s_api('PATCH', deployment_url, { + 'spec': { + 'template': { + 'spec': { + 'containers': [{ + # Note: this name MUST match the deployment template's pod template + # (e.g.