diff --git a/conf/init/nginx_conf_create.sh b/conf/init/nginx_conf_create.sh
index f2dc9724c..c023f5e4f 100755
--- a/conf/init/nginx_conf_create.sh
+++ b/conf/init/nginx_conf_create.sh
@@ -28,19 +28,17 @@ def generate_server_config(config):
   """
   Generates server config from the app config
   """
-  if config:
-    tuf_server = config.get('TUF_SERVER', None)
-    tuf_host = config.get('TUF_HOST', None)
-    signing_enabled = config.get('FEATURE_SIGNING', False)
-  else:
-    tuf_server = None
-    tuf_host = None
-    signing_enabled = False
+  config = config or {}
+  tuf_server = config.get('TUF_SERVER', None)
+  tuf_host = config.get('TUF_HOST', None)
+  signing_enabled = config.get('FEATURE_SIGNING', False)
+  maximum_layer_size = config.get('MAXIMUM_LAYER_SIZE', '20G')
 
   write_config('conf/nginx/server-base.conf',
                tuf_server=tuf_server,
                tuf_host=tuf_host,
-               signing_enabled=signing_enabled)
+               signing_enabled=signing_enabled,
+               maximum_layer_size=maximum_layer_size)
 
 
 if __name__ == "__main__":
diff --git a/conf/nginx/server-base.conf.jnj b/conf/nginx/server-base.conf.jnj
index ee2fdb259..7b5c69181 100644
--- a/conf/nginx/server-base.conf.jnj
+++ b/conf/nginx/server-base.conf.jnj
@@ -109,7 +109,7 @@ location ~ ^/v2 {
   proxy_pass   http://registry_app_server;
   proxy_temp_path /tmp 1 2;
 
-  client_max_body_size 20G;
+  client_max_body_size {{ maximum_layer_size }};
 }
 
 location ~ ^/v1 {
@@ -127,7 +127,7 @@ location ~ ^/v1 {
   proxy_pass   http://registry_app_server;
   proxy_temp_path /tmp 1 2;
 
-  client_max_body_size 20G;
+  client_max_body_size {{ maximum_layer_size }};
 }
 
 location /v1/_ping {
diff --git a/config.py b/config.py
index 67734bf82..9da6fd30d 100644
--- a/config.py
+++ b/config.py
@@ -426,3 +426,6 @@ class DefaultConfig(object):
 
   # Server where TUF metadata can be found
   TUF_SERVER = None
+
+  # Maximum size allowed for layers in the registry.
+  MAXIMUM_LAYER_SIZE = '20G'
diff --git a/data/database.py b/data/database.py
index 5a6c746b0..ff647aeb1 100644
--- a/data/database.py
+++ b/data/database.py
@@ -1004,12 +1004,12 @@ class RepositoryAuthorizedEmail(BaseModel):
 class BlobUpload(BaseModel):
   repository = ForeignKeyField(Repository)
   uuid = CharField(index=True, unique=True)
-  byte_count = IntegerField(default=0)
+  byte_count = BigIntegerField(default=0)
   sha_state = ResumableSHA256Field(null=True, default=resumablehashlib.sha256)
   location = ForeignKeyField(ImageStorageLocation)
   storage_metadata = JSONField(null=True, default={})
   chunk_count = IntegerField(default=0)
-  uncompressed_byte_count = IntegerField(null=True)
+  uncompressed_byte_count = BigIntegerField(null=True)
   created = DateTimeField(default=datetime.now, index=True)
   piece_sha_state = ResumableSHA1Field(null=True)
   piece_hashes = Base64BinaryField(null=True)
diff --git a/data/migrations/versions/b8ae68ad3e52_change_blobupload_fields_to_bigintegers_.py b/data/migrations/versions/b8ae68ad3e52_change_blobupload_fields_to_bigintegers_.py
new file mode 100644
index 000000000..90a5b8978
--- /dev/null
+++ b/data/migrations/versions/b8ae68ad3e52_change_blobupload_fields_to_bigintegers_.py
@@ -0,0 +1,24 @@
+"""Change BlobUpload fields to BigIntegers to allow layers > 8GB
+
+Revision ID: b8ae68ad3e52
+Revises: 7a525c68eb13
+Create Date: 2017-02-27 11:26:49.182349
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'b8ae68ad3e52'
+down_revision = '7a525c68eb13'
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables):
+  op.alter_column('blobupload', 'byte_count', existing_type=sa.Integer(), type_=sa.BigInteger())
+  op.alter_column('blobupload', 'uncompressed_byte_count', existing_type=sa.Integer(), type_=sa.BigInteger())
+
+
+def downgrade(tables):
+  op.alter_column('blobupload', 'byte_count', existing_type=sa.BigInteger(), type_=sa.Integer())
+  op.alter_column('blobupload', 'uncompressed_byte_count', existing_type=sa.BigInteger(), type_=sa.Integer())
diff --git a/endpoints/v2/blob.py b/endpoints/v2/blob.py
index ba0acf9ad..cd77ee2ee 100644
--- a/endpoints/v2/blob.py
+++ b/endpoints/v2/blob.py
@@ -4,6 +4,7 @@ import time
 
 from flask import url_for, request, redirect, Response, abort as flask_abort
 
+import bitmath
 import resumablehashlib
 
 from app import storage, app, get_app_url, metric_queue
@@ -14,7 +15,7 @@ from digest import digest_tools
 from endpoints.common import parse_repository_name
 from endpoints.v2 import v2_bp, require_repo_read, require_repo_write, get_input_stream
 from endpoints.v2.errors import (BlobUnknown, BlobUploadInvalid, BlobUploadUnknown, Unsupported,
-                                 NameUnknown)
+                                 NameUnknown, LayerTooLarge)
 from endpoints.decorators import anon_protect
 from util.cache import cache_control
 from util.registry.filelike import wrap_with_handler, StreamSlice
@@ -346,6 +347,8 @@ def _upload_chunk(blob_upload, range_header):
 
   Returns a BlobUpload object or None if there was a failure.
   """
+  max_layer_size = bitmath.parse_string_unsafe(app.config['MAXIMUM_LAYER_SIZE'])
+
   # Get the offset and length of the current chunk.
   start_offset, length = _start_offset_and_length(range_header)
   if blob_upload is None or None in {start_offset, length}:
@@ -356,6 +359,11 @@ def _upload_chunk(blob_upload, range_header):
     logger.error('start_offset provided to _upload_chunk greater than blob.upload.byte_count')
     return None
 
+  # Check if we should raise 413 before accepting the data.
+  uploaded = bitmath.Byte(length + start_offset)
+  if length > -1 and uploaded > max_layer_size:
+    raise LayerTooLarge(uploaded=uploaded.bytes, max_allowed=max_layer_size.bytes)
+
   location_set = {blob_upload.location_name}
 
   upload_error = None
@@ -435,6 +443,11 @@ def _upload_chunk(blob_upload, range_header):
   blob_upload.byte_count += length_written
   blob_upload.chunk_count += 1
 
+  # Ensure we have not gone beyond the max layer size.
+  upload_size = bitmath.Byte(blob_upload.byte_count)
+  if upload_size > max_layer_size:
+    raise LayerTooLarge(uploaded=upload_size.bytes, max_allowed=max_layer_size.bytes)
+
   return blob_upload
 
 
diff --git a/endpoints/v2/errors.py b/endpoints/v2/errors.py
index 0f8a5284e..e511c0500 100644
--- a/endpoints/v2/errors.py
+++ b/endpoints/v2/errors.py
@@ -1,3 +1,5 @@
+import bitmath
+
 class V2RegistryException(Exception):
   def __init__(self, error_code_str, message, detail, http_status_code=400, repository=None,
                scopes=None):
@@ -112,5 +114,24 @@ class TagInvalid(V2RegistryException):
                                      'manifest tag did not match URI', detail)
 
 
+class LayerTooLarge(V2RegistryException):
+  def __init__(self, uploaded=None, max_allowed=None):
+    detail = {}
+    message = 'Uploaded blob is larger than allowed by this registry'
+
+    if uploaded is not None and max_allowed is not None:
+      detail = {
+        'reason': '%s is greater than maximum allowed size %s' % (uploaded, max_allowed),
+        'max_allowed': max_allowed,
+        'uploaded': uploaded,
+      }
+
+      up_str = bitmath.Byte(uploaded).best_prefix().format("{value:.2f} {unit}")
+      max_str = bitmath.Byte(max_allowed).best_prefix().format("{value:.2f} {unit}")
+      message = 'Uploaded blob of %s is larger than %s allowed by this registry' % (up_str, max_str)
+
+    super(LayerTooLarge, self).__init__('BLOB_UPLOAD_INVALID', message, detail, 413)
+
+
 class Unauthorized(V2RegistryException):
   def __init__(self, detail=None, repository=None, scopes=None):
diff --git a/requirements-nover.txt b/requirements-nover.txt
index 7fdef7c77..f03fe050d 100644
--- a/requirements-nover.txt
+++ b/requirements-nover.txt
@@ -20,6 +20,7 @@ autobahn==0.9.3-3
 beautifulsoup4
 bencode
 bintrees
+bitmath
 boto
 cachetools==1.1.6
 cryptography
diff --git a/requirements.txt b/requirements.txt
index eaf37efea..aed0f9659 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,6 +8,7 @@ Babel==2.3.4
 beautifulsoup4==4.5.1
 bencode==1.0
 bintrees==2.0.4
+bitmath==1.3.1.2
 blinker==1.4
 boto==2.43.0
 cachetools==1.1.6