Merge pull request #2388 from coreos-inc/max-layer-size
Make maximum layer size configurable
Commit 789e35668c
9 changed files with 71 additions and 14 deletions
@@ -28,19 +28,17 @@ def generate_server_config(config):
   """
   Generates server config from the app config
   """
-  if config:
-    tuf_server = config.get('TUF_SERVER', None)
-    tuf_host = config.get('TUF_HOST', None)
-    signing_enabled = config.get('FEATURE_SIGNING', False)
-  else:
-    tuf_server = None
-    tuf_host = None
-    signing_enabled = False
+  config = config or {}
+  tuf_server = config.get('TUF_SERVER', None)
+  tuf_host = config.get('TUF_HOST', None)
+  signing_enabled = config.get('FEATURE_SIGNING', False)
+  maximum_layer_size = config.get('MAXIMUM_LAYER_SIZE', '20G')
 
   write_config('conf/nginx/server-base.conf',
                tuf_server=tuf_server,
                tuf_host=tuf_host,
-               signing_enabled=signing_enabled)
+               signing_enabled=signing_enabled,
+               maximum_layer_size=maximum_layer_size)
 
 
 if __name__ == "__main__":
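For reference, a minimal sketch of how the new maximum_layer_size keyword presumably reaches the nginx template. The real write_config helper is not shown in this diff, so the Jinja2 rendering below is an assumption for illustration, not the PR's code:

# Hypothetical sketch: render the templated directive with the configured value.
# write_config's actual implementation is elsewhere in the repo; Jinja2 is assumed.
from jinja2 import Template

SNIPPET = 'client_max_body_size {{ maximum_layer_size }};'

def render_snippet(maximum_layer_size='20G'):
  return Template(SNIPPET).render(maximum_layer_size=maximum_layer_size)

print(render_snippet('50G'))  # -> client_max_body_size 50G;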
@@ -109,7 +109,7 @@ location ~ ^/v2 {
   proxy_pass http://registry_app_server;
   proxy_temp_path /tmp 1 2;
 
-  client_max_body_size 20G;
+  client_max_body_size {{ maximum_layer_size }};
 }
 
 location ~ ^/v1 {
@@ -127,7 +127,7 @@ location ~ ^/v1 {
   proxy_pass http://registry_app_server;
   proxy_temp_path /tmp 1 2;
 
-  client_max_body_size 20G;
+  client_max_body_size {{ maximum_layer_size }};
 }
 
 location /v1/_ping {
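Note that nginx only accepts plain byte counts or k/m/g suffixes for client_max_body_size, so whatever value is configured for MAXIMUM_LAYER_SIZE must fit that shape once it is templated in. A small illustrative guard (not part of the PR):

# Illustrative only: check that a configured size string uses a suffix nginx accepts.
import re

def is_valid_nginx_size(value):
  # nginx sizes are an integer optionally followed by k, m or g (case-insensitive).
  return re.match(r'\d+[kKmMgG]?$', value) is not None

print(is_valid_nginx_size('20G'))    # True
print(is_valid_nginx_size('20GiB'))  # False: nginx would reject this suffix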
@@ -426,3 +426,6 @@ class DefaultConfig(object):
 
   # Server where TUF metadata can be found
   TUF_SERVER = None
+
+  # Maximum size allowed for layers in the registry.
+  MAXIMUM_LAYER_SIZE = '20G'
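The string form of MAXIMUM_LAYER_SIZE is parsed at upload time with bitmath (see the blob endpoint changes below). A minimal sketch of how a configured value compares against an upload, assuming only that parse_string_unsafe accepts bare suffixes like '20G':

# Minimal sketch: turn the configured string into a comparable size object.
import bitmath

max_layer_size = bitmath.parse_string_unsafe('20G')
uploaded = bitmath.Byte(5 * 1024 ** 3)  # a 5 GiB upload, for illustration

if uploaded > max_layer_size:
  print('would raise LayerTooLarge')
else:
  print('within the configured limit')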
@@ -1004,12 +1004,12 @@ class RepositoryAuthorizedEmail(BaseModel):
 class BlobUpload(BaseModel):
   repository = ForeignKeyField(Repository)
   uuid = CharField(index=True, unique=True)
-  byte_count = IntegerField(default=0)
+  byte_count = BigIntegerField(default=0)
   sha_state = ResumableSHA256Field(null=True, default=resumablehashlib.sha256)
   location = ForeignKeyField(ImageStorageLocation)
   storage_metadata = JSONField(null=True, default={})
   chunk_count = IntegerField(default=0)
-  uncompressed_byte_count = IntegerField(null=True)
+  uncompressed_byte_count = BigIntegerField(null=True)
   created = DateTimeField(default=datetime.now, index=True)
   piece_sha_state = ResumableSHA1Field(null=True)
   piece_hashes = Base64BinaryField(null=True)
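The column widening matters because peewee's IntegerField typically maps to a signed 32-bit INT on MySQL, which caps stored byte counts just above 2 GiB; BigIntegerField gives 63 usable bits. A back-of-the-envelope check:

# Signed integer ceilings that motivate the switch to BigIntegerField.
INT32_MAX = 2 ** 31 - 1
INT64_MAX = 2 ** 63 - 1

print('%d bytes ~= %.1f GiB' % (INT32_MAX, INT32_MAX / float(1024 ** 3)))  # ~2.0 GiB
print('%d bytes ~= %.1f EiB' % (INT64_MAX, INT64_MAX / float(1024 ** 6)))  # ~8.0 EiB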
@@ -0,0 +1,24 @@
+"""Change BlobUpload fields to BigIntegers to allow layers > 8GB
+
+Revision ID: b8ae68ad3e52
+Revises: 7a525c68eb13
+Create Date: 2017-02-27 11:26:49.182349
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'b8ae68ad3e52'
+down_revision = '7a525c68eb13'
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+def upgrade(tables):
+  op.alter_column('blobupload', 'byte_count', existing_type=sa.Integer(), type_=sa.BigInteger())
+  op.alter_column('blobupload', 'uncompressed_byte_count', existing_type=sa.Integer(), type_=sa.BigInteger())
+
+
+def downgrade(tables):
+  op.alter_column('blobupload', 'byte_count', existing_type=sa.BigInteger(), type_=sa.Integer())
+  op.alter_column('blobupload', 'uncompressed_byte_count', existing_type=sa.BigInteger(), type_=sa.Integer())
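On MySQL this migration amounts to widening the two columns; roughly the DDL sketched below, though the exact statements Alembic emits from op.alter_column depend on the dialect and existing column options:

# Approximate DDL the upgrade corresponds to on MySQL (for orientation only;
# Alembic generates the real statements from op.alter_column).
APPROXIMATE_UPGRADE_SQL = [
  "ALTER TABLE blobupload MODIFY byte_count BIGINT",
  "ALTER TABLE blobupload MODIFY uncompressed_byte_count BIGINT",
]

for stmt in APPROXIMATE_UPGRADE_SQL:
  print(stmt)

Note that the downgrade only fits back into a 32-bit INT if no recorded byte counts exceed 2**31 - 1, so it is safe only before large uploads have been stored.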
@@ -4,6 +4,7 @@ import time
 
 from flask import url_for, request, redirect, Response, abort as flask_abort
 
+import bitmath
 import resumablehashlib
 
 from app import storage, app, get_app_url, metric_queue
@@ -14,7 +15,7 @@ from digest import digest_tools
 from endpoints.common import parse_repository_name
 from endpoints.v2 import v2_bp, require_repo_read, require_repo_write, get_input_stream
 from endpoints.v2.errors import (BlobUnknown, BlobUploadInvalid, BlobUploadUnknown, Unsupported,
-                                 NameUnknown)
+                                 NameUnknown, LayerTooLarge)
 from endpoints.decorators import anon_protect
 from util.cache import cache_control
 from util.registry.filelike import wrap_with_handler, StreamSlice
@@ -346,6 +347,8 @@ def _upload_chunk(blob_upload, range_header):
 
   Returns a BlobUpload object or None if there was a failure.
   """
+  max_layer_size = bitmath.parse_string_unsafe(app.config['MAXIMUM_LAYER_SIZE'])
+
   # Get the offset and length of the current chunk.
   start_offset, length = _start_offset_and_length(range_header)
   if blob_upload is None or None in {start_offset, length}:
@@ -356,6 +359,11 @@ def _upload_chunk(blob_upload, range_header):
     logger.error('start_offset provided to _upload_chunk greater than blob.upload.byte_count')
     return None
 
+  # Check if we should raise 413 before accepting the data.
+  uploaded = bitmath.Byte(length + start_offset)
+  if length > -1 and uploaded > max_layer_size:
+    raise LayerTooLarge(uploaded=uploaded.bytes, max_allowed=max_layer_size.bytes)
+
   location_set = {blob_upload.location_name}
 
   upload_error = None
@@ -435,6 +443,11 @@ def _upload_chunk(blob_upload, range_header):
   blob_upload.byte_count += length_written
   blob_upload.chunk_count += 1
 
+  # Ensure we have not gone beyond the max layer size.
+  upload_size = bitmath.Byte(blob_upload.byte_count)
+  if upload_size > max_layer_size:
+    raise LayerTooLarge(uploaded=upload_size.bytes, max_allowed=max_layer_size.bytes)
+
   return blob_upload
 
 
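The two checks added to _upload_chunk work together: the first rejects a chunk up front when the client-declared range already exceeds the limit, the second catches the case where more data arrived than was declared. A standalone illustration of that logic using the same bitmath comparisons (values are illustrative):

# Standalone illustration of the pre- and post-write size checks added above.
import bitmath

def rejects_before_write(start_offset, length, max_layer_size):
  # Reject before accepting data when the declared range already exceeds the limit.
  uploaded = bitmath.Byte(length + start_offset)
  return length > -1 and uploaded > max_layer_size

def rejects_after_write(byte_count, max_layer_size):
  # Reject after writing if the accumulated byte count went over the limit.
  return bitmath.Byte(byte_count) > max_layer_size

limit = bitmath.parse_string_unsafe('20G')
print(rejects_before_write(0, 1024, limit))        # False: a 1 KiB chunk is fine
print(rejects_after_write(64 * 1024 ** 3, limit))  # True: 64 GiB exceeds 20G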
@@ -1,3 +1,5 @@
+import bitmath
+
 class V2RegistryException(Exception):
   def __init__(self, error_code_str, message, detail, http_status_code=400,
                repository=None, scopes=None):
@@ -112,6 +114,21 @@ class TagInvalid(V2RegistryException):
                         'manifest tag did not match URI',
                         detail)
 
+class LayerTooLarge(V2RegistryException):
+  def __init__(self, uploaded=None, max_allowed=None):
+    detail = {}
+    message = 'Uploaded blob is larger than allowed by this registry'
+
+    if uploaded is not None and max_allowed is not None:
+      detail = {
+        'reason': '%s is greater than maximum allowed size %s' % (uploaded, max_allowed),
+        'max_allowed': max_allowed,
+        'uploaded': uploaded,
+      }
+
+      up_str = bitmath.Byte(uploaded).best_prefix().format("{value:.2f} {unit}")
+      max_str = bitmath.Byte(max_allowed).best_prefix().format("{value:.2f} {unit}")
+      message = 'Uploaded blob of %s is larger than %s allowed by this registry' % (up_str, max_str)
 
 class Unauthorized(V2RegistryException):
   def __init__(self, detail=None, repository=None, scopes=None):
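LayerTooLarge carries the raw byte counts in its detail dict and builds a human-readable message with bitmath's best_prefix(). A quick sketch of that formatting, with illustrative values:

# Sketch of the message formatting used by LayerTooLarge.
import bitmath

uploaded, max_allowed = 25 * 1000 ** 3, 20 * 1000 ** 3  # raw byte counts
up_str = bitmath.Byte(uploaded).best_prefix().format("{value:.2f} {unit}")
max_str = bitmath.Byte(max_allowed).best_prefix().format("{value:.2f} {unit}")
print('Uploaded blob of %s is larger than %s allowed by this registry' % (up_str, max_str))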
@@ -20,6 +20,7 @@ autobahn==0.9.3-3
 beautifulsoup4
 bencode
 bintrees
+bitmath
 boto
 cachetools==1.1.6
 cryptography
@@ -8,6 +8,7 @@ Babel==2.3.4
 beautifulsoup4==4.5.1
 bencode==1.0
 bintrees==2.0.4
+bitmath==1.3.1.2
 blinker==1.4
 boto==2.43.0
 cachetools==1.1.6