Backport remaining v2 changes to phase4

This commit is contained in:
Jake Moshenko 2015-10-23 13:49:23 -04:00
parent e965ed9f3b
commit cb7ec2f239
4 changed files with 104 additions and 5 deletions

View file

@ -3,15 +3,16 @@ import logging
import uuid import uuid
import time import time
import toposort import toposort
import resumablehashlib
from random import SystemRandom from random import SystemRandom
from datetime import datetime from datetime import datetime
from peewee import * from peewee import *
from data.read_slave import ReadSlaveModel from data.read_slave import ReadSlaveModel
from data.fields import ResumableSHAField, JSONField
from sqlalchemy.engine.url import make_url from sqlalchemy.engine.url import make_url
from collections import defaultdict from collections import defaultdict
from data.read_slave import ReadSlaveModel
from util.names import urn_generator from util.names import urn_generator
@ -348,7 +349,7 @@ class Repository(BaseModel):
# These models don't need to use transitive deletes, because the referenced objects # These models don't need to use transitive deletes, because the referenced objects
# are cleaned up directly # are cleaned up directly
skip_transitive_deletes = {RepositoryTag, RepositoryBuild, RepositoryBuildTrigger} skip_transitive_deletes = {RepositoryTag, RepositoryBuild, RepositoryBuildTrigger, BlobUpload}
# We need to sort the ops so that models get cleaned in order of their dependencies # We need to sort the ops so that models get cleaned in order of their dependencies
ops = reversed(list(self.dependencies(delete_nullable))) ops = reversed(list(self.dependencies(delete_nullable)))
@ -490,6 +491,7 @@ class ImageStorage(BaseModel):
image_size = BigIntegerField(null=True) image_size = BigIntegerField(null=True)
uncompressed_size = BigIntegerField(null=True) uncompressed_size = BigIntegerField(null=True)
uploading = BooleanField(default=True, null=True) uploading = BooleanField(default=True, null=True)
cas_path = BooleanField(default=True)
class ImageStorageTransformation(BaseModel): class ImageStorageTransformation(BaseModel):
@ -761,6 +763,23 @@ class RepositoryAuthorizedEmail(BaseModel):
) )
class BlobUpload(BaseModel):
  """Tracks the state of an in-progress (resumable) v2 blob upload.

  One row exists per active upload; `sha_state` carries the partially
  computed SHA-256 so the upload can resume across requests.
  """
  repository = ForeignKeyField(Repository, index=True)
  uuid = CharField(index=True, unique=True)
  # Bytes received so far for this upload.
  byte_count = IntegerField(default=0)
  # Serialized resumable SHA-256 state (see ResumableSHAField).
  sha_state = ResumableSHAField(null=True, default=resumablehashlib.sha256)
  location = ForeignKeyField(ImageStorageLocation)
  # BUG FIX: the default was a shared mutable literal ({}); use the dict
  # constructor so every new row gets its own fresh instance (peewee invokes
  # callable defaults per-row).
  storage_metadata = JSONField(null=True, default=dict)

  class Meta:
    database = db
    read_slaves = (read_slave,)
    indexes = (
      # Unique index on (repository, uuid); the previous comment incorrectly
      # referred to "email" (copy-paste from RepositoryAuthorizedEmail).
      (('repository', 'uuid'), True),
    )
class QuayService(BaseModel): class QuayService(BaseModel):
name = CharField(index=True, unique=True) name = CharField(index=True, unique=True)
@ -788,7 +807,6 @@ class QuayRelease(BaseModel):
) )
all_models = [User, Repository, Image, AccessToken, Role, RepositoryPermission, Visibility, all_models = [User, Repository, Image, AccessToken, Role, RepositoryPermission, Visibility,
RepositoryTag, EmailConfirmation, FederatedLogin, LoginService, QueueItem, RepositoryTag, EmailConfirmation, FederatedLogin, LoginService, QueueItem,
RepositoryBuild, Team, TeamMember, TeamRole, LogEntryKind, LogEntry, RepositoryBuild, Team, TeamMember, TeamRole, LogEntryKind, LogEntry,
@ -799,4 +817,4 @@ all_models = [User, Repository, Image, AccessToken, Role, RepositoryPermission,
RepositoryAuthorizedEmail, ImageStorageTransformation, DerivedImageStorage, RepositoryAuthorizedEmail, ImageStorageTransformation, DerivedImageStorage,
TeamMemberInvite, ImageStorageSignature, ImageStorageSignatureKind, TeamMemberInvite, ImageStorageSignature, ImageStorageSignatureKind,
AccessTokenKind, Star, RepositoryActionCount, TagManifest, UserRegion, AccessTokenKind, Star, RepositoryActionCount, TagManifest, UserRegion,
QuayService, QuayRegion, QuayRelease] QuayService, QuayRegion, QuayRelease, BlobUpload]

38
data/fields.py Normal file
View file

@ -0,0 +1,38 @@
import base64
import resumablehashlib
import json
from peewee import TextField
class ResumableSHAField(TextField):
  """Persists the internal state of a resumablehashlib SHA-256 object as JSON text."""

  def db_value(self, value):
    # BUG FIX: the column is declared null=True, but a None value previously
    # raised AttributeError on value.state(); map None to SQL NULL instead.
    if value is None:
      return None
    sha_state = value.state()
    # One of the state fields is a byte string; base64 encode it so the
    # serialized form survives any database text encoding/collation.
    sha_state[3] = base64.b64encode(sha_state[3])
    return json.dumps(sha_state)

  def python_value(self, value):
    # A NULL column deserializes to a fresh hasher so callers can always
    # resume/update without a None check.
    to_resume = resumablehashlib.sha256()
    if value is None:
      return to_resume
    sha_state = json.loads(value)
    # Reverse the base64 encoding applied in db_value.
    sha_state[3] = base64.b64decode(sha_state[3])
    to_resume.set_state(sha_state)
    return to_resume
class JSONField(TextField):
  """Persists an arbitrary JSON-serializable value as text."""

  def db_value(self, value):
    # BUG FIX: preserve SQL NULL for a None value instead of storing the
    # JSON text "null" — the columns using this field declare null=True,
    # and python_value already maps NULL back to an empty dict.
    if value is None:
      return None
    return json.dumps(value)

  def python_value(self, value):
    # NULL / empty text deserializes to an empty dict so callers can treat
    # the value as a mapping without a None check.
    if value is None or value == "":
      return {}
    return json.loads(value)

View file

@ -0,0 +1,43 @@
"""Backport v2 db changes.
Revision ID: 33bd39ef5ed6
Revises: 127905a52fdd
Create Date: 2015-10-23 12:34:22.776542
"""
# revision identifiers, used by Alembic.
revision = '33bd39ef5ed6'
down_revision = '127905a52fdd'
from alembic import op
import sqlalchemy as sa
def upgrade(tables):
    """Create the `blobupload` table and add `imagestorage.cas_path`.

    Backports the v2 registry schema changes. `tables` is supplied by the
    project's migration harness but is unused here.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('blobupload',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('repository_id', sa.Integer(), nullable=False),
        sa.Column('uuid', sa.String(length=255), nullable=False),
        # NOTE(review): sa.Integer caps at ~2^31-1 bytes (~2 GB); confirm blob
        # uploads can never exceed that, otherwise this should be BigInteger.
        sa.Column('byte_count', sa.Integer(), nullable=False),
        # Serialized resumable-SHA state (JSON text); NULL until first write.
        sa.Column('sha_state', sa.Text(), nullable=True),
        sa.Column('location_id', sa.Integer(), nullable=False),
        sa.Column('storage_metadata', sa.Text(), nullable=True),
        sa.ForeignKeyConstraint(['location_id'], ['imagestoragelocation.id'], name=op.f('fk_blobupload_location_id_imagestoragelocation')),
        sa.ForeignKeyConstraint(['repository_id'], ['repository.id'], name=op.f('fk_blobupload_repository_id_repository')),
        sa.PrimaryKeyConstraint('id', name=op.f('pk_blobupload'))
    )
    op.create_index('blobupload_location_id', 'blobupload', ['location_id'], unique=False)
    op.create_index('blobupload_repository_id', 'blobupload', ['repository_id'], unique=False)
    # Unique pair index mirrors the model's Meta.indexes entry on (repository, uuid).
    op.create_index('blobupload_repository_id_uuid', 'blobupload', ['repository_id', 'uuid'], unique=True)
    op.create_index('blobupload_uuid', 'blobupload', ['uuid'], unique=True)
    # server_default="0" (false) so pre-existing imagestorage rows satisfy
    # the NOT NULL constraint when the column is added.
    op.add_column(u'imagestorage', sa.Column('cas_path', sa.Boolean(), nullable=False, server_default="0"))
    ### end Alembic commands ###
def downgrade(tables):
    """Reverse upgrade(): drop `imagestorage.cas_path`, then the `blobupload` table.

    `tables` is supplied by the project's migration harness but is unused here.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column(u'imagestorage', 'cas_path')
    op.drop_table('blobupload')
    ### end Alembic commands ###

View file

@ -124,7 +124,7 @@ def garbage_collect_storage(storage_id_whitelist):
def create_storage(location_name): def create_storage(location_name):
storage = ImageStorage.create() storage = ImageStorage.create(cas_path=False)
location = ImageStorageLocation.get(name=location_name) location = ImageStorageLocation.get(name=location_name)
ImageStoragePlacement.create(location=location, storage=storage) ImageStoragePlacement.create(location=location, storage=storage)
storage.locations = {location_name} storage.locations = {location_name}