initial v1 refactor to use model methods
parent 9cfd6ec452
commit c14437e54a

2 changed files with 201 additions and 133 deletions

data/model/v1/__init__.py (new file, 126 additions)
@@ -0,0 +1,126 @@
from app import app, storage as store
from data import model
from util.morecollections import AttrDict


# TODO(jzelinskie): implement all of these methods using both legacy and new models.

def blob_placement_locations_docker_v1(namespace_name, repo_name, image_id):
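  # Returns the set of storage locations holding this image's layer blob, or
  # None if the image is unknown.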
  repo_image = model.image.get_repo_image_extended(namespace_name, repo_name, image_id)
  if repo_image is None:
    return None
  return repo_image.storage.locations


def blob_placement_locations_and_path_docker_v1(namespace_name, repo_name, image_id):
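  # Returns a (path, locations) pair for the image's layer blob, or
  # (None, None) when the image is unknown.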
  repo_image = model.image.get_repo_image_extended(namespace_name, repo_name, image_id)
  if not repo_image:
    return None, None
  return model.storage.get_layer_path(repo_image.storage), repo_image.storage.locations


def docker_v1_metadata(namespace_name, repo_name, image_id):
  repo_image = model.image.get_repo_image_extended(namespace_name, repo_name, image_id)
  if not repo_image:
    return None

  return AttrDict({
    'namespace_name': namespace_name,
    'repo_name': repo_name,
    'image_id': image_id,
    'checksum': repo_image.v1_checksum,
    'compat_json': repo_image.v1_json_metadata,
  })


def update_docker_v1_metadata(namespace_name, repo_name, image_id, created_date_str, comment,
                              command, compat_json, parent_image_id=None):
  # Old implementation:
  # parent_image = model.image.get_repo_image_extended(namespace_name, repo_name, parent_image_id)
  # model.image.set_image_metadata(image_id, namespace_name, repo_name, created_date_str,
  #                                comment, command, compat_json, parent_image)
  pass


def storage_exists(namespace_name, repo_name, image_id):
  repo_image = model.image.get_repo_image_extended(namespace_name, repo_name, image_id)
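  # A missing image surfaces below as an AttributeError: repo_image is None
  # and has no .storage attribute.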
  try:
    layer_path = store.v1_image_layer_path(repo_image.storage.uuid)
  except AttributeError:
    return False

  return (store.exists(repo_image.storage.locations, layer_path) and
          not repo_image.storage.uploading)


def store_docker_v1_checksum(namespace_name, repo_name, image_id, checksum, content_checksum):
  # Old implementation:
  # UPDATE repo_image.storage.content_checksum = content_checksum
  # UPDATE repo_image.v1_checksum = checksum
  pass


def is_image_uploading(namespace_name, repo_name, image_id):
  repo_image = model.image.get_repo_image_extended(namespace_name, repo_name, image_id)
  if repo_image is None:
    return False
  return repo_image.storage.uploading


def update_image_uploading(namespace_name, repo_name, image_id, is_uploading):
  # Old implementation:
  # UPDATE repo_image.storage.uploading = is_uploading
  pass


def update_image_size(namespace_name, repo_name, image_id, size, uncompressed_size):
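  # Persists both the compressed and uncompressed layer sizes on the image's
  # storage record.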
  model.storage.set_image_storage_metadata(
    image_id,
    namespace_name,
    repo_name,
    size,
    uncompressed_size,
  )


def image_size(namespace_name, repo_name, image_id):
  repo_image = model.image.get_repo_image_extended(namespace_name, repo_name, image_id)
  if repo_image is None:
    return None
  return repo_image.storage.image_size


def create_bittorrent_pieces(namespace_name, repo_name, image_id, pieces_bytes):
  repo_image = model.image.get_repo_image_extended(namespace_name, repo_name, image_id)
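  # As in storage_exists, a missing image is tolerated here: repo_image of
  # None raises AttributeError and the torrent info is simply not saved.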
  try:
    model.storage.save_torrent_info(
      repo_image.storage,
      app.config['BITTORRENT_PIECE_SIZE'],
      pieces_bytes
    )
  except AttributeError:
    pass


def image_ancestry(namespace_name, repo_name, image_id):
  try:
    image = model.image.get_image_by_id(namespace_name, repo_name, image_id)
  except model.InvalidImageException:
    return None

  parents = model.image.get_parent_images(namespace_name, repo_name, image)
  ancestry_docker_ids = [image.docker_image_id]
  ancestry_docker_ids.extend([parent.docker_image_id for parent in parents])
  return ancestry_docker_ids


def repository_exists(namespace_name, repo_name):
  repo = model.repository.get_repository(namespace_name, repo_name)
  return repo is not None


def create_or_link_image(username, repo_name, image_id, storage_location):
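  # Presumably wraps the legacy model's image creation/linking logic
  # (assumption: something like model.image.find_create_or_link_image);
  # left unimplemented in this pass.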
  pass


def create_temp_hidden_tag(namespace_name, repo_name, expiration):
  # Old implementation:
  # model.tag.create_temporary_hidden_tag(repo, repo_image,
  #                                       app.config['PUSH_TEMP_TAG_EXPIRATION_SEC'])
  pass
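For orientation, a sketch of how a Docker v1 endpoint might consume this module; the handler name and 404 handling below are illustrative assumptions, not part of this commit:

    from flask import abort

    from data.model.v1 import docker_v1_metadata


    def get_image_json(namespace_name, repo_name, image_id):
      # Illustrative caller: return the v1 compatibility JSON for an image,
      # or abort with a 404 when the image is unknown.
      metadata = docker_v1_metadata(namespace_name, repo_name, image_id)
      if metadata is None:
        abort(404)
      return metadata.compat_json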