2013-09-20 15:55:44 +00:00
|
|
|
import urllib
|
2015-09-24 15:42:56 +00:00
|
|
|
import re
|
2013-09-20 15:55:44 +00:00
|
|
|
|
2017-02-21 19:25:26 +00:00
|
|
|
import anunidecode # Don't listen to pylint's lies. This import is required for unidecode below.
|
|
|
|
|
2014-05-02 17:43:57 +00:00
|
|
|
from uuid import uuid4
|
2013-09-23 16:37:40 +00:00
|
|
|
|
2015-09-24 15:42:56 +00:00
|
|
|
# Valid repository names: one or more letters, digits, '.', '_' or '-' characters.
REPOSITORY_NAME_REGEX = re.compile(r'^[\.a-zA-Z0-9_-]+$')
|
2013-09-20 15:55:44 +00:00
|
|
|
|
2016-07-26 00:50:35 +00:00
|
|
|
# Unanchored Docker tag pattern: a leading word character followed by up to 127
# word, '.' or '-' characters (128 characters total), for embedding in larger regexes.
VALID_TAG_PATTERN = r'[\w][\w.-]{0,127}'
|
|
|
|
# Fully-anchored form of the tag pattern, for validating an entire string as a tag.
FULL_TAG_PATTERN = r'^[\w][\w.-]{0,127}$'
|
|
|
|
|
|
|
|
# Compiled matcher for validating complete Docker tag strings.
TAG_REGEX = re.compile(FULL_TAG_PATTERN)
|
2015-10-05 20:36:33 +00:00
|
|
|
# User-facing message shown when a tag fails to match TAG_REGEX.
TAG_ERROR = ('Invalid tag: must match [A-Za-z0-9_.-], NOT start with "." or "-", '
             'and can contain 1-128 characters')
|
|
|
|
|
2016-07-26 00:50:35 +00:00
|
|
|
def escape_tag(tag, default='latest'):
  """ Escapes a Docker tag, ensuring it matches the tag regular expression.

      Any character not allowed by FULL_TAG_PATTERN is replaced with '_', and
      the result is truncated to the maximum tag length. If the tag is empty
      or None, the given default is returned instead.
  """
  if not tag:
    return default

  # The first character must be a word character ([A-Za-z0-9_]).
  tag = re.sub(r'^[^\w]', '_', tag)

  # Remaining characters may additionally be '.' or '-'.
  tag = re.sub(r'[^\w\.-]', '_', tag)

  # FULL_TAG_PATTERN allows one leading character plus up to 127 more (128 total,
  # matching TAG_ERROR's "1-128 characters"); the previous bound of 127 truncated
  # one character too many.
  return tag[0:128]
|
|
|
|
|
2016-01-21 20:40:51 +00:00
|
|
|
|
|
|
|
def parse_namespace_repository(repository, library_namespace, include_tag=False):
  """ Parses a repository path into its components.

      Returns (namespace, repository) or, when include_tag is True,
      (namespace, repository, tag). When no namespace is present in the path,
      library_namespace is used; when no tag is present, 'latest' is used.
  """
  # Transliterate to ASCII via the 'unidecode' codec registered as a side
  # effect of the module-level anunidecode import.
  repository = repository.encode('unidecode', 'ignore')

  pieces = repository.rstrip('/').split('/', 1)
  if len(pieces) == 2:
    (namespace, repository) = pieces
  else:
    namespace = library_namespace
    repository = pieces[0]

  if include_tag:
    tag_pieces = repository.split(':', 1)
    if len(tag_pieces) == 2:
      (repository, tag) = tag_pieces
    else:
      tag = 'latest'

  # NOTE(review): urllib.quote_plus is the Python 2 API; presumably this file
  # predates a Python 3 migration.
  repository = urllib.quote_plus(repository)

  if include_tag:
    return (namespace, repository, tag)

  return (namespace, repository)
|
2013-09-23 16:37:40 +00:00
|
|
|
|
2013-11-20 21:13:03 +00:00
|
|
|
|
|
|
|
def format_robot_username(parent_username, robot_shortname):
  """ Builds a robot's full username from its parent user/org name and its short name. """
  return '+'.join((parent_username, robot_shortname))
|
2014-03-27 22:33:13 +00:00
|
|
|
|
2014-05-02 17:43:57 +00:00
|
|
|
|
2014-03-27 22:33:13 +00:00
|
|
|
def parse_robot_username(robot_username):
  """ Splits a robot username of the form '<parent>+<shortname>' into its parts.

      Returns a list of the '+'-separated components, or None if the given
      username is not a robot username (i.e. contains no '+').
  """
  # Idiomatic membership test ('x not in y' rather than 'not x in y').
  if '+' not in robot_username:
    return None

  # maxsplit=2 preserved from the original; a well-formed robot username
  # contains exactly one '+'.
  return robot_username.split('+', 2)
|
2014-05-02 17:43:57 +00:00
|
|
|
|
|
|
|
|
2014-09-11 19:45:41 +00:00
|
|
|
def parse_urn(urn):
  """ Parses a URN, returning a pair that contains a list of URN
      namespace parts, followed by the URN's unique ID.

      Returns None if the string does not start with 'urn:'.
  """
  if not urn.startswith('urn:'):
    return None

  parts = urn[len('urn:'):].split(':')
  # Negative indexing replaces the original's manual len() arithmetic.
  return (parts[:-1], parts[-1])
|
|
|
|
|
|
|
|
|
|
|
|
def parse_single_urn(urn):
  """ Parses a URN, returning a pair that contains the first
      namespace part, followed by the URN's unique ID.

      Returns None if the URN is malformed or has no namespace parts.
  """
  result = parse_urn(urn)
  # Empty-sequence truthiness replaces the original's 'not len(...)'.
  if result is None or not result[0]:
    return None

  return (result[0][0], result[1])
|
|
|
|
|
2014-05-02 17:43:57 +00:00
|
|
|
def uuid_generator():
  """ Returns a fresh random UUID as a string; the default ID generator for URNs.

      (PEP 8: a def statement rather than a lambda bound to a name.)
  """
  return str(uuid4())
|
|
|
|
|
|
|
|
|
|
|
|
def urn_generator(namespace_portions, id_generator=uuid_generator):
  """ Returns a zero-argument callable that produces URNs of the form
      'urn:<portion>:...:<id>', with IDs drawn from id_generator.
  """
  prefix = 'urn:' + ':'.join(namespace_portions) + ':'
  return lambda: prefix + id_generator()
|