diff --git a/Dockerfile b/Dockerfile
index 6a3fa1f2f..7ba5e7501 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -38,19 +38,13 @@ ADD . .
# Run grunt
RUN cd grunt && grunt
-ADD conf/init/svlogd_config /svlogd_config
ADD conf/init/doupdatelimits.sh /etc/my_init.d/
-ADD conf/init/preplogsdir.sh /etc/my_init.d/
+ADD conf/init/copy_syslog_config.sh /etc/my_init.d/
ADD conf/init/runmigration.sh /etc/my_init.d/
-ADD conf/init/gunicorn_web /etc/service/gunicorn_web
-ADD conf/init/gunicorn_registry /etc/service/gunicorn_registry
-ADD conf/init/gunicorn_verbs /etc/service/gunicorn_verbs
-ADD conf/init/nginx /etc/service/nginx
-ADD conf/init/diffsworker /etc/service/diffsworker
-ADD conf/init/notificationworker /etc/service/notificationworker
-ADD conf/init/buildlogsarchiver /etc/service/buildlogsarchiver
-ADD conf/init/buildmanager /etc/service/buildmanager
+ADD conf/init/service/ /etc/service/
+
+RUN rm -rf /etc/service/syslog-forwarder
# Download any external libs.
RUN mkdir static/fonts static/ldn
diff --git a/app.py b/app.py
index 78243de75..33eabf1f8 100644
--- a/app.py
+++ b/app.py
@@ -39,7 +39,6 @@ OVERRIDE_CONFIG_YAML_FILENAME = 'conf/stack/config.yaml'
OVERRIDE_CONFIG_PY_FILENAME = 'conf/stack/config.py'
OVERRIDE_CONFIG_KEY = 'QUAY_OVERRIDE_CONFIG'
-LICENSE_FILENAME = 'conf/stack/license.enc'
CONFIG_PROVIDER = FileConfigProvider(OVERRIDE_CONFIG_DIRECTORY, 'config.yaml', 'config.py')
diff --git a/auth/auth.py b/auth/auth.py
index 79e07e3be..30e2f68db 100644
--- a/auth/auth.py
+++ b/auth/auth.py
@@ -114,7 +114,8 @@ def _process_basic_auth(auth):
logger.debug('Invalid robot or password for robot: %s' % credentials[0])
else:
- authenticated = authentication.verify_user(credentials[0], credentials[1])
+ (authenticated, error_message) = authentication.verify_user(credentials[0], credentials[1],
+ basic_auth=True)
if authenticated:
logger.debug('Successfully validated user: %s' % authenticated.username)
diff --git a/avatars/avatars.py b/avatars/avatars.py
index 40935df10..220cae9cb 100644
--- a/avatars/avatars.py
+++ b/avatars/avatars.py
@@ -1,4 +1,5 @@
import hashlib
+import math
class Avatar(object):
def __init__(self, app=None):
@@ -7,8 +8,7 @@ class Avatar(object):
def _init_app(self, app):
return AVATAR_CLASSES[app.config.get('AVATAR_KIND', 'Gravatar')](
- app.config['SERVER_HOSTNAME'],
- app.config['PREFERRED_URL_SCHEME'])
+ app.config['PREFERRED_URL_SCHEME'], app.config['AVATAR_COLORS'], app.config['HTTPCLIENT'])
def __getattr__(self, name):
return getattr(self.state, name, None)
@@ -16,48 +16,83 @@ class Avatar(object):
class BaseAvatar(object):
""" Base class for all avatar implementations. """
- def __init__(self, server_hostname, preferred_url_scheme):
- self.server_hostname = server_hostname
+ def __init__(self, preferred_url_scheme, colors, http_client):
self.preferred_url_scheme = preferred_url_scheme
+ self.colors = colors
+ self.http_client = http_client
- def get_url(self, email, size=16, name=None):
- """ Returns the full URL for viewing the avatar of the given email address, with
- an optional size.
+ def get_mail_html(self, name, email_or_id, size=16, kind='user'):
+ """ Returns the full HTML and CSS for viewing the avatar of the given name and email address,
+ with an optional size.
"""
- raise NotImplementedError
+ data = self.get_data(name, email_or_id, kind)
+ url = self._get_url(data['hash'], size) if kind != 'team' else None
+ font_size = size - 6
- def compute_hash(self, email, name=None):
- """ Computes the avatar hash for the given email address. If the name is given and a default
- avatar is being computed, the name can be used in place of the email address. """
- raise NotImplementedError
+ if url is not None:
+ # Try to load the gravatar. If we get a non-404 response, then we use it in place of
+ # the CSS avatar.
+ response = self.http_client.get(url)
+ if response.status_code == 200:
+ return """""" % (url, size, size, kind)
+
+ radius = '50%' if kind == 'team' else '0%'
+ letter = 'Ω' if kind == 'team' and data['name'] == 'owners' else data['name'].upper()[0]
+
+ return """
+
+ %s
+
+""" % (size, size, data['color'], font_size, size, radius, letter)
+
+ def get_data_for_user(self, user):
+ return self.get_data(user.username, user.email, 'robot' if user.robot else 'user')
+
+ def get_data_for_team(self, team):
+ return self.get_data(team.name, team.name, 'team')
+
+ def get_data_for_org(self, org):
+ return self.get_data(org.username, org.email, 'org')
+
+ def get_data(self, name, email_or_id, kind='user'):
+ """ Computes and returns the full data block for the avatar:
+ {
+ 'name': name,
+ 'hash': The gravatar hash, if any.
+ 'color': The color for the avatar
+ }
+ """
+ colors = self.colors
+ hash_value = hashlib.md5(email_or_id.strip().lower()).hexdigest()
+
+ byte_count = int(math.ceil(math.log(len(colors), 16)))
+ byte_data = hash_value[0:byte_count]
+ hash_color = colors[int(byte_data, 16) % len(colors)]
+
+ return {
+ 'name': name,
+ 'hash': hash_value,
+ 'color': hash_color,
+ 'kind': kind
+ }
+
+ def _get_url(self, hash_value, size):
+ """ Returns the URL for displaying the overlay avatar. """
+ return None
class GravatarAvatar(BaseAvatar):
""" Avatar system that uses gravatar for generating avatars. """
- def compute_hash(self, email, name=None):
- email = email or ""
- return hashlib.md5(email.strip().lower()).hexdigest()
-
- def get_url(self, email, size=16, name=None):
- computed = self.compute_hash(email, name=name)
- return '%s://www.gravatar.com/avatar/%s?d=identicon&size=%s' % (self.preferred_url_scheme,
- computed, size)
+ def _get_url(self, hash_value, size=16):
+ return '%s://www.gravatar.com/avatar/%s?d=404&size=%s' % (self.preferred_url_scheme,
+ hash_value, size)
class LocalAvatar(BaseAvatar):
""" Avatar system that uses the local system for generating avatars. """
- def compute_hash(self, email, name=None):
- email = email or ""
- if not name and not email:
- return ''
-
- prefix = name if name else email
- return prefix[0] + hashlib.md5(email.strip().lower()).hexdigest()
-
- def get_url(self, email, size=16, name=None):
- computed = self.compute_hash(email, name=name)
- return '%s://%s/avatar/%s?size=%s' % (self.preferred_url_scheme, self.server_hostname,
- computed, size)
-
+ pass
AVATAR_CLASSES = {
'gravatar': GravatarAvatar,
diff --git a/buildman/manager/ephemeral.py b/buildman/manager/ephemeral.py
index 40876cdf5..7e24094c8 100644
--- a/buildman/manager/ephemeral.py
+++ b/buildman/manager/ephemeral.py
@@ -157,8 +157,12 @@ class EphemeralBuilderManager(BaseManager):
etcd_host = self._manager_config.get('ETCD_HOST', '127.0.0.1')
etcd_port = self._manager_config.get('ETCD_PORT', 2379)
- etcd_auth = self._manager_config.get('ETCD_CERT_AND_KEY', None)
etcd_ca_cert = self._manager_config.get('ETCD_CA_CERT', None)
+
+ etcd_auth = self._manager_config.get('ETCD_CERT_AND_KEY', None)
+ if etcd_auth is not None:
+ etcd_auth = tuple(etcd_auth) # Convert YAML list to a tuple
+
etcd_protocol = 'http' if etcd_auth is None else 'https'
logger.debug('Connecting to etcd on %s:%s', etcd_host, etcd_port)
diff --git a/buildman/manager/executor.py b/buildman/manager/executor.py
index b548420f5..b6a293fc0 100644
--- a/buildman/manager/executor.py
+++ b/buildman/manager/executor.py
@@ -69,6 +69,7 @@ class BuilderExecutor(object):
manager_hostname=manager_hostname,
coreos_channel=coreos_channel,
worker_tag=self.executor_config['WORKER_TAG'],
+ logentries_token=self.executor_config.get('LOGENTRIES_TOKEN', None),
)
diff --git a/buildman/templates/cloudconfig.yaml b/buildman/templates/cloudconfig.yaml
index 51bb2f090..2f274361a 100644
--- a/buildman/templates/cloudconfig.yaml
+++ b/buildman/templates/cloudconfig.yaml
@@ -12,6 +12,9 @@ write_files:
REALM={{ realm }}
TOKEN={{ token }}
SERVER=wss://{{ manager_hostname }}
+ {% if logentries_token -%}
+ LOGENTRIES_TOKEN={{ logentries_token }}
+ {%- endif %}
coreos:
update:
@@ -19,6 +22,17 @@ coreos:
group: {{ coreos_channel }}
units:
+ - name: systemd-journal-gatewayd.socket
+ command: start
+ enable: yes
+ content: |
+ [Unit]
+ Description=Journal Gateway Service Socket
+ [Socket]
+ ListenStream=/var/run/journald.sock
+ Service=systemd-journal-gatewayd.service
+ [Install]
+ WantedBy=sockets.target
{{ dockersystemd('quay-builder',
'quay.io/coreos/registry-build-worker',
quay_username,
@@ -29,3 +43,10 @@ coreos:
flattened=True,
restart_policy='no'
) | indent(4) }}
+ {% if logentries_token -%}
+ {{ dockersystemd('builder-logs',
+ 'quay.io/kelseyhightower/journal-2-logentries',
+ extra_args='--env-file /root/overrides.list -v /run/journald.sock:/run/journald.sock',
+ after_units=['quay-builder.service']
+ ) | indent(4) }}
+ {%- endif %}
diff --git a/buildstatus/building.svg b/buildstatus/building.svg
index dc7aeae7b..8e26edf87 100644
--- a/buildstatus/building.svg
+++ b/buildstatus/building.svg
@@ -1 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/buildstatus/failed.svg b/buildstatus/failed.svg
index 069d9f4e4..cc74c2381 100644
--- a/buildstatus/failed.svg
+++ b/buildstatus/failed.svg
@@ -1 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/buildstatus/none.svg b/buildstatus/none.svg
index 3c31d29b1..0e4680acf 100644
--- a/buildstatus/none.svg
+++ b/buildstatus/none.svg
@@ -1 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/buildstatus/ready.svg b/buildstatus/ready.svg
index 111262e3b..50e451a01 100644
--- a/buildstatus/ready.svg
+++ b/buildstatus/ready.svg
@@ -1 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/conf/http-base.conf b/conf/http-base.conf
index 8b7ff9e0b..3c3d57372 100644
--- a/conf/http-base.conf
+++ b/conf/http-base.conf
@@ -4,7 +4,7 @@ types_hash_max_size 2048;
include /usr/local/nginx/conf/mime.types.default;
default_type application/octet-stream;
-access_log /var/log/nginx/nginx.access.log;
+access_log /dev/stdout;
sendfile on;
gzip on;
diff --git a/conf/init/buildlogsarchiver/log/run b/conf/init/buildlogsarchiver/log/run
deleted file mode 100755
index c35fb1fb9..000000000
--- a/conf/init/buildlogsarchiver/log/run
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-exec svlogd /var/log/buildlogsarchiver/
\ No newline at end of file
diff --git a/conf/init/buildmanager/log/run b/conf/init/buildmanager/log/run
deleted file mode 100755
index 1dd4c3fef..000000000
--- a/conf/init/buildmanager/log/run
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-exec svlogd /var/log/buildmanager/
\ No newline at end of file
diff --git a/conf/init/copy_syslog_config.sh b/conf/init/copy_syslog_config.sh
new file mode 100755
index 000000000..7acd62b6b
--- /dev/null
+++ b/conf/init/copy_syslog_config.sh
@@ -0,0 +1,6 @@
+#! /bin/sh
+
+if [ -e /conf/stack/syslog-ng-extra.conf ]
+then
+ cp /conf/stack/syslog-ng-extra.conf /etc/syslog-ng/conf.d/
+fi
diff --git a/conf/init/diffsworker/log/run b/conf/init/diffsworker/log/run
deleted file mode 100755
index 066f7415a..000000000
--- a/conf/init/diffsworker/log/run
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-exec svlogd /var/log/diffsworker/
\ No newline at end of file
diff --git a/conf/init/gunicorn_registry/log/run b/conf/init/gunicorn_registry/log/run
deleted file mode 100755
index 1896ef533..000000000
--- a/conf/init/gunicorn_registry/log/run
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-exec svlogd /var/log/gunicorn_registry/
\ No newline at end of file
diff --git a/conf/init/gunicorn_verbs/log/run b/conf/init/gunicorn_verbs/log/run
deleted file mode 100755
index 2b061e193..000000000
--- a/conf/init/gunicorn_verbs/log/run
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-exec svlogd /var/log/gunicorn_verbs/
\ No newline at end of file
diff --git a/conf/init/gunicorn_web/log/run b/conf/init/gunicorn_web/log/run
deleted file mode 100755
index de17cdf61..000000000
--- a/conf/init/gunicorn_web/log/run
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-exec svlogd /var/log/gunicorn_web/
\ No newline at end of file
diff --git a/conf/init/nginx/log/run b/conf/init/nginx/log/run
deleted file mode 100755
index 30476f6e6..000000000
--- a/conf/init/nginx/log/run
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-exec svlogd /var/log/nginx/
\ No newline at end of file
diff --git a/conf/init/notificationworker/log/run b/conf/init/notificationworker/log/run
deleted file mode 100755
index 46f8431a7..000000000
--- a/conf/init/notificationworker/log/run
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-exec svlogd -t /var/log/notificationworker/
\ No newline at end of file
diff --git a/conf/init/preplogsdir.sh b/conf/init/preplogsdir.sh
deleted file mode 100755
index 93c3ee5af..000000000
--- a/conf/init/preplogsdir.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#! /bin/sh
-
-echo 'Linking config files to logs directory'
-for svc in `ls /etc/service/`
-do
- if [ ! -d /var/log/$svc ]; then
- mkdir -p /var/log/$svc
- ln -s /svlogd_config /var/log/$svc/config
- fi
-done
diff --git a/conf/init/service/buildlogsarchiver/log/run b/conf/init/service/buildlogsarchiver/log/run
new file mode 100755
index 000000000..3bcd9ba8a
--- /dev/null
+++ b/conf/init/service/buildlogsarchiver/log/run
@@ -0,0 +1,2 @@
+#!/bin/sh
+exec logger -i -t buildlogsarchiver
\ No newline at end of file
diff --git a/conf/init/buildlogsarchiver/run b/conf/init/service/buildlogsarchiver/run
similarity index 100%
rename from conf/init/buildlogsarchiver/run
rename to conf/init/service/buildlogsarchiver/run
diff --git a/conf/init/service/buildmanager/log/run b/conf/init/service/buildmanager/log/run
new file mode 100755
index 000000000..b35e28af9
--- /dev/null
+++ b/conf/init/service/buildmanager/log/run
@@ -0,0 +1,2 @@
+#!/bin/sh
+exec logger -i -t buildmanager
\ No newline at end of file
diff --git a/conf/init/buildmanager/run b/conf/init/service/buildmanager/run
similarity index 100%
rename from conf/init/buildmanager/run
rename to conf/init/service/buildmanager/run
diff --git a/conf/init/service/diffsworker/log/run b/conf/init/service/diffsworker/log/run
new file mode 100755
index 000000000..8e3dca5f3
--- /dev/null
+++ b/conf/init/service/diffsworker/log/run
@@ -0,0 +1,2 @@
+#!/bin/sh
+exec logger -i -t diffsworker
\ No newline at end of file
diff --git a/conf/init/diffsworker/run b/conf/init/service/diffsworker/run
similarity index 100%
rename from conf/init/diffsworker/run
rename to conf/init/service/diffsworker/run
diff --git a/conf/init/service/gunicorn_registry/log/run b/conf/init/service/gunicorn_registry/log/run
new file mode 100755
index 000000000..5b5b37af9
--- /dev/null
+++ b/conf/init/service/gunicorn_registry/log/run
@@ -0,0 +1,2 @@
+#!/bin/sh
+exec logger -i -t gunicorn_registry
\ No newline at end of file
diff --git a/conf/init/gunicorn_registry/run b/conf/init/service/gunicorn_registry/run
similarity index 100%
rename from conf/init/gunicorn_registry/run
rename to conf/init/service/gunicorn_registry/run
diff --git a/conf/init/service/gunicorn_verbs/log/run b/conf/init/service/gunicorn_verbs/log/run
new file mode 100755
index 000000000..d0bc335d7
--- /dev/null
+++ b/conf/init/service/gunicorn_verbs/log/run
@@ -0,0 +1,2 @@
+#!/bin/sh
+exec logger -i -t gunicorn_verbs
\ No newline at end of file
diff --git a/conf/init/gunicorn_verbs/run b/conf/init/service/gunicorn_verbs/run
similarity index 100%
rename from conf/init/gunicorn_verbs/run
rename to conf/init/service/gunicorn_verbs/run
diff --git a/conf/init/service/gunicorn_web/log/run b/conf/init/service/gunicorn_web/log/run
new file mode 100755
index 000000000..c96d365a5
--- /dev/null
+++ b/conf/init/service/gunicorn_web/log/run
@@ -0,0 +1,2 @@
+#!/bin/sh
+exec logger -i -t gunicorn_web
\ No newline at end of file
diff --git a/conf/init/gunicorn_web/run b/conf/init/service/gunicorn_web/run
similarity index 100%
rename from conf/init/gunicorn_web/run
rename to conf/init/service/gunicorn_web/run
diff --git a/conf/init/service/nginx/log/run b/conf/init/service/nginx/log/run
new file mode 100755
index 000000000..168af6d3e
--- /dev/null
+++ b/conf/init/service/nginx/log/run
@@ -0,0 +1,2 @@
+#!/bin/sh
+exec logger -i -t nginx
\ No newline at end of file
diff --git a/conf/init/nginx/run b/conf/init/service/nginx/run
similarity index 100%
rename from conf/init/nginx/run
rename to conf/init/service/nginx/run
diff --git a/conf/init/service/notificationworker/log/run b/conf/init/service/notificationworker/log/run
new file mode 100755
index 000000000..49747f3ce
--- /dev/null
+++ b/conf/init/service/notificationworker/log/run
@@ -0,0 +1,2 @@
+#!/bin/sh
+exec logger -i -t notificationworker
\ No newline at end of file
diff --git a/conf/init/notificationworker/run b/conf/init/service/notificationworker/run
similarity index 100%
rename from conf/init/notificationworker/run
rename to conf/init/service/notificationworker/run
diff --git a/conf/init/svlogd_config b/conf/init/svlogd_config
deleted file mode 100644
index 2ccee1e0c..000000000
--- a/conf/init/svlogd_config
+++ /dev/null
@@ -1,3 +0,0 @@
-s100000000
-t86400
-n4
diff --git a/conf/proxy-protocol.conf b/conf/proxy-protocol.conf
index 5897f1839..ba00507f5 100644
--- a/conf/proxy-protocol.conf
+++ b/conf/proxy-protocol.conf
@@ -5,4 +5,4 @@ real_ip_header proxy_protocol;
log_format elb_pp '$proxy_protocol_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent"';
-access_log /var/log/nginx/nginx.access.log elb_pp;
+access_log /dev/stdout elb_pp;
diff --git a/conf/root-base.conf b/conf/root-base.conf
index 02c004564..357e6ed03 100644
--- a/conf/root-base.conf
+++ b/conf/root-base.conf
@@ -1,7 +1,7 @@
# vim: ft=nginx
pid /tmp/nginx.pid;
-error_log /var/log/nginx/nginx.error.log;
+error_log /dev/stdout;
worker_processes 2;
worker_priority -10;
diff --git a/conf/server-base.conf b/conf/server-base.conf
index 2f03b11b2..3853fbccf 100644
--- a/conf/server-base.conf
+++ b/conf/server-base.conf
@@ -1,6 +1,5 @@
# vim: ft=nginx
-client_body_temp_path /var/log/nginx/client_body 1 2;
server_name _;
keepalive_timeout 5;
@@ -36,7 +35,7 @@ location /v1/repositories/ {
proxy_pass http://registry_app_server;
proxy_read_timeout 2000;
- proxy_temp_path /var/log/nginx/proxy_temp 1 2;
+ proxy_temp_path /tmp 1 2;
limit_req zone=repositories burst=10;
}
@@ -47,7 +46,7 @@ location /v1/ {
proxy_request_buffering off;
proxy_pass http://registry_app_server;
- proxy_temp_path /var/log/nginx/proxy_temp 1 2;
+ proxy_temp_path /tmp 1 2;
client_max_body_size 20G;
}
@@ -58,7 +57,7 @@ location /c1/ {
proxy_request_buffering off;
proxy_pass http://verbs_app_server;
- proxy_temp_path /var/log/nginx/proxy_temp 1 2;
+ proxy_temp_path /tmp 1 2;
limit_req zone=verbs burst=10;
}
diff --git a/config.py b/config.py
index 339ffca34..6fe1c4042 100644
--- a/config.py
+++ b/config.py
@@ -45,8 +45,6 @@ class DefaultConfig(object):
PREFERRED_URL_SCHEME = 'http'
SERVER_HOSTNAME = 'localhost:5000'
- AVATAR_KIND = 'local'
-
REGISTRY_TITLE = 'CoreOS Enterprise Registry'
REGISTRY_TITLE_SHORT = 'Enterprise Registry'
@@ -165,6 +163,10 @@ class DefaultConfig(object):
# Feature Flag: Whether users can be renamed
FEATURE_USER_RENAME = False
+ # Feature Flag: Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for
+ # basic auth.
+ FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH = False
+
BUILD_MANAGER = ('enterprise', {})
DISTRIBUTED_STORAGE_CONFIG = {
@@ -201,3 +203,11 @@ class DefaultConfig(object):
# Signed registry grant token expiration in seconds
SIGNED_GRANT_EXPIRATION_SEC = 60 * 60 * 24 # One day to complete a push/pull
+
+ # The various avatar background colors.
+ AVATAR_KIND = 'local'
+ AVATAR_COLORS = ['#969696', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728',
+ '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2',
+ '#7f7f7f', '#c7c7c7', '#bcbd22', '#1f77b4', '#17becf', '#9edae5', '#393b79',
+ '#5254a3', '#6b6ecf', '#9c9ede', '#9ecae1', '#31a354', '#b5cf6b', '#a1d99b',
+ '#8c6d31', '#ad494a', '#e7ba52', '#a55194']
diff --git a/data/database.py b/data/database.py
index 82a179325..837309a42 100644
--- a/data/database.py
+++ b/data/database.py
@@ -139,7 +139,7 @@ def uuid_generator():
return str(uuid.uuid4())
-_get_epoch_timestamp = lambda: int(time.time())
+get_epoch_timestamp = lambda: int(time.time())
def close_db_filter(_):
@@ -167,6 +167,17 @@ class BaseModel(ReadSlaveModel):
database = db
read_slaves = (read_slave,)
+ def __getattribute__(self, name):
+ """ Adds _id accessors so that foreign key field IDs can be looked up without making
+ a database roundtrip.
+ """
+ if name.endswith('_id'):
+ field_name = name[0:len(name) - 3]
+ if field_name in self._meta.fields:
+ return self._data.get(field_name)
+
+ return super(BaseModel, self).__getattribute__(name)
+
class User(BaseModel):
uuid = CharField(default=uuid_generator, max_length=36, null=True)
@@ -484,7 +495,7 @@ class RepositoryTag(BaseModel):
name = CharField()
image = ForeignKeyField(Image)
repository = ForeignKeyField(Repository)
- lifetime_start_ts = IntegerField(default=_get_epoch_timestamp)
+ lifetime_start_ts = IntegerField(default=get_epoch_timestamp)
lifetime_end_ts = IntegerField(null=True, index=True)
hidden = BooleanField(default=False)
@@ -493,6 +504,9 @@ class RepositoryTag(BaseModel):
read_slaves = (read_slave,)
indexes = (
(('repository', 'name'), False),
+
+ # This unique index prevents deadlocks when concurrently moving and deleting tags
+ (('repository', 'name', 'lifetime_end_ts'), True),
)
diff --git a/data/migrations/migration.sh b/data/migrations/migration.sh
index 17901e130..1140c02b8 100755
--- a/data/migrations/migration.sh
+++ b/data/migrations/migration.sh
@@ -19,7 +19,7 @@ up_mysql() {
down_mysql() {
docker kill mysql
- docker rm mysql
+ docker rm -v mysql
}
up_mariadb() {
@@ -36,24 +36,24 @@ up_mariadb() {
down_mariadb() {
docker kill mariadb
- docker rm mariadb
+ docker rm -v mariadb
}
up_percona() {
# Run a SQL database on port 3306 inside of Docker.
- docker run --name percona -p 3306:3306 -d dockerfile/percona
+ docker run --name percona -p 3306:3306 -d percona
# Sleep for 10s
echo 'Sleeping for 10...'
sleep 10
# Add the daabase to mysql.
- docker run --rm --link percona:percona dockerfile/percona sh -c 'echo "create database genschema" | mysql -h $PERCONA_PORT_3306_TCP_ADDR'
+ docker run --rm --link percona:percona percona sh -c 'echo "create database genschema" | mysql -h $PERCONA_PORT_3306_TCP_ADDR'
}
down_percona() {
docker kill percona
- docker rm percona
+ docker rm -v percona
}
up_postgres() {
@@ -70,7 +70,7 @@ up_postgres() {
down_postgres() {
docker kill postgres
- docker rm postgres
+ docker rm -v postgres
}
gen_migrate() {
diff --git a/data/migrations/versions/2b4dc0818a5e_add_a_unique_index_to_prevent_deadlocks_.py b/data/migrations/versions/2b4dc0818a5e_add_a_unique_index_to_prevent_deadlocks_.py
new file mode 100644
index 000000000..8efe0c123
--- /dev/null
+++ b/data/migrations/versions/2b4dc0818a5e_add_a_unique_index_to_prevent_deadlocks_.py
@@ -0,0 +1,26 @@
+"""Add a unique index to prevent deadlocks with tags.
+
+Revision ID: 2b4dc0818a5e
+Revises: 2b2529fd23ff
+Create Date: 2015-03-20 23:37:10.558179
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '2b4dc0818a5e'
+down_revision = '2b2529fd23ff'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.create_index('repositorytag_repository_id_name_lifetime_end_ts', 'repositorytag', ['repository_id', 'name', 'lifetime_end_ts'], unique=True)
+ ### end Alembic commands ###
+
+
+def downgrade(tables):
+ ### commands auto generated by Alembic - please adjust! ###
+ op.drop_index('repositorytag_repository_id_name_lifetime_end_ts', table_name='repositorytag')
+ ### end Alembic commands ###
diff --git a/data/model/legacy.py b/data/model/legacy.py
index 2d076c5cc..7ec27eed9 100644
--- a/data/model/legacy.py
+++ b/data/model/legacy.py
@@ -18,7 +18,7 @@ from data.database import (User, Repository, Image, AccessToken, Role, Repositor
DerivedImageStorage, ImageStorageTransformation, random_string_generator,
db, BUILD_PHASE, QuayUserField, ImageStorageSignature, QueueItem,
ImageStorageSignatureKind, validate_database_url, db_for_update,
- AccessTokenKind, Star)
+ AccessTokenKind, Star, get_epoch_timestamp)
from peewee import JOIN_LEFT_OUTER, fn
from util.validation import (validate_username, validate_email, validate_password,
INVALID_PASSWORD_MESSAGE)
@@ -310,11 +310,54 @@ def _list_entity_robots(entity_name):
.where(User.robot == True, User.username ** (entity_name + '+%')))
-def list_entity_robot_tuples(entity_name):
- return (_list_entity_robots(entity_name)
- .select(User.username, FederatedLogin.service_ident)
- .tuples())
+class _TupleWrapper(object):
+ def __init__(self, data, fields):
+ self._data = data
+ self._fields = fields
+ def get(self, field):
+ return self._data[self._fields.index(field.name + ':' + field.model_class.__name__)]
+
+
+class TupleSelector(object):
+ """ Helper class for selecting tuples from a peewee query and easily accessing
+ them as if they were objects.
+ """
+ def __init__(self, query, fields):
+ self._query = query.select(*fields).tuples()
+ self._fields = [field.name + ':' + field.model_class.__name__ for field in fields]
+
+ def __iter__(self):
+ return self._build_iterator()
+
+ def _build_iterator(self):
+ for tuple_data in self._query:
+ yield _TupleWrapper(tuple_data, self._fields)
+
+
+
+def list_entity_robot_permission_teams(entity_name):
+ query = (_list_entity_robots(entity_name)
+ .join(RepositoryPermission, JOIN_LEFT_OUTER,
+ on=(RepositoryPermission.user == FederatedLogin.user))
+ .join(Repository, JOIN_LEFT_OUTER)
+ .switch(User)
+ .join(TeamMember, JOIN_LEFT_OUTER)
+ .join(Team, JOIN_LEFT_OUTER))
+
+ fields = [User.username, FederatedLogin.service_ident, Repository.name, Team.name]
+ return TupleSelector(query, fields)
+
+
+def list_robot_permissions(robot_name):
+ return (RepositoryPermission.select(RepositoryPermission, User, Repository)
+ .join(Repository)
+ .join(Visibility)
+ .switch(RepositoryPermission)
+ .join(Role)
+ .switch(RepositoryPermission)
+ .join(User)
+ .where(User.username == robot_name, User.robot == True))
def convert_user_to_organization(user, admin_user):
# Change the user to an organization.
@@ -636,6 +679,73 @@ def get_user_or_org_by_customer_id(customer_id):
except User.DoesNotExist:
return None
+def get_matching_user_namespaces(namespace_prefix, username, limit=10):
+ query = (Repository
+ .select()
+ .join(Namespace, on=(Repository.namespace_user == Namespace.id))
+ .switch(Repository)
+ .join(Visibility)
+ .switch(Repository)
+ .join(RepositoryPermission, JOIN_LEFT_OUTER)
+ .where(Namespace.username ** (namespace_prefix + '%'))
+ .group_by(Repository.namespace_user, Repository))
+
+ count = 0
+ namespaces = {}
+ for repo in _filter_to_repos_for_user(query, username):
+ if not repo.namespace_user.username in namespaces:
+ namespaces[repo.namespace_user.username] = repo.namespace_user
+ count = count + 1
+ if count >= limit:
+ break
+
+ return namespaces.values()
+
+def get_matching_user_teams(team_prefix, user, limit=10):
+ query = (Team.select()
+ .join(User)
+ .switch(Team)
+ .join(TeamMember)
+ .where(TeamMember.user == user, Team.name ** (team_prefix + '%'))
+ .distinct(Team.id)
+ .limit(limit))
+
+ return query
+
+
+def get_matching_robots(name_prefix, username, limit=10):
+ admined_orgs = (get_user_organizations(username)
+ .switch(Team)
+ .join(TeamRole)
+ .where(TeamRole.name == 'admin'))
+
+ prefix_checks = False
+
+ for org in admined_orgs:
+ prefix_checks = prefix_checks | (User.username ** (org.username + '+' + name_prefix + '%'))
+
+ prefix_checks = prefix_checks | (User.username ** (username + '+' + name_prefix + '%'))
+
+ return User.select().where(prefix_checks).limit(limit)
+
+
+def get_matching_admined_teams(team_prefix, user, limit=10):
+ admined_orgs = (get_user_organizations(user.username)
+ .switch(Team)
+ .join(TeamRole)
+ .where(TeamRole.name == 'admin'))
+
+ query = (Team.select()
+ .join(User)
+ .switch(Team)
+ .join(TeamMember)
+ .where(Team.name ** (team_prefix + '%'), Team.organization << (admined_orgs))
+ .distinct(Team.id)
+ .limit(limit))
+
+ return query
+
+
def get_matching_teams(team_prefix, organization):
query = Team.select().where(Team.name ** (team_prefix + '%'),
Team.organization == organization)
@@ -654,13 +764,13 @@ def get_matching_users(username_prefix, robot_namespace=None,
(User.robot == True)))
query = (User
- .select(User.username, User.robot)
- .group_by(User.username, User.robot)
+ .select(User.username, User.email, User.robot)
+ .group_by(User.username, User.email, User.robot)
.where(direct_user_query))
if organization:
query = (query
- .select(User.username, User.robot, fn.Sum(Team.id))
+ .select(User.username, User.email, User.robot, fn.Sum(Team.id))
.join(TeamMember, JOIN_LEFT_OUTER)
.join(Team, JOIN_LEFT_OUTER, on=((Team.id == TeamMember.team) &
(Team.organization == organization))))
@@ -669,9 +779,11 @@ def get_matching_users(username_prefix, robot_namespace=None,
class MatchingUserResult(object):
def __init__(self, *args):
self.username = args[0]
- self.is_robot = args[1]
+ self.email = args[1]
+ self.robot = args[2]
+
if organization:
- self.is_org_member = (args[2] != None)
+ self.is_org_member = (args[3] != None)
else:
self.is_org_member = None
@@ -787,7 +899,7 @@ def get_visible_repository_count(username=None, include_public=True,
def get_visible_repositories(username=None, include_public=True, page=None,
- limit=None, sort=False, namespace=None):
+ limit=None, sort=False, namespace=None, namespace_only=False):
query = _visible_repository_query(username=username, include_public=include_public, page=page,
limit=limit, namespace=namespace,
select_models=[Repository, Namespace, Visibility])
@@ -798,6 +910,9 @@ def get_visible_repositories(username=None, include_public=True, page=None,
if limit:
query = query.limit(limit)
+ if namespace and namespace_only:
+ query = query.where(Namespace.username == namespace)
+
return query
@@ -876,11 +991,73 @@ def _get_public_repo_visibility():
return _public_repo_visibility_cache
-def get_matching_repositories(repo_term, username=None):
+def get_sorted_matching_repositories(prefix, only_public, checker, limit=10):
+ """ Returns repositories matching the given prefix string and passing the given checker
+ function.
+ """
+
+ last_week = datetime.now() - timedelta(weeks=1)
+ results = []
+ existing_ids = []
+
+ def get_search_results(search_clause, with_count):
+ if len(results) >= limit:
+ return
+
+ selected = [Repository, Namespace]
+ if with_count:
+ selected.append(fn.Count(LogEntry.id).alias('count'))
+
+ query = (Repository.select(*selected)
+ .join(Namespace, JOIN_LEFT_OUTER, on=(Namespace.id == Repository.namespace_user))
+ .switch(Repository)
+ .where(search_clause)
+ .group_by(Repository, Namespace))
+
+ if only_public:
+ query = query.where(Repository.visibility == _get_public_repo_visibility())
+
+ if existing_ids:
+ query = query.where(~(Repository.id << existing_ids))
+
+ if with_count:
+ query = (query.join(LogEntry, JOIN_LEFT_OUTER)
+ .where(LogEntry.datetime >= last_week)
+ .order_by(fn.Count(LogEntry.id).desc()))
+
+ for result in query:
+ if len(results) >= limit:
+ return results
+
+ # Note: We compare IDs here, instead of objects, because calling .visibility on the
+ # Repository will kick off a new SQL query to retrieve that visibility enum value. We don't
+ # join the visibility table in SQL, as well, because it is ungodly slow in MySQL :-/
+ result.is_public = result.visibility_id == _get_public_repo_visibility().id
+ result.count = result.count if with_count else 0
+
+ if not checker(result):
+ continue
+
+ results.append(result)
+ existing_ids.append(result.id)
+
+ # For performance reasons, we conduct the repo name and repo namespace searches on their
+ # own, and with and without counts on their own. This also affords us the ability to give
+ # higher precedence to repository names matching over namespaces, which is semantically correct.
+ get_search_results((Repository.name ** (prefix + '%')), with_count=True)
+ get_search_results((Repository.name ** (prefix + '%')), with_count=False)
+
+ get_search_results((Namespace.username ** (prefix + '%')), with_count=True)
+ get_search_results((Namespace.username ** (prefix + '%')), with_count=False)
+
+ return results
+
+
+def get_matching_repositories(repo_term, username=None, limit=10, include_public=True):
namespace_term = repo_term
name_term = repo_term
- visible = get_visible_repositories(username)
+ visible = get_visible_repositories(username, include_public=include_public)
search_clauses = (Repository.name ** ('%' + name_term + '%') |
Namespace.username ** ('%' + namespace_term + '%'))
@@ -894,8 +1071,7 @@ def get_matching_repositories(repo_term, username=None):
search_clauses = (Repository.name ** ('%' + name_term + '%') &
Namespace.username ** ('%' + namespace_term + '%'))
- final = visible.where(search_clauses).limit(10)
- return list(final)
+ return visible.where(search_clauses).limit(limit)
def change_password(user, new_password):
@@ -905,6 +1081,7 @@ def change_password(user, new_password):
pw_hash = hash_password(new_password)
user.invalid_login_attempts = 0
user.password_hash = pw_hash
+ user.uuid = str(uuid4())
user.save()
# Remove any password required notifications for the user.
@@ -1038,7 +1215,8 @@ def get_all_repo_teams(namespace_name, repository_name):
def get_all_repo_users(namespace_name, repository_name):
- return (RepositoryPermission.select(User.username, User.robot, Role.name, RepositoryPermission)
+ return (RepositoryPermission.select(User.username, User.email, User.robot, Role.name,
+ RepositoryPermission)
.join(User)
.switch(RepositoryPermission)
.join(Role)
@@ -1577,9 +1755,21 @@ def get_repository_images(namespace_name, repository_name):
return _get_repository_images_base(namespace_name, repository_name, lambda q: q)
-def _tag_alive(query):
+def _tag_alive(query, now_ts=None):
+ if now_ts is None:
+ now_ts = get_epoch_timestamp()
return query.where((RepositoryTag.lifetime_end_ts >> None) |
- (RepositoryTag.lifetime_end_ts > int(time.time())))
+ (RepositoryTag.lifetime_end_ts > now_ts))
+
+
+def list_repository_tag_history(repository, limit=100):
+ query = (RepositoryTag
+ .select(RepositoryTag, Image)
+ .join(Image)
+ .where(RepositoryTag.repository == repository)
+ .order_by(RepositoryTag.lifetime_start_ts.desc())
+ .limit(limit))
+ return query
def list_repository_tags(namespace_name, repository_name, include_hidden=False,
@@ -1610,14 +1800,19 @@ def list_repository_tags(namespace_name, repository_name, include_hidden=False,
def _garbage_collect_tags(namespace_name, repository_name):
# We do this without using a join to prevent holding read locks on the repository table
repo = _get_repository(namespace_name, repository_name)
- now = int(time.time())
+ expired_time = get_epoch_timestamp() - repo.namespace_user.removed_tag_expiration_s
- (RepositoryTag
- .delete()
- .where(RepositoryTag.repository == repo,
- ~(RepositoryTag.lifetime_end_ts >> None),
- (RepositoryTag.lifetime_end_ts + repo.namespace_user.removed_tag_expiration_s) <= now)
- .execute())
+ tags_to_delete = list(RepositoryTag
+ .select(RepositoryTag.id)
+ .where(RepositoryTag.repository == repo,
+ ~(RepositoryTag.lifetime_end_ts >> None),
+ (RepositoryTag.lifetime_end_ts <= expired_time))
+ .order_by(RepositoryTag.id))
+ if len(tags_to_delete) > 0:
+ (RepositoryTag
+ .delete()
+ .where(RepositoryTag.id << tags_to_delete)
+ .execute())
def garbage_collect_repository(namespace_name, repository_name):
@@ -1713,46 +1908,39 @@ def _garbage_collect_storage(storage_id_whitelist):
logger.debug('Garbage collecting storages from candidates: %s', storage_id_whitelist)
with config.app_config['DB_TRANSACTION_FACTORY'](db):
# Track all of the data that should be removed from blob storage
- placements_to_remove = orphaned_storage_query(ImageStoragePlacement
- .select(ImageStoragePlacement,
- ImageStorage,
- ImageStorageLocation)
- .join(ImageStorageLocation)
- .switch(ImageStoragePlacement)
- .join(ImageStorage),
- storage_id_whitelist,
- (ImageStorage, ImageStoragePlacement,
- ImageStorageLocation))
+ placements_to_remove = list(orphaned_storage_query(ImageStoragePlacement
+ .select(ImageStoragePlacement,
+ ImageStorage,
+ ImageStorageLocation)
+ .join(ImageStorageLocation)
+ .switch(ImageStoragePlacement)
+ .join(ImageStorage),
+ storage_id_whitelist,
+ (ImageStorage, ImageStoragePlacement,
+ ImageStorageLocation)))
- paths_to_remove = placements_query_to_paths_set(placements_to_remove.clone())
+ paths_to_remove = placements_query_to_paths_set(placements_to_remove)
# Remove the placements for orphaned storages
- placements_subquery = (placements_to_remove
- .clone()
- .select(ImageStoragePlacement.id)
- .alias('ps'))
- inner = (ImageStoragePlacement
- .select(placements_subquery.c.id)
- .from_(placements_subquery))
- placements_removed = (ImageStoragePlacement
- .delete()
- .where(ImageStoragePlacement.id << inner)
- .execute())
- logger.debug('Removed %s image storage placements', placements_removed)
+ if len(placements_to_remove) > 0:
+ placement_ids_to_remove = [placement.id for placement in placements_to_remove]
+ placements_removed = (ImageStoragePlacement
+ .delete()
+ .where(ImageStoragePlacement.id << placement_ids_to_remove)
+ .execute())
+ logger.debug('Removed %s image storage placements', placements_removed)
# Remove all orphaned storages
# The comma after ImageStorage.id is VERY important, it makes it a tuple, which is a sequence
- orphaned_storages = orphaned_storage_query(ImageStorage.select(ImageStorage.id),
- storage_id_whitelist,
- (ImageStorage.id,)).alias('osq')
- orphaned_storage_inner = (ImageStorage
- .select(orphaned_storages.c.id)
- .from_(orphaned_storages))
- storages_removed = (ImageStorage
- .delete()
- .where(ImageStorage.id << orphaned_storage_inner)
- .execute())
- logger.debug('Removed %s image storage records', storages_removed)
+ orphaned_storages = list(orphaned_storage_query(ImageStorage.select(ImageStorage.id),
+ storage_id_whitelist,
+ (ImageStorage.id,)).alias('osq'))
+ if len(orphaned_storages) > 0:
+ storages_removed = (ImageStorage
+ .delete()
+ .where(ImageStorage.id << orphaned_storages)
+ .execute())
+ logger.debug('Removed %s image storage records', storages_removed)
# We are going to make the conscious decision to not delete image storage blobs inside
# transactions.
@@ -1803,40 +1991,34 @@ def get_parent_images(namespace_name, repository_name, image_obj):
def create_or_update_tag(namespace_name, repository_name, tag_name,
tag_docker_image_id):
+ try:
+ repo = _get_repository(namespace_name, repository_name)
+ except Repository.DoesNotExist:
+ raise DataModelException('Invalid repository %s/%s' % (namespace_name, repository_name))
+
+ now_ts = get_epoch_timestamp()
with config.app_config['DB_TRANSACTION_FACTORY'](db):
try:
- repo = _get_repository(namespace_name, repository_name)
- except Repository.DoesNotExist:
- raise DataModelException('Invalid repository %s/%s' % (namespace_name, repository_name))
+ tag = db_for_update(_tag_alive(RepositoryTag
+ .select()
+ .where(RepositoryTag.repository == repo,
+ RepositoryTag.name == tag_name), now_ts)).get()
+ tag.lifetime_end_ts = now_ts
+ tag.save()
+ except RepositoryTag.DoesNotExist:
+ pass
try:
image = Image.get(Image.docker_image_id == tag_docker_image_id, Image.repository == repo)
except Image.DoesNotExist:
raise DataModelException('Invalid image with id: %s' % tag_docker_image_id)
- now_ts = int(time.time())
-
- created = RepositoryTag.create(repository=repo, image=image, name=tag_name,
- lifetime_start_ts=now_ts)
-
- try:
- # When we move a tag, we really end the timeline of the old one and create a new one
- query = _tag_alive(RepositoryTag
- .select()
- .where(RepositoryTag.repository == repo, RepositoryTag.name == tag_name,
- RepositoryTag.id != created.id))
- tag = query.get()
- tag.lifetime_end_ts = now_ts
- tag.save()
- except RepositoryTag.DoesNotExist:
- # No tag that needs to be ended
- pass
-
- return created
-
+ return RepositoryTag.create(repository=repo, image=image, name=tag_name,
+ lifetime_start_ts=now_ts)
def delete_tag(namespace_name, repository_name, tag_name):
+ now_ts = get_epoch_timestamp()
with config.app_config['DB_TRANSACTION_FACTORY'](db):
try:
query = _tag_alive(RepositoryTag
@@ -1845,21 +2027,21 @@ def delete_tag(namespace_name, repository_name, tag_name):
.join(Namespace, on=(Repository.namespace_user == Namespace.id))
.where(Repository.name == repository_name,
Namespace.username == namespace_name,
- RepositoryTag.name == tag_name))
+ RepositoryTag.name == tag_name), now_ts)
found = db_for_update(query).get()
except RepositoryTag.DoesNotExist:
msg = ('Invalid repository tag \'%s\' on repository \'%s/%s\'' %
(tag_name, namespace_name, repository_name))
raise DataModelException(msg)
- found.lifetime_end_ts = int(time.time())
+ found.lifetime_end_ts = now_ts
found.save()
def create_temporary_hidden_tag(repo, image, expiration_s):
""" Create a tag with a defined timeline, that will not appear in the UI or CLI. Returns the name
of the temporary tag. """
- now_ts = int(time.time())
+ now_ts = get_epoch_timestamp()
expire_ts = now_ts + expiration_s
tag_name = str(uuid4())
RepositoryTag.create(repository=repo, image=image, name=tag_name, lifetime_start_ts=now_ts,
diff --git a/data/userevent.py b/data/userevent.py
index 5523da2e5..aea34226a 100644
--- a/data/userevent.py
+++ b/data/userevent.py
@@ -1,6 +1,9 @@
import redis
import json
import threading
+import logging
+
+logger = logging.getLogger(__name__)
class UserEventBuilder(object):
"""
@@ -68,8 +71,9 @@ class UserEvent(object):
def conduct():
try:
self.publish_event_data_sync(event_id, data_obj)
- except Exception as e:
- print e
+ logger.debug('Published user event %s: %s', event_id, data_obj)
+ except Exception:
+ logger.exception('Could not publish user event')
thread = threading.Thread(target=conduct)
thread.start()
diff --git a/data/users.py b/data/users.py
index 9e01e4d45..3d763c9b6 100644
--- a/data/users.py
+++ b/data/users.py
@@ -1,6 +1,11 @@
import ldap
import logging
+import json
+import itertools
+import uuid
+import struct
+from util.aes import AESCipher
from util.validation import generate_valid_usernames
from data import model
@@ -106,6 +111,7 @@ class LDAPUsers(object):
return found_user is not None
+
class UserAuthentication(object):
def __init__(self, app=None):
self.app = app
@@ -138,5 +144,81 @@ class UserAuthentication(object):
app.extensions['authentication'] = users
return users
+ def _get_secret_key(self):
+ """ Returns the secret key to use for encrypting and decrypting. """
+ from app import app
+ app_secret_key = app.config['SECRET_KEY']
+ secret_key = None
+
+ # First try parsing the key as an int.
+ try:
+ big_int = int(app_secret_key)
+ secret_key = str(bytearray.fromhex('{:02x}'.format(big_int)))
+ except ValueError:
+ pass
+
+    # Next try parsing it as a UUID.
+ if secret_key is None:
+ try:
+ secret_key = uuid.UUID(app_secret_key).bytes
+ except ValueError:
+ pass
+
+ if secret_key is None:
+ secret_key = str(bytearray(map(ord, app_secret_key)))
+
+    # Cycle (or truncate) the derived key material to exactly 32 bytes for AES.
+ return ''.join(itertools.islice(itertools.cycle(secret_key), 32))
+
+ def encrypt_user_password(self, password):
+ """ Returns an encrypted version of the user's password. """
+ data = {
+ 'password': password
+ }
+
+ message = json.dumps(data)
+ cipher = AESCipher(self._get_secret_key())
+ return cipher.encrypt(message)
+
+ def _decrypt_user_password(self, encrypted):
+ """ Attempts to decrypt the given password and returns it. """
+ cipher = AESCipher(self._get_secret_key())
+
+ try:
+ message = cipher.decrypt(encrypted)
+ except ValueError:
+ return None
+ except TypeError:
+ return None
+
+ try:
+ data = json.loads(message)
+ except ValueError:
+ return None
+
+ return data.get('password', encrypted)
+
+ def verify_user(self, username_or_email, password, basic_auth=False):
+ # First try to decode the password as a signed token.
+ if basic_auth:
+ import features
+
+ decrypted = self._decrypt_user_password(password)
+ if decrypted is None:
+ # This is a normal password.
+ if features.REQUIRE_ENCRYPTED_BASIC_AUTH:
+          msg = ('Client login with unencrypted passwords is disabled. Please generate an ' +
+ 'encrypted password in the user admin panel for use here.')
+ return (None, msg)
+ else:
+ password = decrypted
+
+ result = self.state.verify_user(username_or_email, password)
+ if result:
+ return (result, '')
+ else:
+ return (result, 'Invalid password.')
+
+
def __getattr__(self, name):
return getattr(self.state, name, None)
diff --git a/emails/teaminvite.html b/emails/teaminvite.html
index 3d8ff9c14..128bbe00f 100644
--- a/emails/teaminvite.html
+++ b/emails/teaminvite.html
@@ -4,7 +4,7 @@
Invitation to join team: {{ teamname }}
-{{ inviter | user_reference }} has invited you to join the team {{ teamname }} under organization {{ organization | user_reference }}.
+{{ inviter | user_reference }} has invited you to join the team {{ teamname | team_reference }} under organization {{ organization | user_reference }}.
+ The OAuth Applications panel allows organizations to define custom OAuth applications that can be used by internal or external customers to access data on behalf of the customers. More information about the API can be found by contacting support.
+
+
+
+
No OAuth applications defined.
+
+ Click the "Create New Application" button above to create a new OAuth application under
+ this organization.
+
+
+
+
-
Application Name
-
Application URI
+
Application Name
+
Application URI
diff --git a/static/directives/authorized-apps-manager.html b/static/directives/authorized-apps-manager.html
new file mode 100644
index 000000000..cc050346b
--- /dev/null
+++ b/static/directives/authorized-apps-manager.html
@@ -0,0 +1,56 @@
+
+
+
Authorized Applications
+
+
+
+ The authorized applications panel lists applications you have authorized to view information and perform actions on your behalf. You can revoke any of your authorizations here by clicking the gear icon and clicking "Revoke Authorization".
+
+
+
+
+
+
You have not authorized any external applications.
+
\ No newline at end of file
diff --git a/static/directives/avatar.html b/static/directives/avatar.html
index 46c56afe5..b55f0405a 100644
--- a/static/directives/avatar.html
+++ b/static/directives/avatar.html
@@ -1 +1,12 @@
-
\ No newline at end of file
+
+
+
+ {{ data.name.charAt(0).toUpperCase() }}
+ Ω
+
+
\ No newline at end of file
diff --git a/static/directives/billing-invoices.html b/static/directives/billing-invoices.html
index e022fef03..802abc358 100644
--- a/static/directives/billing-invoices.html
+++ b/static/directives/billing-invoices.html
@@ -1,25 +1,26 @@
+ It is highly recommended to require encrypted client passwords. LDAP passwords used in the Docker client will be stored in plaintext!
+ Enable this requirement now.
+
+
+
+ Note: The "Require Encrypted Client Passwords" feature is currently enabled which will
+ prevent LDAP passwords from being saved as plaintext by the Docker client.
+
+
Authentication:
@@ -305,7 +332,6 @@
-
LDAP URI:
@@ -406,6 +432,28 @@
+
+
Organization Filtering:
+
+
+
+
+
+
+
+ If enabled, only members of specified GitHub
+ Enterprise organizations will be allowed to login via GitHub
+ Enterprise.
+
+
+
+
+
+
diff --git a/static/directives/convert-user-to-org.html b/static/directives/convert-user-to-org.html
new file mode 100644
index 000000000..4dc143c20
--- /dev/null
+++ b/static/directives/convert-user-to-org.html
@@ -0,0 +1,100 @@
+
+
+
+
+
+ Cannot convert this account into an organization, as it is a member of {{user.organizations.length}} other
+ organization{{user.organizations.length > 1 ? 's' : ''}}. Please leave
+ {{user.organizations.length > 1 ? 'those organizations' : 'that organization'}} first.
+
+
+
+
+
+ Note: Converting a user account into an organization cannot be undone
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Cannot convert account
+
+
+ Your account could not be converted. Please try again in a moment.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Convert to organization?
+
+
+
You will not be able to login to this account once converted!
+
Are you absolutely sure you would like to convert this account to an organization? Once done, there is no going back.
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/static/directives/cor-checkable-item.html b/static/directives/cor-checkable-item.html
index f3e65e39b..4dde44d92 100644
--- a/static/directives/cor-checkable-item.html
+++ b/static/directives/cor-checkable-item.html
@@ -1,3 +1,3 @@
-
\ No newline at end of file
+
diff --git a/static/directives/cor-checkable-menu-item.html b/static/directives/cor-checkable-menu-item.html
index 452e37ea7..3fc5f7c25 100644
--- a/static/directives/cor-checkable-menu-item.html
+++ b/static/directives/cor-checkable-menu-item.html
@@ -1 +1 @@
-
-
\ No newline at end of file
+
diff --git a/static/directives/cor-confirm-dialog.html b/static/directives/cor-confirm-dialog.html
index 330729390..c6aa9d2fd 100644
--- a/static/directives/cor-confirm-dialog.html
+++ b/static/directives/cor-confirm-dialog.html
@@ -22,4 +22,4 @@
-
\ No newline at end of file
+
diff --git a/static/directives/cor-floating-bottom-bar.html b/static/directives/cor-floating-bottom-bar.html
index 2e5337fd2..11615e6a8 100644
--- a/static/directives/cor-floating-bottom-bar.html
+++ b/static/directives/cor-floating-bottom-bar.html
@@ -1,3 +1,3 @@
-
\ No newline at end of file
+
diff --git a/static/directives/cor-loader-inline.html b/static/directives/cor-loader-inline.html
index 39ffb5b99..3a2c42c1d 100644
--- a/static/directives/cor-loader-inline.html
+++ b/static/directives/cor-loader-inline.html
@@ -2,4 +2,4 @@
-
\ No newline at end of file
+
diff --git a/static/directives/cor-loader.html b/static/directives/cor-loader.html
index 112680a22..f0aab7afc 100644
--- a/static/directives/cor-loader.html
+++ b/static/directives/cor-loader.html
@@ -2,4 +2,4 @@
-
\ No newline at end of file
+
diff --git a/static/directives/cor-log-box.html b/static/directives/cor-log-box.html
index c5442d0f7..6d3157db3 100644
--- a/static/directives/cor-log-box.html
+++ b/static/directives/cor-log-box.html
@@ -8,4 +8,4 @@
New Logs
-
\ No newline at end of file
+
diff --git a/static/directives/cor-option.html b/static/directives/cor-option.html
index 0eb57170b..727e3dda3 100644
--- a/static/directives/cor-option.html
+++ b/static/directives/cor-option.html
@@ -1,3 +1,3 @@
-
\ No newline at end of file
+
diff --git a/static/directives/cor-options-menu.html b/static/directives/cor-options-menu.html
index 8b6cf1e26..7e5f43cc3 100644
--- a/static/directives/cor-options-menu.html
+++ b/static/directives/cor-options-menu.html
@@ -3,4 +3,4 @@
-
\ No newline at end of file
+
diff --git a/static/directives/cor-step-bar.html b/static/directives/cor-step-bar.html
index 274a2c924..117f8185d 100644
--- a/static/directives/cor-step-bar.html
+++ b/static/directives/cor-step-bar.html
@@ -1,3 +1,3 @@
-
\ No newline at end of file
+
diff --git a/static/directives/cor-step.html b/static/directives/cor-step.html
index 5339db30e..acc9baee4 100644
--- a/static/directives/cor-step.html
+++ b/static/directives/cor-step.html
@@ -3,4 +3,4 @@
{{ text }}
-
\ No newline at end of file
+
diff --git a/static/directives/cor-tab-content.html b/static/directives/cor-tab-content.html
index 997ae5af1..747ccb2c8 100644
--- a/static/directives/cor-tab-content.html
+++ b/static/directives/cor-tab-content.html
@@ -1 +1 @@
-
\ No newline at end of file
+
diff --git a/static/directives/cor-tab-panel.html b/static/directives/cor-tab-panel.html
index f92d683ab..d041c9466 100644
--- a/static/directives/cor-tab-panel.html
+++ b/static/directives/cor-tab-panel.html
@@ -1,3 +1,3 @@
-
\ No newline at end of file
+
diff --git a/static/directives/cor-tab.html b/static/directives/cor-tab.html
index f22d3bdac..07d4e0e92 100644
--- a/static/directives/cor-tab.html
+++ b/static/directives/cor-tab.html
@@ -1,11 +1,13 @@
\ No newline at end of file
+
diff --git a/static/directives/cor-tabs.html b/static/directives/cor-tabs.html
index 1a965932e..5ab85ecb1 100644
--- a/static/directives/cor-tabs.html
+++ b/static/directives/cor-tabs.html
@@ -1 +1 @@
-
\ No newline at end of file
+
diff --git a/static/directives/cor-title-content.html b/static/directives/cor-title-content.html
index 6acbe47b3..5b2077d08 100644
--- a/static/directives/cor-title-content.html
+++ b/static/directives/cor-title-content.html
@@ -1,3 +1,3 @@
\ No newline at end of file
diff --git a/static/directives/fetch-tag-dialog.html b/static/directives/fetch-tag-dialog.html
new file mode 100644
index 000000000..54480562c
--- /dev/null
+++ b/static/directives/fetch-tag-dialog.html
@@ -0,0 +1,70 @@
+
\ No newline at end of file
diff --git a/static/directives/notification-view.html b/static/directives/notification-view.html
index 6b9dda7a3..fd346609d 100644
--- a/static/directives/notification-view.html
+++ b/static/directives/notification-view.html
@@ -3,7 +3,7 @@
\ No newline at end of file
diff --git a/static/directives/organization-header.html b/static/directives/organization-header.html
index 163819b6e..397c8e161 100644
--- a/static/directives/organization-header.html
+++ b/static/directives/organization-header.html
@@ -1,5 +1,5 @@
You are using more private repositories than your plan allows. Please
upgrade your subscription to avoid disruptions in your organization's service.
-
+
You are at your current plan's number of allowed private repositories. It might be time to think about
upgrading your subscription to avoid future disruptions in your organization's service.
-
+
You are nearing the number of allowed private repositories. It might be time to think about
upgrading your subscription to avoid future disruptions in your organization's service.
-
+
Free trial until {{ parseDate(subscription.trialEnd) | date }}