import logging
import daemon
import argparse
import os
import requests
import re
import json
import shutil

from docker import Client, APIError
from tempfile import TemporaryFile, mkdtemp
from zipfile import ZipFile

from data.queue import dockerfile_build_queue
from data import model
from workers.worker import Worker
from app import app


# Root logger captures DEBUG from every module; handlers and formatters are
# attached by the startup code at the bottom of this file.
root_logger = logging.getLogger('')
root_logger.setLevel(logging.DEBUG)

# Shared log line format for both the daemon file handler and the console
# stream handler.
FORMAT = '%(asctime)-15s - %(levelname)s - %(pathname)s - %(funcName)s - %(message)s'
formatter = logging.Formatter(FORMAT)

logger = logging.getLogger(__name__)

# Backends supplied by the Flask app config: uploaded build resources and the
# build log/status store.
# NOTE(review): the interfaces of these objects are defined elsewhere;
# this file uses get_file_url, append_log_message, and set_status.
user_files = app.config['USERFILES']
build_logs = app.config['BUILDLOGS']


class StatusWrapper(object):
  """Context manager around a mutable build-status dict.

  Entering yields the dict for mutation; every exit (including the implicit
  one at construction time) persists the current dict to the build logs
  store under the build's uuid.
  """

  def __init__(self, build_uuid):
    self._uuid = build_uuid
    self._status = dict(total_commands=None,
                        current_command=None,
                        push_completion=0.0,
                        image_completion={})

    # Publish the freshly-initialized status immediately.
    self.__exit__(None, None, None)

  def __enter__(self):
    return self._status

  def __exit__(self, exc_type, value, traceback):
    build_logs.set_status(self._uuid, self._status)


class DockerfileBuildContext(object):
  """Context manager wrapping one dockerfile build/push cycle.

  Builds the Dockerfile in build_context_dir, pushes the resulting image
  to the registry host embedded in tag_name, and on exit kills/removes all
  containers, removes every image the public index does not recognize, and
  deletes the build directory.
  """

  def __init__(self, build_context_dir, tag_name, push_token, build_uuid):
    self._build_dir = build_context_dir
    self._tag_name = tag_name
    self._push_token = push_token
    self._build_uuid = build_uuid
    # Long timeout: docker builds and pushes can take many minutes.
    self._cl = Client(timeout=1200, version='1.7')
    self._status = StatusWrapper(self._build_uuid)

    dockerfile_path = os.path.join(self._build_dir, "Dockerfile")
    self._num_steps = DockerfileBuildContext.__count_steps(dockerfile_path)

    logger.debug('Will build and push to tag named: %s' % self._tag_name)

  def __enter__(self):
    return self

  def __exit__(self, exc_type, value, traceback):
    # Always clean up docker state and the on-disk build context, even when
    # the build or push raised.
    self.__cleanup()
    shutil.rmtree(self._build_dir)

  @staticmethod
  def __count_steps(dockerfile_path):
    """Return the number of Dockerfile instructions (non-blank lines that
    are not comments)."""
    with open(dockerfile_path, 'r') as dockerfileobj:
      steps = 0
      for line in dockerfileobj:
        stripped = line.strip()
        # Was `stripped[0] is not '#'`: identity comparison on strings only
        # behaves like inequality by CPython interning accident. Compare
        # values instead.
        if stripped and not stripped.startswith('#'):
          steps += 1
      return steps

  @staticmethod
  def __total_completion(statuses, total_images):
    """Estimate overall push completion in [0, 1].

    Scales the byte-level progress of the images reporting sizes by the
    fraction of images reporting at all, so early progress isn't overstated.
    """
    percentage_with_sizes = float(len(statuses)) / total_images
    sent_bytes = sum(status[u'current'] for status in statuses.values())
    total_bytes = sum(status[u'total'] for status in statuses.values())
    return float(sent_bytes) / total_bytes * percentage_with_sizes

  def build(self):
    """Run the docker build, streaming progress into the build logs.

    Returns the built image id, or None when no 'Successfully built' line
    appeared in the build output (i.e. the build failed).
    """
    logger.debug('Starting build.')

    with self._status as status:
      status['total_commands'] = self._num_steps

    logger.debug('Building to tag names: %s' % self._tag_name)
    build_status = self._cl.build(path=self._build_dir, tag=self._tag_name,
                                  stream=True)

    current_step = 0
    built_image = None
    for status in build_status:
      logger.debug('Status: %s', str(status.encode('utf-8')))
      build_logs.append_log_message(self._build_uuid, str(status))

      step_increment = re.search(r'Step ([0-9]+) :', status)
      if step_increment:
        current_step = int(step_increment.group(1))
        logger.debug('Step now: %s/%s' % (current_step, self._num_steps))
        # Renamed from `status` to avoid shadowing the loop variable.
        with self._status as status_update:
          status_update['current_command'] = current_step
        continue

      complete = re.match(r'Successfully built ([a-z0-9]+)$', status)
      if complete:
        built_image = complete.group(1)
        logger.debug('Final image ID is: %s' % built_image)
        continue

    # None when the build never produced a final image id.
    return built_image

  def push(self, built_image):
    """Push built_image to the registry encoded in the tag name, tracking
    per-image progress in the build status.

    Raises RuntimeError on a malformed tag name or a registry-reported
    push error.
    """
    # The registry host is the first component of host/namespace/repo.
    host = re.match(r'([a-z0-9.:]+)/.+/.+$', self._tag_name)
    if not host:
      raise RuntimeError('Invalid tag name: %s' % self._tag_name)

    # Try https first, then fall back to http; the wrong protocol surfaces
    # as an APIError from the login call. If both fail, the push below will
    # report the real error.
    for protocol in ['https', 'http']:
      registry_endpoint = '%s://%s/v1/' % (protocol, host.group(1))
      logger.debug('Attempting login to registry: %s' % registry_endpoint)

      try:
        self._cl.login('$token', self._push_token, registry=registry_endpoint)
        break
      except APIError:
        pass  # Probably the wrong protocol

    history = json.loads(self._cl.history(built_image))
    num_images = len(history)
    with self._status as status:
      status['total_images'] = num_images

    logger.debug('Pushing to tag name: %s' % self._tag_name)
    resp = self._cl.push(self._tag_name, stream=True)

    for status_str in resp:
      status = json.loads(status_str)
      logger.debug('Status: %s', status_str)
      if u'status' in status:
        status_msg = status[u'status']

        if status_msg == 'Pushing':
          if u'progressDetail' in status and u'id' in status:
            image_id = status[u'id']
            detail = status[u'progressDetail']

            if u'current' in detail and 'total' in detail:
              # Renamed from `status` to avoid clobbering the parsed message.
              with self._status as push_status:
                images = push_status['image_completion']

                images[image_id] = detail
                push_status['push_completion'] = \
                  DockerfileBuildContext.__total_completion(images, num_images)

      elif u'errorDetail' in status:
        message = 'Error pushing image.'
        if u'message' in status[u'errorDetail']:
          message = str(status[u'errorDetail'][u'message'])

        raise RuntimeError(message)

  def __cleanup(self):
    """Kill and remove all containers, then remove every image the public
    index does not know about (preserving public base images).

    Raises RuntimeError if a private image survives removal.
    """
    # First clean up any containers that might be holding the images
    for running in self._cl.containers(quiet=True):
      logger.debug('Killing container: %s' % running['Id'])
      self._cl.kill(running['Id'])

    # Next, remove all of the containers (which should all now be killed)
    for container in self._cl.containers(all=True, quiet=True):
      logger.debug('Removing container: %s' % container['Id'])
      self._cl.remove_container(container['Id'])

    # Iterate all of the images and remove the ones that the public registry
    # doesn't know about, this should preserve base images.
    images_to_remove = set()
    repos = set()
    for image in self._cl.images():
      images_to_remove.add(image['Id'])

      for tag in image['RepoTags']:
        tag_repo = tag.split(':')[0]
        if tag_repo != '<none>':
          repos.add(tag_repo)

    for repo in repos:
      repo_url = 'https://index.docker.io/v1/repositories/%s/images' % repo
      repo_info = requests.get(repo_url)
      # Floor division so 2xx detection also holds under true division
      # (e.g. 201 / 100 == 2.01 under Python 3, which is != 2).
      if repo_info.status_code // 100 == 2:
        for repo_image in repo_info.json():
          if repo_image['id'] in images_to_remove:
            logger.debug('Image was deemed public: %s' % repo_image['id'])
            images_to_remove.remove(repo_image['id'])

    for to_remove in images_to_remove:
      logger.debug('Removing private image: %s' % to_remove)
      try:
        self._cl.remove_image(to_remove)
      except APIError:
        # Sometimes an upstream image removed this one
        pass

    # Verify that our images were actually removed
    for image in self._cl.images():
      if image['Id'] in images_to_remove:
        raise RuntimeError('Image was not removed: %s' % image['Id'])


class DockerfileBuildWorker(Worker):
  """Queue worker that downloads a queued build resource, builds it with
  docker, pushes the result, and records phase transitions on the build
  row and in the build logs."""

  def __init__(self, *vargs, **kwargs):
    super(DockerfileBuildWorker, self).__init__(*vargs, **kwargs)

    # Dispatch table: resource content-type -> routine that materializes a
    # build directory from the downloaded resource.
    self._mime_processors = {
      'application/zip': DockerfileBuildWorker.__prepare_zip,
      'text/plain': DockerfileBuildWorker.__prepare_dockerfile,
      'application/octet-stream': DockerfileBuildWorker.__prepare_dockerfile,
    }

  @staticmethod
  def __prepare_zip(request_file):
    """Extract a zipped build context into a fresh temp directory and
    return that directory's path."""
    context_dir = mkdtemp(prefix='docker-build-')

    # Spool the downloaded bytes to disk so ZipFile can seek in them.
    with TemporaryFile() as spool:
      spool.write(request_file.content)
      ZipFile(spool).extractall(context_dir)

    return context_dir

  @staticmethod
  def __prepare_dockerfile(request_file):
    """Write a bare Dockerfile into a fresh temp build directory and
    return that directory's path."""
    context_dir = mkdtemp(prefix='docker-build-')
    with open(os.path.join(context_dir, "Dockerfile"), 'w') as out:
      out.write(request_file.content)

    return context_dir

  def process_queue_item(self, job_details):
    """Process one queued build job.

    Returns True on success, False when the build or push failed (the
    failure is recorded on the build row and in the build logs). Raises
    RuntimeError for an unrecognized resource content type.
    """
    repository_build = model.get_repository_build(job_details['namespace'],
                                                  job_details['repository'],
                                                  job_details['build_uuid'])

    def set_phase(phase):
      # Persist a phase transition on the build row.
      repository_build.phase = phase
      repository_build.save()

    resource_url = user_files.get_file_url(repository_build.resource_key)
    tag_name = repository_build.tag
    access_token = repository_build.access_token.code

    start_msg = ('Starting job with resource url: %s tag: %s and token: %s' %
                 (resource_url, tag_name, access_token))
    logger.debug(start_msg)
    build_logs.append_log_message(repository_build.uuid, start_msg)

    docker_resource = requests.get(resource_url)
    c_type = docker_resource.headers['content-type']

    filetype_msg = ('Request to build file of type: %s with tag: %s' %
                    (c_type, tag_name))
    logger.info(filetype_msg)
    build_logs.append_log_message(repository_build.uuid, filetype_msg)

    if c_type not in self._mime_processors:
      raise RuntimeError('Invalid dockerfile content type: %s' % c_type)

    build_dir = self._mime_processors[c_type](docker_resource)
    uuid = repository_build.uuid
    set_phase('building')

    with DockerfileBuildContext(build_dir, tag_name, access_token,
                                repository_build.uuid) as build_ctxt:
      try:
        built_image = build_ctxt.build()

        if not built_image:
          set_phase('error')
          build_logs.append_log_message(uuid, 'Unable to build dockerfile.')
          return False

        set_phase('pushing')
        build_ctxt.push(built_image)
        set_phase('complete')

      except Exception as exc:
        logger.exception('Exception when processing request.')
        set_phase('error')
        build_logs.append_log_message(uuid, exc.message)
        return False

    return True


# Command-line entry point: run the build worker either detached as a daemon
# (logging to a file) or in the foreground (logging to the console).
desc = 'Worker daemon to monitor dockerfile build'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-D', action='store_true', default=False,
                    help='Run the worker in daemon mode.')
parser.add_argument('--log', default='dockerfilebuild.log',
                    help='Specify the log file for the worker as a daemon.')
args = parser.parse_args()


worker = DockerfileBuildWorker(dockerfile_build_queue)

if args.D:
  # Daemon mode: keep the log file's descriptor open across the
  # daemonization fork so logging continues to work after detaching.
  handler = logging.FileHandler(args.log)
  handler.setFormatter(formatter)
  root_logger.addHandler(handler)
  with daemon.DaemonContext(files_preserve=[handler.stream]):
    worker.start()

else:
  # Foreground mode: stream log output to the console.
  handler = logging.StreamHandler()
  handler.setFormatter(formatter)
  root_logger.addHandler(handler)
  worker.start()