# coding: utf-8
#
# blogc: A blog compiler.
# Copyright (C) 2014-2017 Rafael G. Martins <rafael@rafaelmartins.eng.br>
#
# This program can be distributed under the terms of the BSD License.
# See the license for details.
#

from contextlib import closing
from StringIO import StringIO

import base64
import boto3
import hashlib
import json
import mimetypes
import os
import shutil
import subprocess
import tarfile
import urllib2

BLOGC_VERSION = '@PACKAGE_VERSION@'

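# binaries bundled with this function (such as blogc) are deployed next to
# this file; prepend that directory to PATH so they can be found by name.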
cwd = os.path.dirname(os.path.abspath(__file__))
os.environ['PATH'] = '%s:%s' % (cwd, os.environ.get('PATH', ''))

s3 = boto3.resource('s3')

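# GITHUB_AUTH is expected to hold 'user:token' credentials for the GitHub
# API. If it does not contain ':', it is assumed to be a base64-encoded KMS
# ciphertext and is decrypted before use.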
GITHUB_AUTH = os.environ.get('GITHUB_AUTH')
if GITHUB_AUTH is not None and ':' not in GITHUB_AUTH:
    GITHUB_AUTH = boto3.client('kms').decrypt(
        CiphertextBlob=base64.b64decode(GITHUB_AUTH))['Plaintext']


def get_tarball(repo_name):
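    """Download the master branch tarball of the given GitHub repository,
    extract it under /tmp/ and return the path of the extracted root
    directory."""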
    tarball_url = 'https://api.github.com/repos/%s/tarball/master' % repo_name
    request = urllib2.Request(tarball_url)

    if GITHUB_AUTH is not None:
        auth = base64.b64encode(GITHUB_AUTH)
        request.add_header('Authorization', 'Basic %s' % auth)

    with closing(urllib2.urlopen(request)) as fp:
        tarball = fp.read()

    rootdir = None
    with closing(StringIO(tarball)) as fp:
        with tarfile.open(fileobj=fp, mode='r:gz') as tar:
            for f in tar.getnames():
                if '/' not in f:
                    rootdir = f
                    break
            if rootdir is None:
                raise RuntimeError('Failed to find a directory in tarball')
            rootdir = '/tmp/%s' % rootdir

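            # /tmp/ may persist across warm Lambda invocations; drop any
            # leftover tree from a previous run before extracting.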
            if os.path.isdir(rootdir):
                shutil.rmtree(rootdir)

            tar.extractall('/tmp/')

    return rootdir


def translate_filename(filename):
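    """Translate a local file name into the name it should also be
    published under on S3. Currently only index.$EXT files are mapped to
    index.html; all other names are returned unchanged."""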
    f = filename.split('/')
    if len(f) == 0:
        return filename
    basename = f[-1]

    # replace any index.$EXT file with index.html, because S3 only allows
    # a single directory index file name to be declared per bucket.
    p = basename.split('.')
    if len(p) == 2 and p[0] == 'index':
        f[-1] = 'index.html'
        f = '/'.join(f)
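        # only publish under the translated name if no file with that name
        # already exists, so a real index.html is never shadowed.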
        if not os.path.exists(f):
            return f

    return filename


def sync_s3(src, dest, settings_file):
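    """Synchronize the local build output in src with the S3 bucket dest.

    settings_file (s3.json in the repository root) may override the
    destination bucket and declare per-file content types, e.g.:

        {"bucket": "example.com", "content-type": {"foo": "text/plain"}}
    """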
    settings = {}
    if os.path.exists(settings_file):
        with open(settings_file, 'r') as fp:
            settings = json.load(fp)

    content_types = settings.get('content-type', {})
    dest = settings.get('bucket', dest)

    bucket = s3.Bucket(dest)

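    # list every object currently in the bucket, skipping "directory"
    # placeholder keys.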
    remote_files = {}
    for obj in bucket.objects.all():
        if not obj.key.endswith('/'):
            remote_files[obj.key] = obj

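    # walk the local build output, reading each file and guessing its
    # content type (with optional overrides from the settings file).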
    local_files = {}
    for root, dirs, files in os.walk(src):
        real_root = root[len(src):].lstrip('/')
        for filename in files:
            real_filename = os.path.join(real_root, filename)
            data = {'Key': real_filename}

            mime = content_types.get(real_filename,
                                     mimetypes.guess_type(real_filename)[0])
            if mime is not None:
                data['ContentType'] = mime

            with open(os.path.join(src, real_filename), 'rb') as fp:
                data['Body'] = fp.read()

            # always push the original file to its place
            local_files[real_filename] = data

            # if the file should also be published under a translated key
            # (an index.html alias), push that copy too
            translated_filename = translate_filename(real_filename)
            if translated_filename != real_filename:
                translated_data = data.copy()
                translated_data['Key'] = translated_filename
                local_files[translated_filename] = translated_data

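    # upload files missing from the bucket, re-upload files whose content
    # changed (compared by SHA-1) and delete remote files that no longer
    # exist locally.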
    to_upload = []
    for filename in local_files:
        if filename not in remote_files:
            to_upload.append(local_files[filename])

    to_delete = []
    for filename in remote_files:
        if filename in local_files:
            local_hash = hashlib.sha1(local_files[filename]['Body'])

            with closing(remote_files[filename].get()['Body']) as fp:
                remote_hash = hashlib.sha1(fp.read())

            if local_hash.hexdigest() != remote_hash.hexdigest():
                to_upload.append(local_files[filename])
        else:
            to_delete.append(filename)

    for data in to_upload:
        print 'Uploading file: %s; content-type: "%s"' % (
            data['Key'],
            data.get('ContentType'),
        )
        bucket.put_object(**data)

    for filename in to_delete:
        print 'Deleting file: %s' % filename
        remote_files[filename].delete()


def sns_handler(message):
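    """Handle a single GitHub push event delivered through SNS: fetch the
    repository tarball, run the build and synchronize the output to S3."""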
    print 'blogc-github-lambda %s' % BLOGC_VERSION
    payload = json.loads(message)

    if payload['ref'] == 'refs/heads/master':
        print 'Building: %s' % payload['repository']['full_name']
        debug = 'DEBUG' in os.environ

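        # expose the bundled blogc binary and the output directory to the
        # build through the environment.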
        env = os.environ.copy()
        env['BLOGC'] = os.path.join(cwd, 'blogc')
        env['OUTPUT_DIR'] = '_build_lambda'

        rootdir = get_tarball(payload['repository']['full_name'])
        blogcfile = os.path.join(rootdir, 'blogcfile')

        if os.path.isfile(blogcfile):
            # deploy using blogc-make
            args = [os.path.join(cwd, 'blogc'), '-m', '-f', blogcfile,
                    'all']
            if debug:
                args.append('-V')
            rv = subprocess.call(args, env=env)
        else:
            # fall back to plain make. Note that this will break if Amazon
            # removes GNU make from the Lambda images.
            stream = None if debug else subprocess.PIPE
            rv = subprocess.call(['make', '-C', rootdir], env=env,
                                 stdout=stream, stderr=stream)
        if rv != 0:
            raise RuntimeError('Failed to run the build tool.')

        sync_s3(os.path.join(rootdir, env['OUTPUT_DIR']),
                payload['repository']['name'],
                os.path.join(rootdir, 's3.json'))

    else:
        print "Commit not for master branch, skipping: %s" % payload['ref']


def lambda_handler(event, context):
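    """AWS Lambda entry point: dispatch every SNS record in the event to
    sns_handler()."""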
    for record in event['Records']:
        if 'Sns' in record:
            sns_handler(record['Sns']['Message'])