This commit is contained in:
Andy McKay 2016-01-22 16:04:51 -08:00
Родитель 2ae96ba68d
Коммит c3da8450ce
15 изменённых файлов: 0 добавлений и 932 удалений

Просмотреть файл

@ -1,33 +0,0 @@
#!/usr/bin/env python
import os
import re

# Directory (relative to this script) holding the stylesheet sources.
CSS_DIR = '../media/css'
# Matches a closed rule block (group 1: "{", its property lines) immediately
# followed by the start of the next selector (group 2), so a blank line can be
# inserted between rules.  Raw string avoids invalid-escape warnings.
REGEX = re.compile(r'({\n(?:\s+[\w-]+: .*?;\n)+)(.*?{)', re.MULTILINE)
def get_css_filenames():
    """Collect the paths of every Stylus/Less source file under CSS_DIR."""
    matches = []
    for dirpath, _dirnames, names in os.walk(CSS_DIR):
        matches.extend(os.path.join(dirpath, name)
                       for name in names
                       if name.endswith(('.styl', '.less')))
    return matches
def add_linebreak_css(filename):
    """Rewrite `filename` in place, inserting a blank line after each rule.

    Uses a context manager so the handle is closed even if the regex
    substitution raises (the original leaked the open file).
    """
    with open(filename, 'r+') as f:
        contents = f.read()
        f.seek(0)
        f.write(REGEX.sub(r'\1\n\2', contents))
        # Drop any leftover bytes if the rewritten text is shorter.
        f.truncate()
def run():
    """Reformat every stylesheet found under CSS_DIR."""
    for path in get_css_filenames():
        add_linebreak_css(path)
# Allow use as a standalone script.
if __name__ == '__main__':
    run()

Просмотреть файл

@ -1,133 +0,0 @@
"""
Fetch data from the olympia database for validation results and unlisted
addons for use with the validations.py script.
Expected environment variables:
MYSQL_HOST - The MySQL host.
MYSQL_USER - The MySQL username.
MYSQL_PASSWORD - The MySQL password.
Actions supported:
validations - Fetch validation data for the last 30 days and write it to
the filesystem in files named `validations/YYYY-MM-DD.txt`.
unlisted - Fetch all unlisted addon guids and write the results to
`validations/unlisted-addons.txt`.
Usage:
python fetch_validation_data.py <action>
"""
import json
import os
import sys
from datetime import datetime, timedelta
import MySQLdb

# Date format used both for output filenames and for LIKE-matching the
# `created` timestamp column.
date_format = '%Y-%m-%d'
# Connection details come from the environment (see the module docstring);
# the connection is opened at import time and shared by every action.
db = MySQLdb.connect(host=os.environ['MYSQL_HOST'],
                     user=os.environ['MYSQL_USER'],
                     passwd=os.environ['MYSQL_PASSWORD'],
                     db="addons_mozilla_org")
# Module-level cursor reused by all the fetch_* functions below.
cursor = db.cursor()
# Parameterized query: %s is bound to a LIKE pattern such as '2016-01-01%'
# so one execution fetches a single day's validation JSON blobs.
QUERY_FORMAT = """
SELECT validation
FROM file_uploads
WHERE created LIKE %s
AND validation IS NOT NULL
ORDER BY created DESC;
"""
def single_result_formatter(row):
    """Render a result row as just its first column."""
    first_column = row[0]
    return first_column
def write_results(filename, formatter=single_result_formatter):
    """Write the results in the current query to `filename`, one line per
    row, rendering each row with `formatter` (default: first column only)."""
    with open(filename, 'w') as out:
        for row in cursor:
            out.write(formatter(row) + '\n')
def fetch_data_for_date(date):
    """Fetch validation results for `date` (a datetime) and write them to
    validations/YYYY-MM-DD.txt."""
    date_string = date.strftime(date_format)
    # print() call form: identical output on Python 2 for a single argument,
    # and forward-compatible with Python 3 (the statement form is not).
    print('Fetching for {date}'.format(date=date_string))
    cursor.execute(QUERY_FORMAT, [date_string + '%'])
    write_results('validations/{date}.txt'.format(date=date_string))
def fetch_unlisted_addon_ids():
    """Fetch the guid for each unlisted addon on AMO right now."""
    # print() form: same output on Python 2, Python 3 compatible.
    print('Fetching unlisted addons')
    cursor.execute('SELECT guid FROM addons WHERE is_listed=0 '
                   'AND guid IS NOT NULL;')
    write_results('validations/unlisted-addons.txt')
def fetch_lite_addon_ids():
    """Fetch the guid for each STATUS_LITE (status=8) addon on AMO."""
    # print() form: same output on Python 2, Python 3 compatible.
    print('Fetching STATUS_LITE addons')
    cursor.execute('SELECT guid FROM addons WHERE status=8 '
                   'AND guid IS NOT NULL;')
    write_results('validations/lite-addons.txt')
def fetch_validations():
    """Fetch the last 30 days of validations, oldest day first."""
    today = datetime.today()
    for days_ago in range(30, 0, -1):
        fetch_data_for_date(today - timedelta(days=days_ago))
def fetch_manual_reviews():
    """Fetch all manual review results for unlisted addons and write them to
    validations/manual-reviews.txt as one JSON object per line."""
    def formatter(row):
        # NOTE(review): '%H:%M%S' is missing a colon between minutes and
        # seconds, but validations.py parses the identical format, so the two
        # files must be changed together if this is ever fixed.
        return json.dumps(
            {'guid': row[0], 'version': row[1], 'action': row[2],
             'created': row[3].strftime('%Y-%m-%dT%H:%M%S')})
    query = """
    SELECT a.guid, v.version, la.action, la.created
    FROM versions v
    JOIN files f on f.version_id=v.id
    JOIN file_validation fv ON fv.file_id=f.id
    JOIN addons a on a.id=v.addon_id
    JOIN log_activity_version lav ON lav.version_id=v.id
    JOIN log_activity la ON la.id=lav.activity_log_id
    WHERE a.guid IS NOT NULL
    AND a.is_listed=0
    AND la.action IN (42, 43) -- (PRELIMINARY_VERSION, REJECT_VERSION)
    AND fv.passed_auto_validation=0
    ;"""
    # print() form: same output on Python 2, Python 3 compatible.
    print('Fetching manual reviews')
    cursor.execute(query)
    write_results('validations/manual-reviews.txt', formatter=formatter)
def fetch_all():
    """Helper function to run all fetch commands (except 'all' itself)."""
    for name in ACTIONS:
        if name != 'all':
            ACTIONS[name]()
# Maps the command-line action name to the function that performs it.
ACTIONS = {
    'validations': fetch_validations,
    'unlisted': fetch_unlisted_addon_ids,
    'lite': fetch_lite_addon_ids,
    'manual_reviews': fetch_manual_reviews,
    'all': fetch_all,
}
if __name__ == '__main__':
    # False when no (or too many) arguments were given; a falsy value never
    # matches an ACTIONS key, so it falls through to the usage message.
    action = len(sys.argv) == 2 and sys.argv[1]
    if action in ACTIONS:
        ACTIONS[action]()
    else:
        # print() form: same output on Python 2, Python 3 compatible.
        print('Unknown action "{action}". Known actions are {actions}'.format(
            action=action or '', actions=', '.join(ACTIONS.keys())))

Просмотреть файл

@ -1,35 +0,0 @@
import logging
import os
import site

# Add the parent dir to the python path so we can import manage.
parent_dir = os.path.dirname(__file__)
site.addsitedir(os.path.abspath(os.path.join(parent_dir, '../')))

# manage adds /apps and /lib to the Python path.
import manage  # noqa: we need this so it's a standalone script.

import amo  # noqa
from files.models import File  # noqa
from files.utils import parse_addon  # noqa

log = logging.getLogger('backfill-files-is_multi_package')

"""Walk the themes and addons files to check if they're multi-package XPIs.
https://developer.mozilla.org/en-US/docs/Multiple_Item_Packaging
If they are, set File.is_multi_package = True
"""

# Only (complete) themes and addons can have multi-package XPIs.
for file_ in File.objects.filter(
        version__addon__type__in=[amo.ADDON_EXTENSION, amo.ADDON_THEME]):
    try:
        data = parse_addon(file_.file_path, addon=file_.version.addon)
        if data.get('is_multi_package'):
            log.info('Found multi-package: {0}'.format(file_.file_path))
            file_.update(is_multi_package=True)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; `Exception` keeps the best-effort behavior without
        # making the script impossible to interrupt.
        log.error('Failed checking file {0}'.format(file_.pk))

Просмотреть файл

@ -1,39 +0,0 @@
import logging
import os
import site
import zipfile

# Add the parent dir to the python path so we can import manage.
parent_dir = os.path.dirname(__file__)
site.addsitedir(os.path.abspath(os.path.join(parent_dir, '../')))

# manage adds /apps and /lib to the Python path.
import manage  # noqa: we need this so it's a standalone script.

import amo  # noqa
from files.models import File  # noqa
from files.utils import parse_addon  # noqa

log = logging.getLogger('find-MANIFEST-files-in-xpi')

"""Walk all the XPI files to find those that have META-INF/MANIFEST.mf file.
This file with was not removed by the signing_clients<=0.1.13 on signing, and
thus would result in a "signature not recognizable" error. See bug
https://bugzilla.mozilla.org/show_bug.cgi?id=1169574.
"""

addons = set()
# Only (complete) themes and addons can have XPI files.
for file_ in File.objects.filter(
        version__addon__type__in=[amo.ADDON_EXTENSION, amo.ADDON_THEME],
        is_signed=True):
    try:
        with zipfile.ZipFile(file_.file_path, mode='r') as zf:
            filenames = zf.namelist()
        if u'META-INF/MANIFEST.MF' in filenames:
            addons.add(file_.version.addon.pk)
    except (zipfile.BadZipfile, IOError):
        # Missing or corrupt archives are expected; skip them silently.
        pass

# Emit the affected addon ids space-separated, for piping into other tools.
# print() form: identical output on Python 2, Python 3 compatible.
print(' '.join(str(addon_id) for addon_id in addons))

Просмотреть файл

@ -1,11 +0,0 @@
#!/bin/sh
#
# Runs "pip install" whenever we detect changes to the requirements after
# a pull.
#
# To install, symlink inside your repo's .git/hooks/ directory.

# Count requirements/ paths touched between the pre-pull and post-pull HEAD.
changed=$(git diff HEAD@{1} HEAD --name-only | grep -c 'requirements/')
if [ "$changed" -ne 0 ]
then
    $VIRTUAL_ENV/bin/pip install --no-deps -r requirements/dev.txt
fi

Просмотреть файл

@ -1,16 +0,0 @@
-- This file is run from cron before the database dump each day. This resets a
-- couple core things that random people might change on the site.

-- Re-instate the public-server banner; REPLACE overwrites any edited value.
REPLACE INTO config (`key`,`value`)
VALUES (
"site_notice",
"This is a public test server. <b>Any information on this site is public including passwords. Don't put private information here!</b> For more information: <a href=\"http://micropipes.com/blog/2011/03/29/welcome-to-the-landfill/\">What this server is for</a> or the <a href=\"http://olympia.readthedocs.org/en/latest/\">development documentation</a>");

-- Reset the primary admin account (id=1) to the known landfill credentials.
-- password is "nobody"
UPDATE users SET
email="nobody@mozilla.org",
username="landfilladmin",
display_name="Landfill Admin",
password="sha512$3cd0cddefc8711c73b9b7190e13e755bd1c00e9dcbf6d837956fa9dc92dab2e1$5669268c0f604520f13b5b956580bf137914df81f99702b77d462ac24f7b63e60611560ee754ad729674149543d11e54d7596453d9a739c40a0a5a4ca4b062e1",
homepage="https://landfill-addons.allizom.org"
WHERE id=1;

Просмотреть файл

@ -1,52 +0,0 @@
#!/usr/bin/env python
import os
import re
import time

# Paths of the .less files being watched for changes.
towatch = []
# Maps an @import'ed .less file to the list of files that include it, so
# editing a shared include re-compiles its dependents.
includes = {}
def say(s):
    """Print `s` prefixed with the current wall-clock time."""
    t = time.strftime('%X')
    # print() call form: identical output on Python 2 for a single argument,
    # and forward-compatible with Python 3.
    print('[%s] %s' % (t, s))
def render_list(files):
    """Compile each .less file, then re-compile anything that imports it."""
    for path in files:
        os.system('lessc %s %s.css' % (path, path))
        if path in includes:
            say('re-compiling %d dependencies' % len(includes[path]))
            # Recurse so transitive includers are rebuilt too.
            render_list(includes[path])
    say('re-compiled %d files' % len(files))
def watch():
    """Poll the watched files' mtimes twice a second, re-compiling whatever
    changed since the previous snapshot.  Never returns."""
    say('watching %d files...' % len(towatch))
    snapshot = set((path, os.stat(path).st_mtime) for path in towatch)
    while True:
        current = set((path, os.stat(path).st_mtime) for path in towatch)
        stale = [path for (path, _mtime) in snapshot.difference(current)]
        if stale:
            render_list(stale)
        snapshot = current
        time.sleep(.5)
# Discover every .less file under ./media and build the include map so that
# editing a shared partial re-compiles the files that @import it.
for root, dirs, files in os.walk('./media'):
    less = [root + '/' + f for f in files if f.endswith('.less')]
    for f in less:
        # Read and close promptly (the file handle used to be leaked, and the
        # `post_file` alias of the contents was never used).
        with open(f, 'r') as source:
            body = source.read()
        m = re.search(r"@import '([a-zA-Z0-9_-]+)';", body)
        if m:
            # `f` depends on the imported partial; record the reverse edge.
            k = root + '/' + m.group(1) + '.less'
            includes.setdefault(k, []).append(f)
    # Don't descend into the repository metadata.
    if '.git' in dirs:
        dirs.remove('.git')
    towatch += less
watch()

Просмотреть файл

@ -1,11 +0,0 @@
#!/bin/sh
# Generates the filename for the next available migration.
# usage: ./scripts/newmig.sh <migration-slug>
# % ./scripts/newmig.sh foobar
# > switch generated to 301-foobar.sql

# Highest existing migration number (filenames look like NNN-slug.sql).
NUM=$(ls migrations | sort -n | tail -n1 | awk -F- '{print $1}')
# Next free number.
NUM=$((NUM + 1))
echo "$NUM-$1.sql"

Просмотреть файл

@ -1,13 +0,0 @@
#!/bin/sh
# Generates a waffle switch migration for a given switch slug.
# usage: ./scripts/newwaffle.sh <waffle-switch-name>
# % ./scripts/newwaffle.sh foobar
# > switch generated to 301-waffle-foobar.sql

# Reserve the next migration filename for this switch.
NEWFILE=$(./scripts/newmig.sh waffle-$1)
echo "INSERT INTO waffle_switch (name, active) VALUES ('$1', 0);
" > ./migrations/$NEWFILE
echo "switch generated to $NEWFILE"

Просмотреть файл

@ -1,6 +0,0 @@
#!/bin/bash
# Run pylint over the Jenkins workspace using the project rc file.
VENV="$WORKSPACE/venv"
source "$VENV/bin/activate"
export PYTHONPATH="$WORKSPACE/..:$WORKSPACE/apps:$WORKSPACE/lib"
# Quote $WORKSPACE so paths containing spaces don't word-split; stderr is
# discarded so pylint.txt contains only the parseable report.
pylint --rcfile scripts/pylintrc "$WORKSPACE" 2>/dev/null > pylint.txt
echo "pylint complete"

Просмотреть файл

@ -1,49 +0,0 @@
[MASTER]
ignore=migrations,settings_local.py,test_buttons.py

[MESSAGES CONTROL]
# :C0111: *Missing docstring*
# This is far too noisy, ideally we should ignore it in specific places
# but pylint isn't very good at letting us specify certain rules for certain
# files
# :C0103: *Invalid name "%s" (should match %s)*
# This is rather annoying in some contexts.
# :W0403: *Relative import %r, should be %r*
# We don't know where zamboni is going to be running, so relative imports
# are necessary for now.
# :W0142: *Used * or ** magic*
# This isn't harry potter... there's no such thing as magic.
# :W0232: *Class has no __init__ method*
# :E1101: *%s %r has no %r member*
# This is too often wrong to be useful.
# :R0201: Method could be a function
# Useful, but there's a number of places where we use methods
# since we're extending Django classes that require specific
# methods.
# :W0402: String is actually still useful
# :W0141: *Used builtin function %r*
# Used when a black listed builtin function is used (see the bad-function
# option). Usual black listed functions are the ones like map, or filter,
# where Python offers now some cleaner alternative like list comprehension.
# :W0212: *Access to a protected member %s of a client class*
# Used when a protected member (i.e. class member with a name beginning
# with an underscore) is access outside the class or a descendant of the
# class where it's defined.
# :W0201: Attribute %r defined outside __init__
disable-msg=C0111,C0103,W0403,W0142,W0232,E1101,R0201,W0402,W0141,W0212,W0201

[DESIGN]
max-args=6
max-public-methods=45
min-public-methods=0

# The file used to declare [REPORTS] twice, the first time with the invalid
# key `output_format` (pylint's option is spelled `output-format`); the two
# sections are merged here with the correct spelling.
[REPORTS]
output-format=parseable
include-ids=yes

Просмотреть файл

@ -1,97 +0,0 @@
"""
A script for generating siege files with a bunch of URL variations.
"""
import re
import sys
part_re = re.compile(r'\{([-\w]+)\}')
AMO_LANGUAGES = (
'af', 'ar', 'ca', 'cs', 'da', 'de', 'el', 'en-US', 'es', 'eu', 'fa', 'fi',
'fr', 'ga-IE', 'he', 'hu', 'id', 'it', 'ja', 'ko', 'mn', 'nl', 'pl',
'pt-BR', 'pt-PT', 'ro', 'ru', 'sk', 'sq', 'sr', 'sv-SE', 'uk', 'vi',
'zh-CN', 'zh-TW',
)
config = {
'base': [],
'locale': AMO_LANGUAGES,
'app': ['firefox'],
'extension-slug': [''] + """
alerts-and-updates appearance bookmarks download-management
feeds-news-blogging language-support photos-music-videos
privacy-security social-communication tabs toolbars web-development
other""".split(),
'theme-slug': [''] + """
animals compact large miscellaneous modern nature os-integration retro
sports""".split(),
'theme-sort': 'name updated created downloads rating'.split(),
'page': '1 2'.split(),
'exp': 'on off'.split(),
'personas-slug': [''] + """
abstract causes fashion firefox foxkeh holiday music nature other
scenery seasonal solid sports websites""".split(),
'personas-sort': """up-and-coming created popular rating""".split()
}
root = '{base}/{locale}/{app}'
templates = t = {
'root': '/',
'extensions': '/extensions/{extension-slug}/',
'language-tools': '/language-tools',
'themes': '/themes/{theme-slug}?sort={theme-sort}&page={page}',
'personas': '/personas/{personas-slug}',
}
t['themes-unreviewed'] = t['themes'] + '&unreviewed={exp}'
t['personas-sort'] = t['personas'] + '?sort={personas-sort}'
t['extensions-sort'] = t['extensions'] + '?sort={theme-sort}'
t['extensions-featured'] = t['extensions'] + 'featured'
for key, value in templates.items():
templates[key] = root + value
def combos(s, parts):
    """Expand template `s` over every combination of config values for the
    placeholder names in `parts`."""
    def expand(remaining, bound):
        key, rest = remaining[0], remaining[1:]
        expanded = []
        for choice in config[key]:
            bound[key] = choice
            if rest:
                expanded.extend(expand(rest, bound))
            else:
                expanded.append(s.format(**bound))
        return expanded
    return expand(parts, {})
def gen(choices=templates):
    """Expand every template in `choices` into its concrete URL list."""
    urls = []
    for template in choices:
        placeholders = part_re.findall(template)
        urls.extend(combos(template, placeholders))
    return urls
def main():
    """Print siege URLs for the requested templates.

    argv[1] is the base URL; any further args name templates (default: all).
    """
    args = sys.argv
    try:
        base, choices = sys.argv[1], args[2:] or templates.keys()
    except IndexError:
        # print() call form: identical output on Python 2 for one argument,
        # forward-compatible with Python 3.
        print('Usage: python siege.py <BASE> [%s]' % (', '.join(templates)))
        print('\nBASE should be something like "http://localhost:8000/z".')
        print('The remaining arguments are names of url templates.')
        sys.exit(1)
    config['base'] = [base.rstrip('/')]
    print('\n'.join(gen(templates[k] for k in choices)))
# Allow use as a standalone script.
if __name__ == '__main__':
    main()

Просмотреть файл

@ -1,85 +0,0 @@
from datetime import datetime
from random import shuffle
from validations import (automated_count_pipeline, automated_validations,
parse_validations, reduce_pipeline,
severe_validations, unlisted_validations)
# Explicitly marked unlisted via the 'listed' flag.
TEST_ADDON_LISTED_FALSE = {'metadata': {'listed': False, 'id': 'wat'}}
# No 'listed' key; only treated as unlisted when its id is in the unlisted
# guid set passed to unlisted_validations().
TEST_ADDON_UNLISTED_ID = {'metadata': {'id': 'baz'}}
TEST_ADDONS = [
    {'metadata': {'listed': True, 'id': 'yo'}},
    TEST_ADDON_LISTED_FALSE,
    {'metadata': {'id': 'foobar'}},
    TEST_ADDON_UNLISTED_ID,
]
def test_parse_validations():
    """Each input line is decoded independently as JSON."""
    raw = ['{"foo":"bar"}\n', '["baz",1,{"wat":99}]\n']
    parsed = list(parse_validations(raw))
    assert parsed == [{'foo': 'bar'}, ['baz', 1, {'wat': 99}]]
def test_unlisted_validations_without_unlisted_addons():
    """With an empty guid set, only listed=False addons survive."""
    results = list(unlisted_validations(TEST_ADDONS, set()))
    assert results == [TEST_ADDON_LISTED_FALSE]
def test_unlisted_validations_with_unlisted_addons():
    """Addons whose id is in the unlisted guid set are also kept."""
    results = list(unlisted_validations(TEST_ADDONS, set(['baz', 'wat'])))
    assert results == [TEST_ADDON_LISTED_FALSE, TEST_ADDON_UNLISTED_ID]
def test_severe_validations():
    """Only low-or-higher signing severity passes; trivial does not."""
    def summary(high=0, medium=0, low=0, trivial=0):
        return {'signing_summary': {'high': high, 'medium': medium,
                                    'trivial': trivial, 'low': low}}
    nope = summary()
    minor = summary(low=1)
    trivial = summary(trivial=1)
    severe = summary(high=10)
    results = severe_validations([nope, trivial, minor, nope, severe, nope])
    assert list(results) == [minor, severe]
# Fixtures for the automated-review tests.  AUTO* carry the
# passed_auto_validation flag; the others must be filtered out.
LISTED1 = {'metadata': {'id': '10', 'listed': True}}
LISTED2 = {'metadata': {'id': '33', 'listed': True}}
AUTO1 = {'metadata': {'id': '25', 'listed': True},
         'passed_auto_validation': False}
AUTO2 = {'metadata': {'id': '50', 'listed': False},
         'passed_auto_validation': True}
AUTO3 = {'metadata': {'id': '71', 'listed': True},
         'passed_auto_validation': True}
UNLISTED1 = {'metadata': {'id': '90', 'listed': False}}
UNLISTED2 = {'metadata': {'id': '81', 'listed': True}}
VALIDATIONS = [LISTED1, LISTED2, AUTO1, AUTO2, AUTO3, UNLISTED1, UNLISTED2]
now = datetime.now().date()
# Every validation needs a date for the daily-count pipeline.
for validation in VALIDATIONS:
    validation['date'] = now
# An addon is eligible for automated signing only when its id appears in
# BOTH sets: AUTO1/2/3 do, the others do not.
UNLISTED_ADDONS_SET = set(['25', '50', '71', '81'])
LITE_ADDONS_SET = set(['33', '25', '50', '71'])
# Shuffle to prove the pipelines don't depend on input order.
shuffle(VALIDATIONS)
def test_automated_validations():
    """Only addons present in both guid sets are candidates for automated
    signing, regardless of whether they passed."""
    results = automated_validations(
        VALIDATIONS,
        unlisted_addons=UNLISTED_ADDONS_SET,
        lite_addons=LITE_ADDONS_SET)
    ordered = sorted(results, key=lambda v: v['metadata']['id'])
    assert ordered == [AUTO1, AUTO2, AUTO3]
def test_automated_signing_count():
    """End-to-end: the count pipeline tallies passes and failures per day."""
    pipeline = automated_count_pipeline(unlisted_addons=UNLISTED_ADDONS_SET,
                                        lite_addons=LITE_ADDONS_SET,
                                        load_file=lambda f: VALIDATIONS)
    count = reduce_pipeline(pipeline, ['file1.txt'])
    assert count == [{'total': 3, 'passed': 2, 'failed': 1, 'date': now}]

Просмотреть файл

@ -1,314 +0,0 @@
"""
Process validation data retrieved using fetch_validation_data.py. Two types
of data are expected. A file at `validations/unlisted-addons.txt` that contains
the guid of each unlisted addon and input on STDIN which has the validation
JSON data for each validation to check. See fetch_validation_data.py for how
this data is retrieved. Results are returned on STDOUT.
The following reports are supported:
* count - Return signing errors ordered by addon unique frequency in the
format: `error.id.dot.separated total_count unique_addon_count`.
* context - Return the context for 5 most common signing errors in the JSON
format: `{"context": ["", ...], "error": "error.id"}`.
* automated_count - Return daily totals for automated reviews.
Usage:
cat my-test-data-*.txt | python validations.py <report> > results.txt
"""
import csv
import functools
import itertools
import json
import os
import sys
from collections import defaultdict
from datetime import datetime
# Activity-log action ids for manual review decisions; they match the
# (PRELIMINARY_VERSION, REJECT_VERSION) values queried by
# fetch_validation_data.py.
LOG_ACTION_PRELIMINARY = 42
LOG_ACTION_REJECTED = 43
def parse_validations(results):
    """Lazily deserialize each item in `results` from JSON."""
    for result in results:
        yield json.loads(result)
def load_validations_by_dated_filename(filenames, load_file=None):
    """Load JSON validations, stamping each with the date parsed from its
    filename (expected format: YYYY-MM-DD.txt).

    `load_file` may override how a single file is loaded (used by tests).
    """
    def load_with_date(filename):
        # The filename itself carries the validation date.
        date = datetime.strptime(os.path.basename(filename),
                                 '%Y-%m-%d.txt').date()
        with open(filename) as f:
            for result in parse_validations(f):
                result['date'] = date
                yield result

    loader = load_with_date if load_file is None else load_file
    for filename in filenames:
        for result in loader(filename.strip()):
            yield result
def unlisted_validations(results, unlisted_addons=None):
    """Filter `results` to validations of unlisted addons: either flagged
    listed=False, or whose id appears in the `unlisted_addons` guid set."""
    if unlisted_addons is None:
        unlisted_addons = get_unlisted_addons()
    for result in results:
        metadata = result['metadata']
        if 'id' not in metadata:
            continue
        # Addons default to listed when the flag is absent.
        if not metadata.get('listed', True) or metadata['id'] in unlisted_addons:
            yield result
def severe_validations(results):
    """Filter `results` to validations with low or higher signing severity
    (trivial-only summaries are dropped)."""
    for result in results:
        summary = result['signing_summary']
        if any(summary[level] > 0 for level in ('high', 'medium', 'low')):
            yield result
def automated_validations(results, unlisted_addons=None, lite_addons=None):
    """Filter `results` to validations eligible for automated signing
    (whether or not they passed): the addon id must appear in BOTH the
    unlisted and the lite guid sets."""
    if unlisted_addons is None:
        unlisted_addons = get_unlisted_addons()
    if lite_addons is None:
        lite_addons = get_lite_addons()
    for result in results:
        addon = result['metadata'].get('id')
        if addon is not None and addon in unlisted_addons and addon in lite_addons:
            yield result
def error_messages(results):
    """Flatten validations into one record per signing-severity message."""
    for result in results:
        addon = result['metadata']['id']
        for message in result['messages']:
            if 'signing_severity' in message:
                yield {'addon': addon,
                       'message_id': '.'.join(message['id']),
                       'context': message['context']}
def sort_by_message(results):
    """Return `results` sorted by each record's message_id."""
    return sorted(results, key=lambda record: record['message_id'])
def group_by_message(results):
    """Group `results` by message_id (input must already be sorted by it,
    as itertools.groupby only groups adjacent records)."""
    return itertools.groupby(results, key=lambda record: record['message_id'])
def extract_error_results(results):
    """Aggregate per-error totals from (error, messages) groups."""
    for error, messages in results:
        batch = list(messages)
        yield {
            'error': error,
            'total': len(batch),
            # Count each addon once no matter how many times it hit the error.
            'unique': len(set(msg['addon'] for msg in batch)),
            'contexts': [msg['context'] for msg in batch],
        }
def sort_results_by_unique(results):
    """Sort validation errors by number of unique occurrences, descending."""
    return sorted(results, key=lambda record: record['unique'], reverse=True)
def format_error_count(results):
    """Render each aggregated error as 'error total unique'."""
    for result in results:
        yield '{error} {total} {unique}'.format(**result)
def format_contexts(results):
    """Emit one JSON line per context, carrying just error and context."""
    for result in results:
        error = result['error']
        for context in result['contexts']:
            yield json.dumps({'error': error, 'context': context})
def set_from_file(filename):
"""Create a set from a file containing line separated strings."""
with open(filename) as f:
return set(guid.strip() for guid in f)
def get_unlisted_addons():
    """Load the unlisted addons file (written by fetch_validation_data.py)
    as a set of guids."""
    return set_from_file('validations/unlisted-addons.txt')
def get_lite_addons():
    """Load the lite addons file (written by fetch_validation_data.py)
    as a set of guids."""
    return set_from_file('validations/lite-addons.txt')
def context_pipeline():
    """Pipeline for generating error context messages.

    Stages run in order via reduce_pipeline: parse JSON lines, keep unlisted
    validations, keep severe ones, explode into per-message records, sort and
    group by message id, aggregate, rank by unique addons, then emit the
    contexts of the top 5 errors as JSON lines.
    """
    return [
        parse_validations,
        unlisted_validations,
        severe_validations,
        error_messages,
        sort_by_message,
        group_by_message,
        extract_error_results,
        sort_results_by_unique,
        # Only get context for the top 5 errors (they're already sorted by
        # unique occurrences so we can just take the first 5).
        lambda results: itertools.islice(results, 5),
        format_contexts,
    ]
def count_pipeline():
    """Pipeline for getting error counts.

    Same stages as context_pipeline up to the ranking step, then renders
    'error total unique' lines instead of contexts.
    """
    return [
        parse_validations,
        unlisted_validations,
        severe_validations,
        error_messages,
        sort_by_message,
        group_by_message,
        extract_error_results,
        sort_results_by_unique,
        format_error_count,
    ]
def automated_count(results):
    """Total automated review pass/fail for each day.

    Returns a generator of {'date', 'passed', 'failed', 'total'} dicts, one
    per distinct 'date' value seen in `results`, in arbitrary order (the
    pipeline sorts afterwards).
    """
    by_date = defaultdict(lambda: {'passed': 0, 'failed': 0})
    for result in results:
        outcome = 'passed' if result['passed_auto_validation'] else 'failed'
        by_date[result['date']][outcome] += 1
    # .items() instead of the Python-2-only .iteritems(): identical iteration
    # on Python 2 and keeps the module importable under Python 3.
    return ({'date': day, 'passed': totals['passed'],
             'failed': totals['failed'],
             'total': totals['passed'] + totals['failed']}
            for day, totals in by_date.items())
def manual_count(results):
    """Total manual review pass/fail for each day.

    Raises ValueError if a record's action is neither preliminary approval
    nor rejection. Returns a generator of {'date', 'passed', 'failed',
    'total'} dicts in arbitrary order.
    """
    by_date = defaultdict(lambda: {'passed': 0, 'failed': 0})
    for result in results:
        # NOTE(review): '%H:%M%S' lacks a colon, but it matches the format
        # written by fetch_validation_data.py; change both together or not
        # at all.
        day = datetime.strptime(result['created'], '%Y-%m-%dT%H:%M%S').date()
        if result['action'] == LOG_ACTION_PRELIMINARY:
            by_date[day]['passed'] += 1
        elif result['action'] == LOG_ACTION_REJECTED:
            by_date[day]['failed'] += 1
        else:
            raise ValueError('Unexpected action {action}'.format(**result))
    # .items() instead of the Python-2-only .iteritems(): identical iteration
    # on Python 2 and keeps the module importable under Python 3.
    return ({'date': day, 'passed': totals['passed'],
             'failed': totals['failed'],
             'total': totals['passed'] + totals['failed']}
            for day, totals in by_date.items())
def print_tsv(results):
    """Format `results` as tab separated values on STDOUT."""
    writer = None
    for record in results:
        if writer is None:
            # Defer creating the writer until the first record arrives so the
            # header row can be derived from its keys.
            writer = csv.DictWriter(sys.stdout, fieldnames=record.keys(),
                                    dialect='excel-tab')
            writer.writeheader()
        writer.writerow(record)
# Shared pipeline stage: order daily count records chronologically.
sort_by_date = functools.partial(sorted, key=lambda count: count['date'])
def automated_count_pipeline(unlisted_addons=None, lite_addons=None,
                             load_file=None):
    """Pipeline for generating daily pass/fail for automated reviews.

    The keyword arguments let tests inject fixed guid sets and a fake file
    loader instead of touching the filesystem.
    """
    load = functools.partial(load_validations_by_dated_filename,
                             load_file=load_file)
    select = functools.partial(automated_validations,
                               unlisted_addons=unlisted_addons,
                               lite_addons=lite_addons)
    return [load, select, automated_count, sort_by_date]
def manual_count_pipeline():
    """Pipeline for generating daily pass/fail for manual reviews."""
    return [parse_validations, manual_count, sort_by_date]
def reduce_pipeline(pipeline, iterable):
    """Feed `iterable` through each stage of `pipeline` in order and return
    the final stage's result."""
    return functools.reduce(lambda value, stage: stage(value),
                            pipeline, iterable)
def print_results(results):
    """Print the results of a pipeline: one line per item for iterables,
    the bare value otherwise.

    print() call form: identical on Python 2 for a single argument and
    forward-compatible with Python 3.  NOTE(review): the __iter__ check
    treats str differently across versions (py2 str has no __iter__);
    pipelines here only return generators/lists, so this doesn't bite.
    """
    if hasattr(results, '__iter__'):
        for line in results:
            print(line)
    else:
        print(results)
# Maps the CLI action name to the factory that builds its pipeline.
ACTIONS = {
    'context': context_pipeline,
    'count': count_pipeline,
    'automated_count': automated_count_pipeline,
    'manual_count': manual_count_pipeline,
}
# Maps the FORMAT environment variable value to an output renderer.
FORMATTERS = {
    'print': print_results,
    'tsv': print_tsv,
}
def main(action):
    """Run the pipeline named by `action` over STDIN, rendering with the
    formatter named by the FORMAT environment variable (default: 'print').

    Raises ValueError for an unknown action or formatter.
    """
    # Renamed from `format`, which shadowed the builtin of the same name.
    output_format = os.environ.get('FORMAT', 'print')
    if action not in ACTIONS:
        raise ValueError('{0} is not a valid action'.format(action))
    elif output_format not in FORMATTERS:
        raise ValueError('{0} is not a valid formatter'.format(output_format))
    else:
        FORMATTERS[output_format](
            reduce_pipeline(ACTIONS[action](), sys.stdin))
if __name__ == '__main__':
    if len(sys.argv) != 2 or sys.argv[1] not in ACTIONS:
        # print() form: same output on Python 2 for one argument,
        # Python 3 compatible.
        print("""Usage: python {name} <action>
action: {actions}
values are read from STDIN""".format(
            name=sys.argv[0], actions='|'.join(ACTIONS)))
        sys.exit(1)
    else:
        main(sys.argv[1])

Просмотреть файл

@ -1,38 +0,0 @@
#!/usr/bin/env python
import optparse
import os
import subprocess
def main():
    """Extract an XPI into a sibling directory, or zip a directory back up.

    Exactly one positional argument (the XPI or the directory) is required,
    together with either -x/--extract or -c/--recreate.
    """
    p = optparse.OptionParser(
        usage='%prog [options] [-x addon-1.0.xpi] [-c /path/to/addon-1.0/]')
    p.add_option('-x', '--extract',
                 help='Extracts xpi into current directory',
                 action='store_true')
    p.add_option('-c', '--recreate',
                 help='Zips an extracted xpi into current directory',
                 action='store_true')
    (options, args) = p.parse_args()
    if len(args) != 1:
        p.error("Incorrect usage")
    addon = os.path.abspath(args[0])
    if options.extract:
        # Unzip into a directory named after the archive (sans extension).
        d = os.path.splitext(addon)[0]
        os.mkdir(d)
        os.chdir(d)
        subprocess.check_call(['unzip', addon])
        # print() form: same output on Python 2, Python 3 compatible.
        print("Extracted to %s" % d)
    elif options.recreate:
        xpi = "%s.xpi" % addon
        if os.path.exists(xpi):
            p.error("Refusing to overwrite %r" % xpi)
        os.chdir(addon)
        subprocess.check_call(['zip', '-r', xpi] + os.listdir(os.getcwd()))
        print("Created %s" % xpi)
    else:
        p.error("Incorrect usage")
# Allow use as a standalone command-line tool.
if __name__ == '__main__':
    main()