2016-05-17 01:53:22 +03:00
|
|
|
# This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
|
|
|
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
|
|
|
|
|
|
|
from __future__ import absolute_import, print_function, unicode_literals
|
|
|
|
|
2016-06-07 21:06:12 +03:00
|
|
|
import concurrent.futures as futures
|
2016-05-17 01:53:22 +03:00
|
|
|
import requests
|
2016-06-07 21:06:12 +03:00
|
|
|
import requests.adapters
|
2016-05-17 01:53:22 +03:00
|
|
|
import json
|
|
|
|
import collections
|
2016-06-06 20:23:03 +03:00
|
|
|
import os
|
2016-05-18 18:58:21 +03:00
|
|
|
import logging
|
2016-05-17 01:53:22 +03:00
|
|
|
|
|
|
|
from slugid import nice as slugid
|
|
|
|
|
2016-05-18 18:58:21 +03:00
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
2016-06-05 22:49:41 +03:00
|
|
|
def create_tasks(taskgraph, label_to_taskid):
    """Submit every task in ``taskgraph`` to the Taskcluster queue.

    Tasks are submitted concurrently, but a task is never submitted before
    all of its dependencies have been submitted successfully.

    :param taskgraph: a TaskGraph whose ``graph.visit_postorder()`` yields
        task ids in dependency order and whose ``tasks[task_id].task`` is the
        JSON-serializable task definition.
    :param label_to_taskid: mapping of task label -> task id for every task
        in the graph.
    :raises: any exception raised while creating a task (propagated via the
        futures' ``result()`` calls).
    """
    # TODO: use the taskGroupId of the decision task
    task_group_id = slugid()
    # Invert the mapping so failures/logging can refer to tasks by label.
    # .items() (not the py2-only .iteritems()) keeps this 2/3 compatible,
    # matching the __future__ imports at the top of the file.
    taskid_to_label = {t: l for l, t in label_to_taskid.items()}

    # One shared session so connections to the queue proxy are pooled.
    session = requests.Session()

    # TASK_ID is set when running inside a decision task; absent when run
    # locally, in which case tasks are not made to depend on anything extra.
    decision_task_id = os.environ.get('TASK_ID')

    with futures.ThreadPoolExecutor(requests.adapters.DEFAULT_POOLSIZE) as e:
        fs = {}

        # We can't submit a task until its dependencies have been submitted.
        # So our strategy is to walk the graph and submit tasks once all
        # their dependencies have been submitted.
        #
        # Using visit_postorder() here isn't the most efficient: we'll
        # block waiting for dependencies of task N to submit even though
        # dependencies for task N+1 may be finished. If we need to optimize
        # this further, we can build a graph of task dependencies and walk
        # that.
        for task_id in taskgraph.graph.visit_postorder():
            task_def = taskgraph.tasks[task_id].task

            # if this task has no dependencies, make it depend on this decision
            # task so that it does not start immediately; and so that if this loop
            # fails halfway through, none of the already-created tasks run.
            if decision_task_id and not task_def.get('dependencies'):
                task_def['dependencies'] = [decision_task_id]

            task_def['taskGroupId'] = task_group_id

            # Wait for dependencies before submitting this.  Use .get() with
            # a default: when there is no decision task, a task with no
            # dependencies has no 'dependencies' key at all, and direct
            # indexing would raise KeyError.
            deps_fs = [fs[dep] for dep in task_def.get('dependencies', [])
                       if dep in fs]
            for f in futures.as_completed(deps_fs):
                f.result()

            fs[task_id] = e.submit(_create_task, session, task_id,
                                   taskid_to_label[task_id], task_def)

        # Wait for all futures to complete, re-raising the first submission
        # error (if any) in this thread.
        for f in futures.as_completed(fs.values()):
            f.result()
|
2016-05-17 01:53:22 +03:00
|
|
|
|
|
|
|
def _create_task(session, task_id, label, task_def):
    """PUT a single task definition to the Taskcluster queue.

    :param session: a requests.Session used for connection pooling.
    :param task_id: the slug id under which to create the task.
    :param label: the task's human-readable label (used only for logging).
    :param task_def: the JSON-serializable task definition.
    :raises requests.HTTPError: if the queue responds with a non-200 status.
    """
    # create the task using 'http://taskcluster/queue', which is proxied to the queue service
    # with credentials appropriate to this job.
    logger.debug("Creating task with taskId {} for {}".format(task_id, label))
    res = session.put('http://taskcluster/queue/v1/task/{}'.format(task_id),
                      data=json.dumps(task_def))
    if res.status_code != 200:
        # Prefer the queue's structured error message; fall back to the raw
        # body if the response isn't JSON or lacks a 'message' key.  Catch
        # Exception (not a bare except) so KeyboardInterrupt/SystemExit
        # still propagate.
        try:
            logger.error(res.json()['message'])
        except Exception:
            logger.error(res.text)
        res.raise_for_status()
|