Merge pull request #47 from mozilla/endpoint-filters

Endpoint filters
This commit is contained in:
Mauro Doglio 2013-08-07 13:09:10 -07:00
Parent commits: a4df134650 d5de5cdf8f
Commit 1edd9f289e
3 changed files: 69 additions and 18 deletions

View file

@ -131,35 +131,45 @@ class JobsModel(TreeherderModelBase):
return id_iter.get_column_data('id')
def get_result_set_list(self, page, limit, **kwargs):
    """
    Retrieve a list of ``result_sets`` (also known as ``pushes``)
    with associated revisions.  No jobs.

    Mainly used by the restful api to list the pushes in the UI.

    Parameters
    ----------
    page, limit :
        Paging values bound into the SQL ``LIMIT ?,?`` clause.
    author : optional keyword
        When present, restricts the result sets to revisions by this
        author.  The value originates from an untrusted HTTP query
        string.

    Returns an iterator over the matching result-set rows.
    """
    repl = [""]
    placeholders = [page, limit]
    if "author" in kwargs:
        # Bind the author through a placeholder rather than formatting
        # it into the SQL text: direct interpolation of a request
        # parameter is an SQL-injection vector.  The REP0 fragment is
        # substituted ahead of the LIMIT clause, so its placeholder
        # must precede page/limit in the placeholder list.
        repl = [" AND `rev`.`author` = ?"]
        placeholders = [kwargs["author"], page, limit]
    proc = "jobs.selects.get_result_set_list"
    push_dict = self.get_jobs_dhub().execute(
        proc=proc,
        placeholders=placeholders,
        debug_show=self.DEBUG,
        return_type='iter',
        replace=repl,
    )
    return push_dict
def get_result_set_job_list(self, result_set_id, **kwargs):
    """
    Retrieve a list of ``jobs`` and results for a result_set.

    Mainly used by the restful api to list the job results in the UI.

    Parameters
    ----------
    result_set_id :
        Id of the result set whose jobs are listed; bound to the
        ``WHERE result_set_id = ?`` placeholder.
    job_type_name : optional keyword
        When present, restricts the jobs to this job-type name.  The
        value originates from an untrusted HTTP query string.

    Returns an iterator over the matching job rows.
    """
    repl = [""]
    placeholders = [result_set_id]
    if "job_type_name" in kwargs:
        # Bind the filter value through a placeholder instead of
        # formatting it into the SQL text — interpolating a request
        # parameter directly is an SQL-injection vector.  REP0 is
        # substituted after the result_set_id predicate, so its
        # placeholder is appended after result_set_id.
        repl = [" AND jt.`name` = ?"]
        placeholders = [result_set_id, kwargs["job_type_name"]]
    proc = "jobs.selects.get_result_set_job_list"
    push_dict = self.get_jobs_dhub().execute(
        proc=proc,
        placeholders=placeholders,
        debug_show=self.DEBUG,
        return_type='iter',
        replace=repl,
    )
    return push_dict

View file

@ -177,7 +177,10 @@
ON rs.id = rm.result_set_id
LEFT JOIN revision as rev
ON rm.revision_id = rev.id
LIMIT ?,?",
WHERE 1
REP0
LIMIT ?,?
",
"host": "read_host"
},
"get_result_set_by_id":{
@ -202,10 +205,13 @@
j.`job_guid`,
j.`build_platform_id`,
mp.`platform`,
m.`name`,
jt.`name`,
jt.`symbol`,
jt.`description`,
m.`name` as machine_name,
jt.`name` as job_type_name,
jt.`symbol` as job_type_symbol,
jt.`description` as job_type_description,
jg.`name` as job_group_name,
jg.`symbol` as job_group_symbol,
jg.`description` as job_group_description,
j.`who`,
j.`result_set_id`,
j.`result`,
@ -219,7 +225,11 @@
ON j.`build_platform_id` = bp.id
LEFT JOIN `treeherder`.`job_type` as jt
ON j.`job_type_id` = jt.id
WHERE `result_set_id` = ?",
LEFT JOIN `treeherder`.`job_group` as jg
ON jt.`job_group_id` = jg.id
WHERE `result_set_id` = ?
REP0
",
"host": "read_host"
}
}

View file

@ -144,11 +144,18 @@ class ResultSetViewSet(viewsets.ViewSet):
"""
GET method for list of ``resultset`` records with revisions
"""
filters = ["author"]
try:
page = request.QUERY_PARAMS.get('page', 0)
jm = JobsModel(project)
objs = jm.get_result_set_list(page, 1000)
objs = jm.get_result_set_list(
page,
1000,
**dict((k, v) for k, v in request.QUERY_PARAMS.iteritems() if k in filters)
)
return Response(objs)
except DatasetNotFoundError as e:
return Response({"message": unicode(e)}, status=404)
@ -157,20 +164,30 @@ class ResultSetViewSet(viewsets.ViewSet):
finally:
jm.disconnect()
def get_warning_level(self, jobs):
def get_warning_level(self, groups):
"""
Return the most severe warning level for a list of jobs.
A color-based warning level based on the most severe
level in the list of jobs.
@@@ - This needs a better way.
"""
job_states = set([x["result"] for x in jobs])
job_states = []
for group in groups:
job_states.extend([job["result"] for job in group["jobs"]])
job_states = set(job_states)
if "busted" in job_states:
return "red"
if "fail" in job_states:
return "red"
elif "orange" in job_states:
return "orange"
elif "pending" in job_states:
return "grey"
elif "retry" in job_states:
return "grey"
elif "running" in job_states:
return "grey"
else:
@ -180,21 +197,35 @@ class ResultSetViewSet(viewsets.ViewSet):
"""
GET method implementation for detail view of ``resultset``
"""
filters = ["job_type_name"]
try:
jm = JobsModel(project)
rs = list(jm.get_result_set_by_id(pk))[0]
jobs_ungrouped = list(jm.get_result_set_job_list(pk))
jobs_ungrouped = list(jm.get_result_set_job_list(
pk,
**dict((k, v) for k, v in request.QUERY_PARAMS.iteritems() if k in filters)
))
# group these by their platforms for return
jobs_sorted = sorted(jobs_ungrouped, key=lambda x: x["platform"])
import itertools
rs["jobs"] = []
rs["platforms"] = []
# job_groups by platform
for k, g in itertools.groupby(jobs_sorted, key=lambda x: x["platform"]):
jobs = list(g)
rs["jobs"].append({
"platform": k,
"warning_level": self.get_warning_level(jobs),
"jobs": jobs
job_groups = sorted(list(g), key=lambda x: x["job_group_symbol"])
groups = []
for jg_k, jg_g in itertools.groupby(job_groups, key=lambda x: x["job_group_symbol"]):
jobs = list(jg_g)
groups.append({
"symbol": jg_k,
"jobs": jobs
})
rs["platforms"].append({
"name": k,
"groups": groups,
"warning_level": self.get_warning_level(groups)
})
return Response(rs)
except DatasetNotFoundError as e:
return Response(