fix diff, new reference files
This commit is contained in:
Родитель
b1ffc6e4b2
Коммит
cc8e4b8063
|
@ -438,7 +438,7 @@ class BugHistoryParser(object):
|
|||
Log.note(
|
||||
"[Bug {{bug_id}}]: PROBLEM inconsistent change at {{timestamp}}: {{field}} was {{expecting|quote}} got {{observed|quote}}",
|
||||
bug_id=self.currBugID,
|
||||
timestamp=row_in.modified_timestamp,
|
||||
timestamp=row_in.modified_ts,
|
||||
field=row_in.field_name,
|
||||
expecting=expected_value,
|
||||
observed=new_value
|
||||
|
@ -1120,7 +1120,7 @@ class ApplyDiff(object):
|
|||
if isinstance(text, ApplyDiff):
|
||||
if text.timestamp != timestamp:
|
||||
# DIFFERNT DIFF
|
||||
self._text = str(text) # ACTUALIZE THE EFFECTS OF THE OTHER DIFF
|
||||
self._text = str(text) # ACTUALIZE THE EFFECTS OF THE OTHER DIFF
|
||||
else:
|
||||
# CHAIN THE DIFF
|
||||
text.parent = self
|
||||
|
@ -1143,7 +1143,8 @@ class ApplyDiff(object):
|
|||
return self._diff
|
||||
|
||||
def __data__(self):
|
||||
return self.__str__()
|
||||
output = self.__str__()
|
||||
return output if output else None
|
||||
|
||||
def __gt__(self, other):
|
||||
return str(self)>other
|
||||
|
@ -1164,7 +1165,8 @@ class ApplyDiff(object):
|
|||
diff = self.diff
|
||||
if not self.result:
|
||||
try:
|
||||
self.result = "\n".join(apply_diff(text.split("\n"), diff.split("\n"), reverse=self.reverse, verify=DEBUG_DIFF))
|
||||
new_text = apply_diff(coalesce(text, "").split("\n"), diff.split("\n"), reverse=self.reverse, verify=DEBUG_DIFF)
|
||||
self.result = "\n".join(new_text)
|
||||
except Exception as e:
|
||||
e = Except.wrap(e)
|
||||
self.result = "<ERROR>"
|
||||
|
|
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
Различия файлов скрыты, потому что одна или несколько строк слишком длинны
Разница между файлами не показана из-за своего большого размера
Загрузить разницу
|
@ -188,7 +188,7 @@ class TestETL(unittest.TestCase):
|
|||
break
|
||||
Log.error("Comments do not match reference\n{{sample}}", sample=can[MIN([0, found - 100]):found + 100])
|
||||
|
||||
@skip("working on reference file")
|
||||
|
||||
def test_public_etl(self):
|
||||
"""
|
||||
ENSURE ETL GENERATES WHAT'S IN THE REFERENCE FILE
|
||||
|
@ -696,11 +696,7 @@ def compare_both(candidate, reference, settings, bug_ids):
|
|||
v.etl.timestamp = None
|
||||
|
||||
pre_ref_versions = get_all_bug_versions(None, bug_id, max_time, esq=referenceq)
|
||||
ref_versions = jx.sort(
|
||||
# ADDED TO FIX OLD PRODUCTION BUG VERSIONS
|
||||
[compare_es.old2new(x, settings.bugzilla.expires_on) for x in pre_ref_versions],
|
||||
"modified_ts"
|
||||
)
|
||||
ref_versions = jx.sort(pre_ref_versions, "modified_ts")
|
||||
for v in ref_versions:
|
||||
v.etl.timestamp = None
|
||||
|
||||
|
|
|
@ -84,67 +84,3 @@ def get_private_bugs(es):
|
|||
return output
|
||||
|
||||
|
||||
def old2new(bug, max_date):
|
||||
"""
|
||||
CONVERT THE OLD ES FORMAT TO THE NEW
|
||||
THESE ARE KNOWN CHANGES THAT SHOULD BE MADE TO THE PRODUCTION VERSION
|
||||
"""
|
||||
# if bug.everconfirmed != None:
|
||||
# if bug.everconfirmed == "":
|
||||
# bug.everconfirmed = None
|
||||
# else:
|
||||
# bug.everconfirmed = int(bug.everconfirmed)
|
||||
|
||||
# bug = json2value(value2json(bug).replace("bugzilla: other b.m.o issues ", "bugzilla: other b.m.o issues"))
|
||||
|
||||
# if bug.expires_on > max_date:
|
||||
# bug.expires_on = MAX_TIMESTAMP
|
||||
# if bug.votes != None:
|
||||
# bug.votes = int(bug.votes)
|
||||
# bug.dupe_by = convert.value2intlist(bug.dupe_by)
|
||||
# if bug.votes == 0:
|
||||
# del bug["votes"]
|
||||
# if Math.is_integer(bug.remaining_time) and int(bug.remaining_time) == 0:
|
||||
# bug.remaining_time = 0
|
||||
# if bug.cf_due_date != None and not Math.is_number(bug.cf_due_date):
|
||||
# bug.cf_due_date = convert.datetime2milli(
|
||||
# convert.string2datetime(bug.cf_due_date, "%Y-%m-%d")
|
||||
# )
|
||||
# bug.changes = jx.sort(listwrap(bug.changes), "field_name")
|
||||
|
||||
# if bug.everconfirmed == 0:
|
||||
# del bug["everconfirmed"]
|
||||
# if bug.id == "692436_1336314345":
|
||||
# bug.votes = 3
|
||||
|
||||
# try:
|
||||
# if bug.cf_last_resolved == None:
|
||||
# pass
|
||||
# elif Math.is_number(bug.cf_last_resolved):
|
||||
# bug.cf_last_resolved = long(bug.cf_last_resolved)
|
||||
# else:
|
||||
# bug.cf_last_resolved = convert.datetime2milli(convert.string2datetime(bug.cf_last_resolved, "%Y-%m-%d %H:%M:%S"))
|
||||
# except Exception as e:
|
||||
# pass
|
||||
|
||||
for c in listwrap(bug.changes):
|
||||
if c.attach_id == '':
|
||||
c.attach_id = None
|
||||
else:
|
||||
c.attach_id = convert.value2int(c.attach_id)
|
||||
|
||||
bug.attachments = jx.sort(listwrap(bug.attachments), "attach_id")
|
||||
for a in bug.attachments:
|
||||
a.attach_id = convert.value2int(a.attach_id)
|
||||
for k, v in list(a.items()):
|
||||
if k.startswith('attachments') and k.endswith("isobsolete") or k.endswith("ispatch") or k.endswith("isprivate"):
|
||||
del a[k]
|
||||
k = k.replace('attachments.', '').replace('attachments_', '')
|
||||
a[k] = convert.value2int(v)
|
||||
elif k in ('attachments_mimetype','attachments.mimetype'):
|
||||
del a[k]
|
||||
k = k.replace('attachments.', '').replace('attachments_', '')
|
||||
a[k] = v
|
||||
|
||||
bug = transform_bugzilla.normalize(bug)
|
||||
return bug
|
||||
|
|
|
@ -12,12 +12,11 @@ from __future__ import absolute_import
|
|||
from __future__ import division
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from mo_times import Date
|
||||
|
||||
_range = range
|
||||
|
||||
from mo_times import Date
|
||||
from collections import Mapping
|
||||
|
||||
from jx_base import query
|
||||
from jx_python import expressions as _expressions
|
||||
from jx_python import flat_list, group_by
|
||||
|
|
|
@ -22,11 +22,10 @@ from datetime import datetime as builtin_datetime
|
|||
from datetime import timedelta, date
|
||||
from json.encoder import encode_basestring
|
||||
|
||||
import sys
|
||||
|
||||
from mo_dots import coalesce, wrap, get_module, Data
|
||||
from mo_dots import coalesce, wrap, get_module, Data, Null
|
||||
from mo_future import text_type, xrange, binary_type, round as _round, PY3, get_function_name, zip_longest, transpose
|
||||
from mo_logs.convert import datetime2unix, datetime2string, value2json, milli2datetime, unix2datetime
|
||||
|
||||
# from mo_files.url import value2url_param
|
||||
|
||||
FORMATTERS = {}
|
||||
|
@ -746,19 +745,18 @@ def apply_diff(text, diff, reverse=False, verify=True):
|
|||
+Content Team Engagement & Tasks : https://appreview.etherpad.mozilla.org/40
|
||||
"""
|
||||
|
||||
output = text
|
||||
if not diff:
|
||||
return output
|
||||
|
||||
diff = [d for d in diff if d != "\\ No newline at end of file"] # ANOTHER REPAIR
|
||||
|
||||
R = xrange(0, len(diff))
|
||||
R = reversed(R) if reverse else R
|
||||
for start_of_hunk in R:
|
||||
header = diff[start_of_hunk]
|
||||
if not header.strip() or not header.startswith("@@"):
|
||||
continue
|
||||
return text
|
||||
output = text
|
||||
diff = [d for d in diff if d and d != "\\ No newline at end of file"] + ["@@"] # ANOTHER REPAIR
|
||||
hunks = [
|
||||
(diff[start_hunk], diff[start_hunk+1:end_hunk])
|
||||
for start_hunk, end_hunk in pairwise(i for i, l in enumerate(diff) if l.startswith('@@'))
|
||||
]
|
||||
if reverse:
|
||||
hunks = reversed(hunks)
|
||||
|
||||
for header, hunk_body in hunks:
|
||||
matches = DIFF_PREFIX.match(header.strip())
|
||||
if not matches:
|
||||
if not _Log:
|
||||
|
@ -774,16 +772,14 @@ def apply_diff(text, diff, reverse=False, verify=True):
|
|||
if add.length == 0 and add.start == 0:
|
||||
add.start = remove.start
|
||||
|
||||
def repair_hunk(diff):
|
||||
def repair_hunk(hunk_body):
|
||||
# THE LAST DELETED LINE MAY MISS A "\n" MEANING THE FIRST
|
||||
# ADDED LINE WILL BE APPENDED TO THE LAST DELETED LINE
|
||||
# EXAMPLE: -kward has the details.+kward has the details.
|
||||
# DETECT THIS PROBLEM FOR THIS HUNK AND FIX THE DIFF
|
||||
if reverse:
|
||||
last_line = output[-1]
|
||||
for problem_index, problem_line in enumerate(diff[start_of_hunk+1:]):
|
||||
if problem_line.startswith('@@'):
|
||||
return diff
|
||||
last_line = hunk_body[-1]
|
||||
for problem_index, problem_line in enumerate(hunk_body):
|
||||
if problem_line.startswith('-') and problem_line.endswith('+' + last_line):
|
||||
split_point = len(problem_line) - (len(last_line) + 1)
|
||||
break
|
||||
|
@ -791,12 +787,10 @@ def apply_diff(text, diff, reverse=False, verify=True):
|
|||
split_point = len(last_line) + 1
|
||||
break
|
||||
else:
|
||||
return diff
|
||||
return hunk_body
|
||||
else:
|
||||
last_line = output[-1]
|
||||
for problem_index, problem_line in enumerate(diff[start_of_hunk+1:]):
|
||||
if problem_line.startswith('@@'):
|
||||
return diff
|
||||
last_line = hunk_body[-1]
|
||||
for problem_index, problem_line in enumerate(hunk_body):
|
||||
if problem_line.startswith('+') and problem_line.endswith('-' + last_line):
|
||||
split_point = len(problem_line) - (len(last_line) + 1)
|
||||
break
|
||||
|
@ -804,16 +798,15 @@ def apply_diff(text, diff, reverse=False, verify=True):
|
|||
split_point = len(last_line) + 1
|
||||
break
|
||||
else:
|
||||
return diff
|
||||
return hunk_body
|
||||
|
||||
new_diff = (
|
||||
diff[:start_of_hunk + 1 + problem_index] +
|
||||
new_hunk_body = (
|
||||
hunk_body[:problem_index] +
|
||||
[problem_line[:split_point], problem_line[split_point:]] +
|
||||
diff[start_of_hunk + 1 + problem_index + 1:]
|
||||
hunk_body[problem_index + 1:]
|
||||
)
|
||||
return new_diff
|
||||
diff = repair_hunk(diff)
|
||||
hunk_body = diff[start_of_hunk + 1:start_of_hunk + 1 + add.length + remove.length]
|
||||
return new_hunk_body
|
||||
hunk_body = repair_hunk(hunk_body)
|
||||
|
||||
if reverse:
|
||||
new_output = (
|
||||
|
@ -889,3 +882,15 @@ def wordify(value):
|
|||
|
||||
|
||||
|
||||
|
||||
def pairwise(values):
|
||||
"""
|
||||
WITH values = [a, b, c, d, ...]
|
||||
RETURN [(a, b), (b, c), (c, d), ...]
|
||||
"""
|
||||
i = iter(values)
|
||||
a = next(i)
|
||||
|
||||
for b in i:
|
||||
yield (a, b)
|
||||
a = b
|
||||
|
|
Загрузка…
Ссылка в новой задаче