Bug 1133482 - Always ensure files are closed after being open()ed

Ed Morley 2015-03-03 01:59:49 +00:00
Parent 4cd29c1f0a
Commit 5e0f36ae91
13 changed files with 102 additions and 125 deletions
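The change is the same in all thirteen files: a bare open(), whose file object is closed only whenever the garbage collector reclaims it, becomes a with block, which closes the file deterministically even if the body raises. A minimal sketch of the before/after pattern (credentials.json is a placeholder path, not a file from this commit):

    import json

    # Before: closing is left to the GC, which is prompt under CPython's
    # refcounting but not guaranteed in general (e.g. on PyPy).
    content = json.loads(open("credentials.json").read())

    # After: the context manager closes the file on block exit,
    # whether json.loads() succeeds or raises.
    with open("credentials.json") as f:
        content = json.loads(f.read())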

View file

@@ -40,7 +40,9 @@ while current_try < RETRIES:
         continue
 
 # Load and process the json...
-content = json.loads(open(CREDENTIALS).read())
+with open(CREDENTIALS) as f:
+    content = json.loads(f.read())
 print('Projects:', content.keys())
 
 # It is possible we have a database setup but no fixtures imported if this

View file

@@ -11,33 +11,31 @@ from thclient import (TreeherderJobCollection)
 from tests import test_utils
 
+base_dir = os.path.dirname(__file__)
+
 
 @pytest.fixture
 def pending_jobs():
     """returns a list of buildapi pending jobs"""
-    return json.loads(open(
-        os.path.join(os.path.dirname(__file__), "pending.json")
-    ).read())
+    with open(os.path.join(base_dir, "pending.json")) as f:
+        return json.loads(f.read())
 
 
 @pytest.fixture
 def running_jobs():
     """returns a list of buildapi running jobs"""
-    return json.loads(open(
-        os.path.join(os.path.dirname(__file__), "running.json")
-    ).read())
+    with open(os.path.join(base_dir, "running.json")) as f:
+        return json.loads(f.read())
 
 
 @pytest.fixture
 def completed_jobs(sample_data):
     """returns a list of pulse completed jobs"""
-    base_dir = os.path.dirname(__file__)
-    content = open(
-        os.path.join(os.path.dirname(__file__), "finished.json")
-    ).read()
-    t = Template(content)
-    c = Context({"base_dir": base_dir})
-    return json.loads(t.render(c))
+    with open(os.path.join(base_dir, "finished.json")) as f:
+        content = f.read()
+        t = Template(content)
+        c = Context({"base_dir": base_dir})
+        return json.loads(t.render(c))
 
 
 @pytest.fixture

View file

@@ -22,8 +22,8 @@ def mock_extract(monkeypatch):
             "sample_data",
             "bug_list.json"
         )
-        content = json.loads(open(bug_list_path).read())
-        return content
+        with open(bug_list_path) as f:
+            return json.loads(f.read())
 
     monkeypatch.setattr(BzApiBugProcess,
                         'extract',

View file

@@ -15,7 +15,8 @@ def test_ingest_hg_pushlog(jm, initial_data, test_base_dir,
     """ingesting a number of pushes should populate result set and revisions"""
 
     pushlog_path = os.path.join(test_base_dir, 'sample_data', 'hg_pushlog.json')
-    pushlog_content = open(pushlog_path).read()
+    with open(pushlog_path) as f:
+        pushlog_content = f.read()
     pushlog_fake_url = "http://www.thisismypushlog.com"
     push_num = 10
 
     responses.add(responses.GET, pushlog_fake_url,
@@ -61,7 +62,8 @@ def test_ingest_hg_pushlog_already_stored(jm, initial_data, test_base_dir,
     e.g. trying to store [A,B] with A already stored, B will be stored"""
 
     pushlog_path = os.path.join(test_base_dir, 'sample_data', 'hg_pushlog.json')
-    pushlog_content = open(pushlog_path).read()
+    with open(pushlog_path) as f:
+        pushlog_content = f.read()
 
     pushes = json.loads(pushlog_content).values()
     first_push, second_push = pushes[0:2]
@@ -151,7 +153,8 @@ def test_ingest_hg_pushlog_cache_last_push(jm, initial_data, test_repository,
     pushlog_path = os.path.join(test_base_dir, 'sample_data',
                                 'hg_pushlog.json')
-    pushlog_content = open(pushlog_path).read()
+    with open(pushlog_path) as f:
+        pushlog_content = f.read()
     pushlog_fake_url = "http://www.thisismypushlog.com"
 
     responses.add(responses.GET, pushlog_fake_url, body=pushlog_content,
                   status=200, content_type='application/json')

View file

@@ -41,7 +41,6 @@ def do_test(log, check_errors=True):
     # :: use to create the ``exp`` files, if you're making a lot of them
     # with open(SampleData().get_log_path("{0}.logview.json".format(log)), "w") as f:
     #     f.write(json.dumps(act, indent=4))
-    #     f.close()
 
     # log urls won't match in tests, since they're machine specific
     # but leave it in the exp file as an example of what the real structure

View file

@@ -14,17 +14,12 @@ from datetime import timedelta
 def ref_data_json():
     """Return reference data json structure"""
 
     filename = os.path.join(
         os.path.abspath(os.path.dirname(__file__)),
         "ref_data.json",
     )
 
-    json_data = ""
     with open(filename) as f:
-        json_data = f.read()
-
-    return json_data
+        return f.read()
 
 
 def job_json(**kwargs):

View file

@@ -425,7 +425,8 @@ def sample_bugs(test_base_dir):
         'sample_data',
         'bug_list.json'
     )
-    return json.loads(open(filename).read())
+    with open(filename) as f:
+        return json.loads(f.read())
 
 
 def test_update_bugscache(refdata, sample_bugs):

View file

@@ -15,17 +15,12 @@ from datetime import timedelta
 def ref_data_json():
     """Return reference data json structure"""
 
     filename = os.path.join(
         os.path.abspath(os.path.dirname(__file__)),
         "ref_data.json",
     )
 
-    json_data = ""
     with open(filename) as f:
-        json_data = f.read()
-
-    return json_data
+        return f.read()
 
 
 def job_json(**kwargs):
@@ -175,7 +170,8 @@ def result_set(**kwargs):
         "resultset_data.json"
     )
-    defaults = json.loads(open(source_file).read())[0]
+    with open(source_file) as f:
+        defaults = json.loads(f.read())[0]
     defaults.update(kwargs)
 
     # ensure that the repository values for all the revisions have the

View file

@@ -23,21 +23,15 @@ class SampleData(object):
     @classmethod
     def get_talos_perf_data(cls):
-        talos_perf_data = []
         with open("{0}/sample_data/artifacts/performance/talos_perf.json".format(
                   os.path.dirname(__file__))) as f:
-            talos_perf_data = json.loads(f.read())
-        return talos_perf_data
+            return json.loads(f.read())
 
     @classmethod
     def get_b2g_perf_data(cls):
-        b2g_perf_data = []
         with open("{0}/sample_data/artifacts/performance/b2g_perf.json".format(
                   os.path.dirname(__file__))) as f:
-            b2g_perf_data = json.loads(f.read())
-        return b2g_perf_data
+            return json.loads(f.read())
 
     def __init__(self):
@@ -80,7 +74,6 @@ class SampleData(object):
                 self.job_data.append(json.loads(line.strip()))
 
         with open(self.resultset_data_file) as f:
             self.resultset_data = json.loads(f.read())
-
         # ensure that the repository values for all the revisions have the
@@ -91,9 +84,9 @@
             rev["repository"] = settings.DATABASES["default"]["TEST_NAME"]
 
         with open(self.raw_pulse_data_file) as f:
-            for line in f.readlines():
-                line = str(line)
-                self.raw_pulse_data.append(json.loads(line.strip()))
+            for line in f:
+                line = str(line).strip()
+                self.raw_pulse_data.append(json.loads(line))
 
     def get_log_path(self, name):
         """Returns the full path to a log file"""

View file

@@ -354,12 +354,12 @@ def load_exp(filename):
     development.
     """
     path = SampleData().get_log_path(filename)
-    exp_str = open(path, "a+").read()
-    try:
-        return json.loads(exp_str)
-    except ValueError:
-        # if it's not parse-able, return an empty dict
-        return {}
+    with open(path, "a+") as f:
+        try:
+            return json.loads(f.read())
+        except ValueError:
+            # if it's not parse-able, return an empty dict
+            return {}
 
 
 def unicode_keys(d):

View file

@@ -807,73 +807,70 @@ class Builds4hAnalyzer(JsonExtractorMixin, Builds4hTransformerMixin):
             deserialized_data['analyzers'][analysis_type]
 
     def write_report(self):
-        report_fh = open(self.builds4h_report_file_path, 'w')
-        divider = "------------------------------------------------------\n"
-        header_line = "Builds4h Report Last Run Time {0}\n".format(
-            self.readable_time)
-        report_fh.write(header_line)
-        report_fh.write(divider)
-
-        data_to_write = {'analyzers': {}, 'guids': {}}
-        data_to_write['guids'] = self.report_obj['guids']
-
-        for analyzer in sorted(self.report_obj['analyzers']):
-            # Set data for json structure
-            data_to_write['analyzers'][analyzer] = \
-                self.report_obj['analyzers'][analyzer]['data']
-
-            # Remove any blacklist names found
-            for exclude_name in self.blacklist:
-                if exclude_name in self.report_obj['analyzers'][analyzer]['data']:
-                    del self.report_obj['analyzers'][analyzer]['data'][exclude_name]
-
-            # Write the title line
-            all_misses = self.report_obj['analyzers'][analyzer]['all_misses']
-            if all_misses > 0:
-                title = self.report_obj['analyzers'][analyzer].get(
-                    'title', '{0} Needs Title')
-                report_fh.write(
-                    "{0}\n".format(title.format(str(all_misses)))
-                )
-                report_fh.write(divider)
-            else:
-                continue
-
-            # Write out display report
-            for k, v in sorted(
-                    self.report_obj['analyzers'][analyzer]['data'].iteritems(),
-                    key=lambda k_v: (k_v[1]['first_seen'], k_v[0])):
-
-                if k in self.blacklist:
-                    continue
-                if v['missed_count'] == 0:
-                    continue
-
-                readable_time = datetime.datetime.fromtimestamp(
-                    v['first_seen']).strftime('%Y-%m-%d')
-
-                line = "{0}\t{1}\t{2}/{3}\n".format(
-                    str(k), readable_time, str(v['missed_count']),
-                    str(v['total_count']))
-
-                report_fh.write(line)
-
-                if len(v['objects']) > 0:
-                    for o in v['objects']:
-                        report_fh.write("\n{0}\n\n".format(o))
-
-            report_fh.write(divider)
-
-        report_fh.close()
+        with open(self.builds4h_report_file_path, 'w') as report_fh:
+            divider = "------------------------------------------------------\n"
+            header_line = "Builds4h Report Last Run Time {0}\n".format(
+                self.readable_time)
+            report_fh.write(header_line)
+            report_fh.write(divider)
+
+            data_to_write = {'analyzers': {}, 'guids': {}}
+            data_to_write['guids'] = self.report_obj['guids']
+
+            for analyzer in sorted(self.report_obj['analyzers']):
+                # Set data for json structure
+                data_to_write['analyzers'][analyzer] = \
+                    self.report_obj['analyzers'][analyzer]['data']
+
+                # Remove any blacklist names found
+                for exclude_name in self.blacklist:
+                    if exclude_name in self.report_obj['analyzers'][analyzer]['data']:
+                        del self.report_obj['analyzers'][analyzer]['data'][exclude_name]
+
+                # Write the title line
+                all_misses = self.report_obj['analyzers'][analyzer]['all_misses']
+                if all_misses > 0:
+                    title = self.report_obj['analyzers'][analyzer].get(
+                        'title', '{0} Needs Title')
+                    report_fh.write(
+                        "{0}\n".format(title.format(str(all_misses)))
+                    )
+                    report_fh.write(divider)
+                else:
+                    continue
+
+                # Write out display report
+                for k, v in sorted(
+                        self.report_obj['analyzers'][analyzer]['data'].iteritems(),
+                        key=lambda k_v: (k_v[1]['first_seen'], k_v[0])):
+
+                    if k in self.blacklist:
+                        continue
+                    if v['missed_count'] == 0:
+                        continue
+
+                    readable_time = datetime.datetime.fromtimestamp(
+                        v['first_seen']).strftime('%Y-%m-%d')
+
+                    line = "{0}\t{1}\t{2}/{3}\n".format(
+                        str(k), readable_time, str(v['missed_count']),
+                        str(v['total_count']))
+
+                    report_fh.write(line)
+
+                    if len(v['objects']) > 0:
+                        for o in v['objects']:
+                            report_fh.write("\n{0}\n\n".format(o))
+
+                report_fh.write(divider)
 
         # Write out the data json
-        f = open(self.builds4h_analysis_file_path, 'w')
-        f.write(json.dumps(data_to_write))
-        f.close()
+        with open(self.builds4h_analysis_file_path, 'w') as f:
+            f.write(json.dumps(data_to_write))
 
     def get_objects_missing_buildernames(
             self, analysis_type, build, buildername, job_guid):

View file

@@ -12,13 +12,10 @@ from treeherder.model.derived.base import TreeherderModelBase
 class Command(BaseCommand):
     """Management command to export project credentials."""
 
     help = "Exports the objectstore Oauth keys for etl data import tasks"
 
     option_list = BaseCommand.option_list + (
         make_option(
             '--safe',
             action='store_true',
@@ -29,7 +26,6 @@ class Command(BaseCommand):
     )
 
     def handle(self, *args, **options):
         safe = options.get("safe")
 
         file_path = os.path.join(
@@ -39,10 +35,8 @@
         )
 
         if not os.path.isfile(file_path):
             # If it doesn't exist create it
             write_credentials(file_path)
         else:
             # File already exists, if safe is specified don't do anything
             if not safe:
@@ -50,8 +44,6 @@
 def write_credentials(file_path):
     immutable_credentials = TreeherderModelBase.get_oauth_credentials()
 
-    keys_fh = open(file_path, 'w')
-    keys_fh.write(json.dumps(immutable_credentials, indent=4))
-    keys_fh.close()
+    with open(file_path, 'w') as keys_fh:
+        keys_fh.write(json.dumps(immutable_credentials, indent=4))

View file

@@ -58,7 +58,8 @@ Type 'yes' to continue, or 'no' to cancel: """ % connection.settings_dict['NAME'
             'treeherder_reference_1.sql.tmpl',
         ):
-            sql = open(os.path.join(options['template_path'], sql_file)).read()
+            with open(os.path.join(options['template_path'], sql_file)) as f:
+                sql = f.read()
 
             cursor = connection.cursor()
             try:
                 rendered_sql = sql.format(engine=options['engine'])
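For reference, the with statement used throughout this commit is shorthand for the try/finally that the deleted close() calls only approximated, since a plain close() is skipped whenever an earlier line raises. A sketch of the equivalence, with example.txt as a placeholder path:

    # "with open('example.txt') as f: data = f.read()"
    # is roughly equivalent to:
    f = open("example.txt")
    try:
        data = f.read()
    finally:
        f.close()  # runs even if read() raised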