reformat codebase to reduce the ignore rules for ruff (#20972)
* ignore rule F405 per file
* ruff autofix of B007 and B010
* B007 manual fixes
* B026 fixes
* B023 fixes
* updated ruff ignore list
* black formatting & rework decorators
* optimize iterating on dicts where we don't need the keys or values
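A minimal, runnable sketch of the patterns behind these fixes (illustrative names only, not lines taken from this diff):

    # B007: prefix a loop-control variable with an underscore when the body never uses it.
    reports = []
    for _i in range(3):  # was: for i in range(3)
        reports.append('report')

    # B023: a closure created in a loop must bind the loop variable explicitly,
    # otherwise every callback sees its final value (late binding).
    callbacks = [lambda status=code: status for code in (200, 301, 404)]
    assert [cb() for cb in callbacks] == [200, 301, 404]

    # B026: star-arg unpacking after a keyword argument is flagged; fold the
    # keyword into the **mapping instead, as this diff does for addon_id and html_message.
    def f(a, b=None, c=None):
        return (a, b, c)

    rest = ('B',)
    # was: f('A', c='C', *rest)
    assert f('A', *rest, **{'c': 'C'}) == ('A', 'B', 'C')

    # Dict iteration: iterate over exactly what is needed instead of unpacking items().
    rules = {'B017': 'assertRaises(Exception)', 'B028': 'missing stacklevel'}
    for rule in rules:  # keys only
        print(rule)
    for description in rules.values():  # values only
        print(description)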
This commit is contained in:
Parent 31ac09358c
Commit ee8a48ec68
@@ -21,15 +21,21 @@ exclude = [
     ".git",
     "*/migrations/*.py",
 ]
-ignore = ["F405", "B004", "B007", "B009", "B010", "B017", "B018", "B023", "B026", "B028", "B904", "B905"]
+ignore = [
+    "B017", # `assertRaises(Exception)` should be considered evil
+    "B018", # Found useless attribute access. Either assign it to a variable or remove it.
+    "B028", # No explicit `stacklevel` keyword argument found
+    "B904", # Within an `except` clause, raise exceptions with `raise ... from err|None`
+    "B905" # `zip()` without an explicit `strict=` parameter
+]
 line-length = 88
 select = [
-    "B",
-    "E",
-    "F",
-    "I",
-    "Q",
-    "W",
+    "B", # flake8-bugbear
+    "E", # pycodestyle errors
+    "F", # pyflakes
+    "I", # isort
+    "Q", # flake8-quotes
+    "W", # pycodestyle warnings
 ]

 [tool.ruff.flake8-quotes]
@@ -1,3 +1,4 @@
+# ruff: noqa: F405
 """This is the standard development settings file.

 If you need to overload settings, please do so in a local_settings.py file (it
@@ -1,4 +1,5 @@
 # -*- coding: utf-8 -*-
+# ruff: noqa: F405
 from settings import * # noqa


@@ -19,7 +19,7 @@ from olympia.zadmin.models import set_config
 def addon_factory_with_abuse_reports(*args, **kwargs):
     abuse_reports_count = kwargs.pop('abuse_reports_count')
     addon = addon_factory(*args, **kwargs)
-    for x in range(0, abuse_reports_count):
+    for _x in range(0, abuse_reports_count):
         AbuseReport.objects.create(guid=addon.guid)
     return addon

@@ -1309,8 +1309,8 @@ class Addon(OnChangeMixin, ModelBase):
         for addon_id, addonusers in groupby:
             authors = []
             for addonuser in addonusers:
-                setattr(addonuser.user, 'role', addonuser.role)
-                setattr(addonuser.user, 'listed', addonuser.listed)
+                addonuser.user.role = addonuser.role
+                addonuser.user.listed = addonuser.listed
                 authors.append(addonuser.user)
             setattr(addon_dict[addon_id], to_attr, authors)
             seen.add(addon_id)
@@ -192,7 +192,7 @@ class TestCleanSlug(TestCase):
         # worst case scenario where all the available clashes have been
         # avoided. Check the comment in addons.models.clean_slug, in the 'else'
         # part of the 'for" loop checking for available slugs not yet assigned.
-        for i in range(100):
+        for _i in range(100):
             Addon.objects.create(slug=long_slug)

         with self.assertRaises(RuntimeError): # Fail on the 100th clash.
@@ -1938,7 +1938,7 @@ class TestShouldRedirectToSubmitFlow(TestCase):

         status_exc_null = dict(amo.STATUS_CHOICES_ADDON)
         status_exc_null.pop(amo.STATUS_NULL)
-        for status in status_exc_null:
+        for _status in status_exc_null:
             assert not addon.should_redirect_to_submit_flow()
         addon.update(status=amo.STATUS_NULL)
         assert addon.should_redirect_to_submit_flow()
@@ -5676,7 +5676,7 @@ class TestAddonSearchView(ESTestCase):
         # contain a string that was causing such breakage before.
         # Populate the index with a few add-ons first (enough to trigger the
         # issue locally).
-        for i in range(0, 10):
+        for _i in range(0, 10):
             addon_factory()
         self.refresh()
         query = (
@@ -262,9 +262,10 @@ class AMOModelAdmin(admin.ModelAdmin):
         """
         # The utility function was admin.utils.lookup_needs_distinct in django3.2;
         # it was renamed to admin.utils.lookup_spawns_duplicates in django4.0
-        lookup_function = getattr(
-            admin.utils, 'lookup_spawns_duplicates', None
-        ) or getattr(admin.utils, 'lookup_needs_distinct')
+        lookup_function = (
+            getattr(admin.utils, 'lookup_spawns_duplicates', None)
+            or admin.utils.lookup_needs_distinct
+        )
         rval = lookup_function(opts, lookup_path)
         lookup_fields = lookup_path.split(LOOKUP_SEP)
         # Not pretty but looking up the actual field would require truly
@@ -99,8 +99,8 @@ class BaseQuerySet(models.QuerySet):
 class RawQuerySet(models.query.RawQuerySet):
     """A RawQuerySet with __len__."""

-    def __init__(self, *args, **kw):
-        super().__init__(*args, **kw)
+    def __init__(self, raw_query, *args, **kw):
+        super().__init__(raw_query, *args, **kw)
         self._result_cache = None

     def __iter__(self):
@@ -145,9 +145,13 @@ class ManagerBase(models.Manager):
     def transform(self, fn):
         return self.all().transform(fn)

-    def raw(self, raw_query, params=None, *args, **kwargs):
+    def raw(self, raw_query, params=(), translations=None, using=None):
         return RawQuerySet(
-            raw_query, self.model, params=params, using=self._db, *args, **kwargs
+            raw_query,
+            model=self.model,
+            params=params,
+            translations=translations,
+            using=using or self._db,
         )


@@ -557,7 +557,7 @@ class TestCase(PatchMixin, InitializeSessionMixin, test.TestCase):
         middleware = SessionMiddleware(request)
         middleware.process_request(request)
         messages = FallbackStorage(request)
-        setattr(request, '_messages', messages)
+        request._messages = messages
         request.session.save()
         return request

@@ -249,7 +249,7 @@ class TestWriteSitemaps(TestCase):
         entry = (
             '<sitemap><loc>http://testserver/sitemap.xml?{params}</loc></sitemap>'
         )
-        for (section, app), sitemap in sitemaps.items():
+        for section, app in sitemaps:
             if not app:
                 assert entry.format(params=f'section={section}') in contents
             else:
@@ -36,7 +36,7 @@ def test_xss():
 def test_no_dupes():
     """Test that duplicate messages aren't saved."""
     request = HttpRequest()
-    setattr(request, '_messages', default_storage(request))
+    request._messages = default_storage(request)

     info(request, 'Title', 'Body')
     info(request, 'Title', 'Body')
@@ -49,7 +49,7 @@ def test_no_dupes():
 def test_l10n_dups():
     """Test that L10n values are preserved."""
     request = HttpRequest()
-    setattr(request, '_messages', default_storage(request))
+    request._messages = default_storage(request)

     info(request, gettext('Title'), gettext('Body'))
     info(request, gettext('Title'), gettext('Body'))
@@ -62,7 +62,7 @@ def test_l10n_dups():
 def test_unicode_dups():
     """Test that unicode values are preserved."""
     request = HttpRequest()
-    setattr(request, '_messages', default_storage(request))
+    request._messages = default_storage(request)

     info(request, 'Titlé', 'Body')
     info(request, 'Titlé', 'Body')
@@ -75,7 +75,7 @@ def test_unicode_dups():
 def test_html_rendered_properly():
     """Html markup is properly displayed in final template."""
     request = HttpRequest()
-    setattr(request, '_messages', default_storage(request))
+    request._messages = default_storage(request)

     # This will call _file_message, which in turn calls _make_message, which in
     # turn renders the message_content.html template, which adds html markup.
@@ -253,8 +253,7 @@ class TestCacheControlMiddleware(TestCase):
         request.is_api = True
         for method in ('POST', 'DELETE', 'PUT', 'PATCH'):
             request.method = method
-            response = HttpResponse()
-            response = CacheControlMiddleware(lambda x: response)(request)
+            response = CacheControlMiddleware(lambda x: HttpResponse())(request)
             assert response['Cache-Control'] == 's-maxage=0'

     def test_disable_caching_arg_should_not_cache(self):
@@ -292,19 +291,19 @@ class TestCacheControlMiddleware(TestCase):
     def test_non_success_status_code_should_not_cache(self):
         request = self.request_factory.get('/api/v5/foo')
         request.is_api = True
-        response = HttpResponse()
         for status_code in (400, 401, 403, 404, 429, 500, 502, 503, 504):
-            response.status_code = status_code
-            response = CacheControlMiddleware(lambda x: response)(request)
+            response = CacheControlMiddleware(
+                lambda x, status=status_code: HttpResponse(status=status)
+            )(request)
             assert response['Cache-Control'] == 's-maxage=0'

     def test_everything_ok_should_cache_for_3_minutes(self):
         request = self.request_factory.get('/api/v5/foo')
         request.is_api = True
-        response = HttpResponse()
         for status_code in (200, 201, 202, 204, 301, 302, 303, 304):
-            response.status_code = status_code
-            response = CacheControlMiddleware(lambda x: response)(request)
+            response = CacheControlMiddleware(
+                lambda x, status=status_code: HttpResponse(status=status)
+            )(request)
             assert response['Cache-Control'] == 'max-age=180'

     def test_functional_should_cache(self):
@@ -333,7 +333,7 @@ def test_images_are_small():
     """A test that will fail if we accidentally include a large image."""
     large_images = []
     img_path = os.path.join(settings.ROOT, 'static', 'img')
-    for root, dirs, files in os.walk(img_path):
+    for root, _dirs, files in os.walk(img_path):
         large_images += [
             os.path.join(root, name)
             for name in files
@@ -123,7 +123,7 @@ def sorted_groupby(seq, key, *, reverse=False):

     key should be a string (used with attrgetter) or a function.
     """
-    if not hasattr(key, '__call__'):
+    if not callable(key):
         key = operator.attrgetter(key)
     return itertools.groupby(sorted(seq, key=key, reverse=reverse), key=key)

@@ -342,9 +342,8 @@ def send_html_mail_jinja(
     msg = send_mail(
         subject,
         text_template.render(context),
-        html_message=html_template.render(context),
         *args,
-        **kwargs,
+        **{'html_message': html_template.render(context), **kwargs},
     )
     return msg

@@ -941,7 +940,7 @@ class SafeStorage(FileSystemStorage):
         """
         empty_dirs = []
         # Delete all files first then all empty directories.
-        for root, dirs, files in self.walk(dir_path):
+        for root, _dirs, files in self.walk(dir_path):
             for fn in files:
                 self.delete(f'{root}/{fn}')
             empty_dirs.insert(0, root)
@@ -15,7 +15,9 @@ class Command(BaseCommand):
     def handle(self, *args, **options):
         revoked_count = 0
         with open(options['csv_file']) as csvfile:
-            for idx, (key, secret) in enumerate(csv.reader(csvfile), start=1):
+            for idx, (key, secret) in enumerate( # noqa: B007
+                csv.reader(csvfile), start=1
+            ):
                 try:
                     apikey = APIKey.objects.get(key=key, is_active=True)
                 except APIKey.DoesNotExist:
@@ -196,7 +196,9 @@ class TestJWTKeyAuthProtectedView(WithDynamicEndpoints, JWTAuthKeyTester, TestCa
         return handler(reverse('test-dynamic-endpoint'), *args, **kw)

     def jwt_request(self, token, method, *args, **kw):
-        return self.request(method, HTTP_AUTHORIZATION=f'JWT {token}', *args, **kw)
+        return self.request(
+            method, *args, **{'HTTP_AUTHORIZATION': f'JWT {token}', **kw}
+        )

     def test_get_requires_auth(self):
         res = self.request('get')
@@ -38,7 +38,7 @@ class TestAPIKey(TestCase):

     def test_generate_new_unique_keys(self):
         last_key = None
-        for counter in range(3):
+        for _counter in range(3):
             credentials = APIKey.new_jwt_credentials(self.user)
             assert credentials.key != last_key
             last_key = credentials.key
@@ -56,7 +56,7 @@ class TestAPIKey(TestCase):
         mock_filter.return_value.exists.return_value = True

         with self.assertRaises(RuntimeError):
-            for counter in range(max + 1):
+            for _counter in range(max + 1):
                 APIKey.get_unique_key('key-prefix-', max_tries=max)

     def test_generate_secret(self):
@@ -179,7 +179,7 @@ def save_versions_to_blocks(guids, submission):
     for block in blocks:
         change = bool(block.id)
         if change:
-            setattr(block, 'modified', modified_datetime)
+            block.modified = modified_datetime
         block.updated_by = submission.updated_by
         if submission.reason is not None:
             block.reason = submission.reason
@@ -232,7 +232,7 @@ def delete_versions_from_blocks(guids, submission):
     for block in blocks:
         if not block.id:
             continue
-        setattr(block, 'modified', modified_datetime)
+        block.modified = modified_datetime

         BlockVersion.objects.filter(
             block=block, version_id__in=submission.changed_version_ids
@@ -1,3 +1,4 @@
+# ruff: noqa: F405
 from olympia.lib.settings_base import * # noqa


@@ -1,3 +1,4 @@
+# ruff: noqa: F405
 from olympia.lib.settings_base import * # noqa


@@ -1,3 +1,4 @@
+# ruff: noqa: F405
 from olympia.lib.settings_base import * # noqa


@@ -43,7 +43,7 @@ def dev_required(
     @functools.wraps(f)
     def wrapper(request, addon, *args, **kw):
         def fun():
-            return f(request, addon_id=addon.id, addon=addon, *args, **kw)
+            return f(request, *args, **{'addon_id': addon.id, 'addon': addon, **kw})

         if request.method in ('HEAD', 'GET'):
             # Allow reviewers for read operations, if file_id is present
@@ -31,18 +31,18 @@ class TestActivity(HubTest):
     def log_creates(self, num, addon=None):
         if not addon:
             addon = self.addon
-        for i in range(num):
+        for _i in range(num):
             ActivityLog.create(amo.LOG.CREATE_ADDON, addon)

     def log_updates(self, num, version_string='1'):
         version = Version.objects.create(version=version_string, addon=self.addon)

-        for i in range(num):
+        for _i in range(num):
             with core.override_remote_addr('127.0.0.1'):
                 ActivityLog.create(amo.LOG.ADD_VERSION, self.addon, version)

     def log_status(self, num):
-        for i in range(num):
+        for _i in range(num):
             ActivityLog.create(amo.LOG.USER_DISABLE, self.addon)

     def log_collection(self, num, prefix='foo'):
@@ -57,7 +57,7 @@ class TestActivity(HubTest):

     def log_rating(self, num):
         rating = Rating(addon=self.addon)
-        for i in range(num):
+        for _i in range(num):
             ActivityLog.create(amo.LOG.ADD_RATING, self.addon, rating)

     def get_response(self, **kwargs):
@@ -218,7 +218,7 @@ class TestNewUploadForm(TestCase):
         request.user = user
         request.META['REMOTE_ADDR'] = '5.6.7.8'
         with freeze_time('2019-04-08 15:16:23.42') as frozen_time:
-            for x in range(0, 6):
+            for _x in range(0, 6):
                 self._add_fake_throttling_action(
                     view_class=AddonViewSet,
                     url='/',
@@ -55,7 +55,7 @@ def test_summarize_validation():

 def test_log_action_class():
     v = Mock()
-    for k, v in amo.LOG_BY_ID.items():
+    for v in amo.LOG_BY_ID.values():
         if v.action_class is not None:
             cls = 'action-' + v.action_class
         else:
@@ -139,7 +139,7 @@ class TestDashboard(HubTest):
         """Check themes show on dashboard."""
         # Create 2 themes.
         staticthemes = []
-        for x in range(2):
+        for _x in range(2):
             addon = addon_factory(type=amo.ADDON_STATICTHEME, users=[self.user_profile])
             VersionPreview.objects.create(version=addon.current_version)
             staticthemes.append(addon)
@@ -454,7 +454,7 @@ class TestHeroShelvesView(TestCase):

         found_ids = set()
         # check its not just returning the same add-on each time
-        for count in range(0, 19):
+        for _count in range(0, 19):
             response = self.client.get(self.url)
             found_ids.add(response.json()['primary']['addon']['id'])
             if len(found_ids) == 3:
@@ -26,7 +26,7 @@ class _BaseAddonGeneratorMixin:
         assert len(data) == size
         # Addons are split up equally within each categories.
         categories = collections.defaultdict(int)
-        for addonname, category in data:
+        for _addonname, category in data:
             categories[category.slug] += 1
         length = len(CATEGORIES[self.app.id][self.type])
         assert set(categories.values()) == {size / length}
@@ -38,7 +38,7 @@ class _BaseAddonGeneratorMixin:
         data = list(_yield_name_and_cat(size, self.app, self.type))
         assert len(data) == size
         categories = collections.defaultdict(int)
-        for addonname, cat in data:
+        for _addonname, cat in data:
             categories[cat.id] += 1
         # Addons are spread between categories evenly - the difference
         # between the largest and smallest category is less than 2.
@@ -1335,7 +1335,7 @@ class TestRatingViewSetDelete(TestCase):
         assert Rating.objects.count() == 0

         # Go over 5 requests and you get throttled though.
-        for x in range(0, 3):
+        for _x in range(0, 3):
             response = self.client.delete(
                 reverse_ns(self.detail_url_name, kwargs={'pk': rating_b.pk})
             )
@@ -2282,7 +2282,7 @@ class TestRatingViewSetPost(TestCase):
         assert response.status_code == 201, response.content

         # Add fake actions to get close to the daily limit.
-        for x in range(3, 23):
+        for _x in range(3, 23):
             self._add_fake_throttling_action(
                 view_class=RatingViewSet,
                 url=self.url,
@@ -2908,7 +2908,7 @@ class TestRatingViewSetFlag(TestCase):
         # Both should have been flagged.
         assert RatingFlag.objects.count() == 2

-        for x in range(0, 18):
+        for _x in range(0, 18):
             # You can keep flagging up to 20 a day.
             response = self.client.post(
                 url_b, data={'flag': 'review_flag_reason_spam'}
@@ -231,7 +231,7 @@ class FileEntriesDiffMixin(FileEntriesMixin):
         entries = super()._get_entries()

         # All files have a "unmodified" status by default
-        for path, value in entries.items():
+        for path in entries:
             entries[path].setdefault('status', '')

         # Now let's overwrite that with data from the actual delta
@@ -335,7 +335,7 @@ class TestAutoApprovalSummary(TestCase):

         # Should be capped at 100. We're already at 45, adding 4 more should
         # result in a weight of 100 instead of 105.
-        for i in range(0, 4):
+        for _i in range(0, 4):
             AbuseReport.objects.create(guid=self.addon.guid)
         weight_info = summary.calculate_weight()
         assert summary.weight == 100
@@ -402,7 +402,7 @@ class TestReviewReports:
         self.reviewer1 = user_factory(display_name='Volunteer A')

         with freeze_time(self.last_week_begin) as frozen_time:
-            for i in range(3):
+            for _i in range(3):
                 frozen_time.tick()
                 ActivityLog.create(
                     amo.LOG.APPROVE_CONTENT,
@@ -160,7 +160,7 @@ class TestRatingsModerationLog(ReviewerTest):
         reviews.
         """
         review = self.make_review()
-        for i in range(2):
+        for _i in range(2):
             ActivityLog.create(amo.LOG.APPROVE_RATING, review, review.addon)
             ActivityLog.create(amo.LOG.DELETE_RATING, review.id, review.addon)
         response = self.client.get(self.url, {'filter': 'deleted'})
@@ -1154,7 +1154,7 @@ class QueueTest(ReviewerTest):
         expected = []
         if not len(self.expected_addons):
             raise AssertionError('self.expected_addons was an empty list')
-        for idx, addon in enumerate(self.expected_addons):
+        for _idx, addon in enumerate(self.expected_addons):
             if self.channel_name == 'unlisted' or dont_expect_version_number:
                 # In unlisted queue we don't display latest version number.
                 name = str(addon.name)
@@ -1548,7 +1548,7 @@ def review_version_redirect(request, addon, version):
         return None

     # Check each channel to calculate which # it would be in a list of versions
-    for channel, channel_text in amo.CHANNEL_CHOICES_API.items():
+    for channel, channel_text in amo.CHANNEL_CHOICES_API.items(): # noqa: B007
         if (index := index_in_versions_list(channel, version)) is not None:
             break
     else:
@@ -667,7 +667,7 @@ class TestUploadVersion(BaseUploadVersionTestMixin, TestCase):

     def _test_throttling_verb_user_burst(self, verb, url, expected_status=201):
         with freeze_time('2019-04-08 15:16:23.42') as frozen_time:
-            for x in range(0, 6):
+            for _x in range(0, 6):
                 # Make the IP different every time so that we test the user
                 # throttling.
                 self._add_fake_throttling_action(
@@ -707,7 +707,7 @@ class TestUploadVersion(BaseUploadVersionTestMixin, TestCase):
     def _test_throttling_verb_user_hourly(self, verb, url, expected_status=201):
         with freeze_time('2019-04-08 15:16:23.42') as frozen_time:
             # 21 is above the hourly limit but below the daily one.
-            for x in range(0, 21):
+            for _x in range(0, 21):
                 # Make the IP different every time so that we test the user
                 # throttling.
                 self._add_fake_throttling_action(
@@ -759,7 +759,7 @@ class TestUploadVersion(BaseUploadVersionTestMixin, TestCase):

     def _test_throttling_verb_user_daily(self, verb, url, expected_status=201):
         with freeze_time('2019-04-08 15:16:23.42') as frozen_time:
-            for x in range(0, 50):
+            for _x in range(0, 50):
                 # Make the IP different every time so that we test the user
                 # throttling.
                 self._add_fake_throttling_action(
@@ -868,7 +868,7 @@ class TestUploadVersion(BaseUploadVersionTestMixin, TestCase):
         )
         url = self.url(self.guid, '3.0')
         with freeze_time('2019-04-08 15:16:23.42'):
-            for x in range(0, 60):
+            for _x in range(0, 60):
                 # With that many actions all throttling classes should prevent
                 # the user from submitting an addon...
                 self._add_fake_throttling_action(
@@ -1199,7 +1199,7 @@ class TestTestUploadVersionWebextensionTransactions(
     def test_activity_log_saved_on_throttling(self):
         url = reverse_ns('signing.version', api_version='v4')
         with freeze_time('2019-04-08 15:16:23.42'):
-            for x in range(0, 3):
+            for _x in range(0, 3):
                 self._add_fake_throttling_action(
                     view_class=self.view_class,
                     url=url,
@@ -1388,7 +1388,7 @@ class TestCheckVersion(BaseUploadVersionTestMixin, TestCase):
         url = self.url(self.guid, '3.0')

         with freeze_time('2019-04-08 15:16:23.42'):
-            for x in range(0, 60):
+            for _x in range(0, 60):
                 # With that many actions all throttling classes should prevent
                 # the user from submitting an addon...
                 self._add_fake_throttling_action(
@@ -19,7 +19,7 @@ def addon_view_stats(f):
         else:
             qs = Addon.objects.not_disabled_by_mozilla

-        return addon_view(f, qs)(request, addon_id=addon_id, *args, **kw)
+        return addon_view(f, qs)(request, *args, **{'addon_id': addon_id, **kw})

     return wrapper

@@ -1043,7 +1043,7 @@ class TestStatsWithBigQuery(TestCase):

     @override_switch('disable-bigquery', active=True)
     def test_usage_breakdown_series_disabled(self):
-        for url_name, source in [
+        for url_name, _source in [
             ('stats.apps_series', 'apps'),
             ('stats.countries_series', 'countries'),
             ('stats.locales_series', 'locales'),
@@ -1065,7 +1065,7 @@ class TestStatsWithBigQuery(TestCase):
     def test_usage_breakdown_series_csv_disabled(self):
         self.series_args[4] = 'csv'

-        for url_name, source in [
+        for url_name, _source in [
             ('stats.apps_series', 'apps'),
             ('stats.countries_series', 'countries'),
             ('stats.locales_series', 'locales'),
@@ -127,7 +127,7 @@ class SQLCompiler(compiler.SQLCompiler):

         # Add our locale-aware joins. We're not respecting the table ordering
         # Django had in query.alias_map, but that seems to be ok.
-        for field, aliases in self.query.translation_aliases.items():
+        for aliases in self.query.translation_aliases.values():
             t1, t2 = aliases
             joins.append(self.join_with_locale(t1, None, old_map, model))
             joins.append(self.join_with_locale(t2, fallback, old_map, model))
@@ -227,12 +227,12 @@ def test_get_staggered_review_due_date_generator():
     assert due == datetime(2023, 5, 19, 14, 0)

     # Skip the week-end (would otherwise land on Saturday 20th).
-    for x in range(0, 4):
+    for _x in range(0, 4):
         due = next(generator)
     assert due == datetime(2023, 5, 22, 2, 0)

     # Should have 7 more on that Monday.
-    for x in range(0, 7):
+    for _x in range(0, 7):
         due = next(generator)
     assert due == datetime(2023, 5, 22, 23, 0)
