What sourcery is this?
Parent: 196fe9ecc6
Commit: c0e9fe753d
@@ -0,0 +1,67 @@
+# 🪄 This is your project's Sourcery configuration file.
+
+# You can use it to get Sourcery working in the way you want, such as
+# ignoring specific refactorings, skipping directories in your project,
+# or writing custom rules.
+
+# 📚 For a complete reference to this file, see the documentation at
+# https://docs.sourcery.ai/Configuration/Project-Settings/
+
+# This file was auto-generated by Sourcery on 2023-01-18 at 16:36.
+
+version: '1'  # The schema version of this config file
+
+ignore:  # A list of paths or files which Sourcery will ignore.
+- .git
+- venv
+- .venv
+- env
+- .env
+- .tox
+
+rule_settings:
+  enable:
+  - default
+  disable: []  # A list of rule IDs Sourcery will never suggest.
+  rule_types:
+  - refactoring
+  - suggestion
+  - comment
+  python_version: '3.9'  # A string specifying the lowest Python version your project supports. Sourcery will not suggest refactorings requiring a higher Python version.
+
+# rules:  # A list of custom rules Sourcery will include in its analysis.
+# - id: no-print-statements
+#   description: Do not use print statements in the test directory.
+#   pattern: print(...)
+#   replacement:
+#   condition:
+#   explanation:
+#   paths:
+#     include:
+#     - test
+#     exclude:
+#     - conftest.py
+#   tests: []
+#   tags: []
+
+# rule_tags: {}  # Additional rule tags.
+
+# metrics:
+#   quality_threshold: 25.0
+
+# github:
+#   labels: []
+#   ignore_labels:
+#   - sourcery-ignore
+#   request_review: author
+#   sourcery_branch: sourcery/{base_branch}
+
+# clone_detection:
+#   min_lines: 3
+#   min_duplicates: 2
+#   identical_clones_only: false
+
+# proxy:
+#   url:
+#   ssl_certs_file:
+#   no_ssl_verify: false
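For reference, a hedged sketch (not part of the commit) of the kind of code the commented-out no-print-statements rule above would flag if it were enabled; the function name and values are made up for illustration, and the rule's pattern print(...) matches any print call under the include paths:

# Illustrative only — assumes the example rule above were uncommented and applied to test/
def test_enrollment_counts():
    counts = {"control": 120, "treatment": 118}
    print(counts)  # would be flagged by pattern: print(...); prefer an assertion or logging
    assert sum(counts.values()) == 238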
@@ -17,8 +17,8 @@ class Command(BaseCommand):
         for status, _ in Experiment.STATUS_CHOICES:
             random_type = random.choice(Experiment.TYPE_CHOICES)[0]
             experiment = ExperimentFactory.create_with_status(status, type=random_type)
-            logger.info("Created {}: {}".format(experiment, status))
+            logger.info(f"Created {experiment}: {status}")

         for lifecycle in NimbusExperimentFactory.LocalLifecycles:
             experiment = NimbusExperimentFactory.create_with_lifecycle(lifecycle)
-            logger.info("Created {}: {}".format(experiment, lifecycle))
+            logger.info(f"Created {experiment}: {lifecycle}")
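The hunk above is Sourcery's str.format-to-f-string refactoring. A minimal standalone sketch of the pattern, with illustrative values rather than project objects:

# Both lines print the same text; the f-string inlines the values directly.
experiment, status = "pref-flip-2023", "Draft"
print("Created {}: {}".format(experiment, status))  # before
print(f"Created {experiment}: {status}")            # after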
@@ -8,9 +8,7 @@ class SiteFlagNameChoices(models.TextChoices):
 class SiteFlagManager(models.Manager):
     def value(self, choice, defval=False):
         qs = self.get_queryset().filter(name=choice.name)
-        if qs.exists():
-            return qs.get().value
-        return defval
+        return qs.get().value if qs.exists() else defval


 class SiteFlag(models.Model):
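Here an if/return pair becomes a single conditional expression. A self-contained sketch of the same pattern, using plain lists instead of a Django queryset:

# Conditional expression instead of an if/else return (illustrative values)
def value_or_default(values, default=False):
    return values[0] if values else default

print(value_or_default([True]))  # True
print(value_or_default([]))      # False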
@@ -72,12 +72,11 @@ class NimbusExperimentResource(resources.ModelResource):

     def get_diff_headers(self):
         skip_list = ["reference_branch_slug"]
-        headers = []
-        for field in self.get_export_fields():
-            if force_text(field.column_name) in skip_list:
-                continue
-            headers.append(force_text(field.column_name))
-        return headers
+        return [
+            force_text(field.column_name)
+            for field in self.get_export_fields()
+            if force_text(field.column_name) not in skip_list
+        ]

     class Meta:
         model = NimbusExperiment
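The append-in-a-loop above collapses into one filtered list comprehension. A small illustrative sketch of the pattern with made-up column names:

# Build a filtered list in one expression instead of append() inside a loop
columns = ["name", "slug", "reference_branch_slug", "status"]
skip_list = ["reference_branch_slug"]
headers = [column for column in columns if column not in skip_list]
print(headers)  # ['name', 'slug', 'status']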
@@ -23,14 +23,14 @@ class Query(graphene.ObjectType):
         description="Nimbus Configuration Data for front-end usage.",
     )

-    def resolve_experiments(root, info):
+    def resolve_experiments(self, info):
         return NimbusExperiment.objects.with_owner_features()

-    def resolve_experiment_by_slug(root, info, slug):
+    def resolve_experiment_by_slug(self, info, slug):
         try:
             return NimbusExperiment.objects.get(slug=slug)
         except NimbusExperiment.DoesNotExist:
             return None

-    def resolve_nimbus_config(root, info):
+    def resolve_nimbus_config(self, info):
         return NimbusConfigurationType()

@@ -440,12 +440,12 @@ class NimbusBranchSerializer(serializers.ModelSerializer):
         return data

     def validate_name(self, value):
-        slug_name = slugify(value)
-        if not slug_name:
+        if slug_name := slugify(value):
+            return value
+        else:
             raise serializers.ValidationError(
                 "Name needs to contain alphanumeric characters."
             )
-        return value

     def validate(self, data):
         data = super().validate(data)
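The validate_name change uses an assignment expression (the walrus operator, PEP 572) to bind and test in one step. A hedged, self-contained sketch; the regex stand-in below is a simplified slugifier, not Django's slugify:

import re

def validate_name(value):
    # before: slug_name = slugify(value); if not slug_name: raise ...
    if slug_name := re.sub(r"[^a-z0-9]+", "-", value.lower()).strip("-"):
        return slug_name
    raise ValueError("Name needs to contain alphanumeric characters.")

print(validate_name("My Branch!"))  # my-branch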
@@ -453,7 +453,7 @@ class NimbusBranchSerializer(serializers.ModelSerializer):
         feature_values = data.get("feature_values")

         if feature_values is not None:
-            unique_features = set(fv["feature_config"] for fv in feature_values)
+            unique_features = {fv["feature_config"] for fv in feature_values}
             if None not in unique_features and len(feature_values) != len(
                 unique_features
             ):
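set(generator) becomes a set comprehension. A small illustrative sketch of the same duplicate check with made-up feature ids:

# Set comprehension instead of set(generator); a length mismatch reveals duplicates
feature_values = [{"feature_config": 1}, {"feature_config": 2}, {"feature_config": 1}]
unique_features = {fv["feature_config"] for fv in feature_values}
print(unique_features)                               # {1, 2}
print(len(unique_features) != len(feature_values))   # True -> duplicate feature configs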
@@ -465,7 +465,7 @@ class NimbusBranchSerializer(serializers.ModelSerializer):
                                 NimbusConstants.ERROR_DUPLICATE_BRANCH_FEATURE_VALUE
                             )
                         }
-                        for fv in feature_values
+                        for _ in feature_values
                     ]
                 }
             )

@@ -500,30 +500,29 @@ class NimbusBranchSerializer(serializers.ModelSerializer):
         )

     def _save_screenshots(self, screenshots, branch):
-        if screenshots is not None:
-            updated_screenshots = dict(
-                (x["id"], x) for x in screenshots if x.get("id", None)
-            )
-            for screenshot in branch.screenshots.all():
-                screenshot_id = screenshot.id
-                if screenshot_id not in updated_screenshots:
-                    screenshot.delete()
-                else:
-                    serializer = NimbusBranchScreenshotSerializer(
-                        screenshot,
-                        data=updated_screenshots[screenshot_id],
-                        partial=True,
-                    )
-                    if serializer.is_valid(raise_exception=True):
-                        serializer.save()
-
-            new_screenshots = (x for x in screenshots if not x.get("id", None))
-            for screenshot_data in new_screenshots:
-                serializer = NimbusBranchScreenshotSerializer(
-                    data=screenshot_data, partial=True
-                )
-                if serializer.is_valid(raise_exception=True):
-                    serializer.save(branch=branch)
+        if screenshots is None:
+            return
+        updated_screenshots = {x["id"]: x for x in screenshots if x.get("id", None)}
+        for screenshot in branch.screenshots.all():
+            screenshot_id = screenshot.id
+            if screenshot_id not in updated_screenshots:
+                screenshot.delete()
+            else:
+                serializer = NimbusBranchScreenshotSerializer(
+                    screenshot,
+                    data=updated_screenshots[screenshot_id],
+                    partial=True,
+                )
+                if serializer.is_valid(raise_exception=True):
+                    serializer.save()
+
+        new_screenshots = (x for x in screenshots if not x.get("id", None))
+        for screenshot_data in new_screenshots:
+            serializer = NimbusBranchScreenshotSerializer(
+                data=screenshot_data, partial=True
+            )
+            if serializer.is_valid(raise_exception=True):
+                serializer.save(branch=branch)

     def save(self, *args, **kwargs):
         feature_enabled = self.validated_data.pop("feature_enabled", False)
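_save_screenshots above is rewritten with a guard clause: the None case returns early, so the main body loses one indentation level. A minimal sketch of the pattern, with hypothetical names:

# Guard clause: handle the degenerate case first, keep the happy path flat
def save_items(items, sink):
    if items is None:
        return
    for item in items:
        sink.append(item)

out = []
save_items(None, out)
save_items([1, 2], out)
print(out)  # [1, 2]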
@@ -566,7 +565,7 @@ class NimbusExperimentBranchMixin:
                 },
                 "treatment_branches": [
                     {"name": NimbusConstants.ERROR_DUPLICATE_BRANCH_NAME}
-                    for i in data["treatment_branches"]
+                    for _ in data["treatment_branches"]
                 ],
             }
         )

@@ -591,13 +590,13 @@ class NimbusExperimentBranchMixin:
         with transaction.atomic():
             experiment = super().update(experiment, data)

-            if set(["reference_branch", "treatment_branches"]).intersection(
+            if {"reference_branch", "treatment_branches"}.intersection(
                 set(self.initial_data.keys())
             ):
                 saved_branch_ids = set(
                     experiment.branches.all().values_list("id", flat=True)
                 )
-                updated_branch_ids = set([b["id"] for b in branches_data if b.get("id")])
+                updated_branch_ids = {b["id"] for b in branches_data if b.get("id")}
                 deleted_branch_ids = saved_branch_ids - updated_branch_ids
                 for deleted_branch_id in deleted_branch_ids:
                     NimbusBranch.objects.get(id=deleted_branch_id).delete()

@@ -656,19 +655,19 @@ class NimbusStatusValidationMixin:
     def validate(self, data):
         data = super().validate(data)

+        restrictions = {
+            "status": TransitionConstants.STATUS_ALLOWS_UPDATE,
+            "publish_status": TransitionConstants.PUBLISH_STATUS_ALLOWS_UPDATE,
+        }
         update_exempt_fields = TransitionConstants.STATUS_UPDATE_EXEMPT_FIELDS
+        fields = ["all"]
         if self.instance:
             restrictive_statuses = set()
             exempt_fields = set()
-            fields = ["all"]
             fields.append("rollouts") if self.instance.is_rollout else fields.append(
                 "experiments"
             )

-            restrictions = {
-                "status": TransitionConstants.STATUS_ALLOWS_UPDATE,
-                "publish_status": TransitionConstants.PUBLISH_STATUS_ALLOWS_UPDATE,
-            }
            for f in fields:
                if update_exempt_fields[f] != []:
                    exempt_fields = exempt_fields.union(update_exempt_fields[f])

@@ -949,9 +948,9 @@ class NimbusExperimentSerializer(
                 f"{NimbusExperiment.MAX_PRIMARY_OUTCOMES}."
             )

-        valid_outcomes = set(
-            [o.slug for o in Outcomes.by_application(self.instance.application)]
-        )
+        valid_outcomes = {
+            o.slug for o in Outcomes.by_application(self.instance.application)
+        }

        if valid_outcomes.intersection(value_set) != value_set:
            invalid_outcomes = value_set - valid_outcomes

@@ -963,9 +962,9 @@ class NimbusExperimentSerializer(

     def validate_secondary_outcomes(self, value):
         value_set = set(value)
-        valid_outcomes = set(
-            [o.slug for o in Outcomes.by_application(self.instance.application)]
-        )
+        valid_outcomes = {
+            o.slug for o in Outcomes.by_application(self.instance.application)
+        }

         if valid_outcomes.intersection(value_set) != value_set:
             invalid_outcomes = value_set - valid_outcomes

@@ -1146,10 +1145,7 @@ class NimbusExperimentCsvSerializer(serializers.ModelSerializer):
         return ",".join([feature.name for feature in sorted_features])

     def get_results_url(self, obj):
-        if obj.results_ready:
-            return obj.experiment_url + "results"
-        else:
-            return ""
+        return f"{obj.experiment_url}results" if obj.results_ready else ""


 class NimbusBranchScreenshotReviewSerializer(NimbusBranchScreenshotSerializer):

@@ -1176,7 +1172,7 @@ class NimbusBranchFeatureValueReviewSerializer(NimbusBranchFeatureValueSerialize
         try:
             json.loads(value)
         except Exception as e:
-            raise serializers.ValidationError(f"Invalid JSON: {e.msg}")
+            raise serializers.ValidationError(f"Invalid JSON: {e.msg}") from e
         return value

     def validate(self, data):
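The raise ... from e changes above preserve the original exception as __cause__ instead of losing it during re-raise. A runnable sketch of the chaining behaviour using only the standard library:

import json

def parse(value):
    try:
        return json.loads(value)
    except json.JSONDecodeError as e:
        # "from e" keeps the original error attached for debugging
        raise ValueError(f"Invalid JSON: {e.msg}") from e

try:
    parse("{broken")
except ValueError as err:
    print(err, "| caused by:", type(err.__cause__).__name__)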
@@ -1190,7 +1186,7 @@ class NimbusBranchFeatureValueReviewSerializer(NimbusBranchFeatureValueSerialize
         # This check can be removed with #6744
         branch_feature_value_id = data.get("id")
         branch_feature_value = self.Meta.model.objects.get(id=branch_feature_value_id)
-        if not branch_feature_value.branch.experiment.feature_configs.count() > 1:
+        if branch_feature_value.branch.experiment.feature_configs.count() <= 1:
             return data

         if all([branch, feature_config, value]) and feature_config.schema:

@@ -1199,7 +1195,7 @@ class NimbusBranchFeatureValueReviewSerializer(NimbusBranchFeatureValueSerialize
                 jsonschema.validate(json_value, json.loads(feature_config.schema))
             except jsonschema.ValidationError as e:
                 if not branch.experiment.warn_feature_schema:
-                    raise serializers.ValidationError({"value": e.message})
+                    raise serializers.ValidationError({"value": e.message}) from e

         return data

@@ -1213,7 +1209,7 @@ class NimbusBranchReviewSerializer(NimbusBranchSerializer):
         try:
             json.loads(value)
         except Exception as e:
-            raise serializers.ValidationError(f"Invalid JSON: {e.msg}")
+            raise serializers.ValidationError(f"Invalid JSON: {e.msg}") from e
         return value

     def validate(self, data):

@@ -1325,7 +1321,7 @@ class NimbusReviewSerializer(serializers.ModelSerializer):
                 error["description"] = [NimbusConstants.ERROR_REQUIRED_FIELD]
             errors.append(error)

-        if any(x for x in errors):
+        if any(errors):
             raise serializers.ValidationError(errors)
         return value

@@ -1379,10 +1375,9 @@ class NimbusReviewSerializer(serializers.ModelSerializer):
             error_result = {}
             if data["reference_branch"].get("feature_enabled"):

-                errors = self._validate_feature_value_against_schema(
+                if errors := self._validate_feature_value_against_schema(
                     schema, data["reference_branch"]["feature_value"]
-                )
-                if errors:
+                ):
                     if warn_feature_schema:
                         self.warnings["reference_branch"] = {"feature_value": errors}
                     else:

@@ -1395,10 +1390,9 @@ class NimbusReviewSerializer(serializers.ModelSerializer):
                 branch_warning = None

                 if branch_data.get("feature_enabled", False):
-                    errors = self._validate_feature_value_against_schema(
+                    if errors := self._validate_feature_value_against_schema(
                         schema, branch_data["feature_value"]
-                    )
-                    if errors:
+                    ):
                         if warn_feature_schema:
                             branch_warning = {"feature_value": errors}
                         else:

@@ -1441,10 +1435,7 @@ class NimbusReviewSerializer(serializers.ModelSerializer):
         application = data.get("application")
         min_version = data.get("firefox_min_version", "")

-        languages = data.get("languages", [])
-
-        if languages:
-
+        if languages := data.get("languages", []):
             min_supported_version = (
                 NimbusConstants.LANGUAGES_APPLICATION_SUPPORTED_VERSION[application]
             )

@@ -1465,10 +1456,7 @@ class NimbusReviewSerializer(serializers.ModelSerializer):
         application = data.get("application")
         min_version = data.get("firefox_min_version", "")

-        countries = data.get("countries", [])
-
-        if countries:
-
+        if countries := data.get("countries", []):
             min_supported_version = (
                 NimbusConstants.COUNTRIES_APPLICATION_SUPPORTED_VERSION[application]
             )

@@ -1509,14 +1497,11 @@ class NimbusReviewSerializer(serializers.ModelSerializer):

     def _validate_feature_enabled_for_treatment_branches(self, data):
         if self._validate_feature_enabled_version(data) and "treatment_branches" in data:
-            treatment_branches_error = []
-            for branch in data["treatment_branches"]:
-                if not branch["feature_enabled"]:
-                    treatment_branches_error.append(
-                        {"feature_enabled": NimbusConstants.ERROR_FEATURE_ENABLED}
-                    )
-
-            if treatment_branches_error:
+            if treatment_branches_error := [
+                {"feature_enabled": NimbusConstants.ERROR_FEATURE_ENABLED}
+                for branch in data["treatment_branches"]
+                if not branch["feature_enabled"]
+            ]:
                 raise serializers.ValidationError(
                     {"treatment_branches": treatment_branches_error}
                 )

@@ -1601,8 +1586,7 @@ class NimbusExperimentCloneSerializer(

     def validate(self, data):
         data = super().validate(data)
-        rollout_branch_slug = data.get("rollout_branch_slug", None)
-        if rollout_branch_slug:
+        if rollout_branch_slug := data.get("rollout_branch_slug", None):
             parent = data.get("parent_slug")
             if not parent.branches.filter(slug=rollout_branch_slug).exists():
                 raise serializers.ValidationError(

@@ -143,8 +143,8 @@ class NimbusFeatureConfigType(DjangoObjectType):
     class Meta:
         model = NimbusFeatureConfig

-    def resolve_sets_prefs(root, info):
-        return bool(root.sets_prefs)
+    def resolve_sets_prefs(self, info):
+        return bool(self.sets_prefs)


 class NimbusBranchScreenshotType(DjangoObjectType):

@@ -155,9 +155,9 @@ class NimbusBranchScreenshotType(DjangoObjectType):
     class Meta:
         model = NimbusBranchScreenshot

-    def resolve_image(root, info):
-        if root.image and root.image.name:
-            return root.image.url
+    def resolve_image(self, info):
+        if self.image and self.image.name:
+            return self.image.url


 class NimbusBranchFeatureValueType(DjangoObjectType):

@@ -181,20 +181,21 @@ class NimbusBranchType(DjangoObjectType):
         model = NimbusBranch
         exclude = ("experiment", "nimbusexperiment")

-    def resolve_feature_values(root, info):
-        return root.feature_values.all()
+    def resolve_feature_values(self, info):
+        return self.feature_values.all()

-    def resolve_feature_enabled(root, info):
+    def resolve_feature_enabled(self, info):
         return (
-            root.feature_values.exists()
-            and root.feature_values.all().order_by("feature_config__slug").first().enabled
+            self.feature_values.exists()
+            and self.feature_values.all().order_by("feature_config__slug").first().enabled
         )

-    def resolve_feature_value(root, info):
+    def resolve_feature_value(self, info):
         return (
-            root.feature_values.exists()
-            and root.feature_values.all().order_by("feature_config__slug").first().value
-        ) or ""
+            self.feature_values.exists()
+            and self.feature_values.all().order_by("feature_config__slug").first().value
+            or ""
+        )


 class NimbusDocumentationLinkType(DjangoObjectType):

@@ -274,7 +275,7 @@ class NimbusConfigurationType(graphene.ObjectType):
     conclusion_recommendations = graphene.List(NimbusLabelValueType)
     types = graphene.List(NimbusLabelValueType)

-    def _text_choices_to_label_value_list(root, text_choices):
+    def _text_choices_to_label_value_list(self, text_choices):
         return [
             NimbusLabelValueType(
                 label=text_choices[name].label,

@@ -283,13 +284,13 @@ class NimbusConfigurationType(graphene.ObjectType):
             for name in text_choices.names
         ]

-    def resolve_applications(root, info):
-        return root._text_choices_to_label_value_list(NimbusExperiment.Application)
+    def resolve_applications(self, info):
+        return self._text_choices_to_label_value_list(NimbusExperiment.Application)

-    def resolve_channels(root, info):
-        return root._text_choices_to_label_value_list(NimbusExperiment.Channel)
+    def resolve_channels(self, info):
+        return self._text_choices_to_label_value_list(NimbusExperiment.Channel)

-    def resolve_application_configs(root, info):
+    def resolve_application_configs(self, info):
         configs = []
         for application in NimbusExperiment.Application:
             application_config = NimbusExperiment.APPLICATION_CONFIGS[application]

@@ -305,14 +306,14 @@ class NimbusConfigurationType(graphene.ObjectType):
             )
         return configs

-    def resolve_all_feature_configs(root, info):
+    def resolve_all_feature_configs(self, info):
         return NimbusFeatureConfig.objects.all().order_by("name")

-    def resolve_firefox_versions(root, info):
+    def resolve_firefox_versions(self, info):
         return NimbusConfigurationType.sort_version_choices(NimbusExperiment.Version)

-    def resolve_conclusion_recommendations(root, info):
-        return root._text_choices_to_label_value_list(
+    def resolve_conclusion_recommendations(self, info):
+        return self._text_choices_to_label_value_list(
             NimbusExperiment.ConclusionRecommendation
         )

@@ -331,10 +332,10 @@ class NimbusConfigurationType(graphene.ObjectType):

         return sorted_versions

-    def resolve_outcomes(root, info):
+    def resolve_outcomes(self, info):
         return Outcomes.all()

-    def resolve_owners(root, info):
+    def resolve_owners(self, info):
         return (
             get_user_model()
             .objects.filter(owned_nimbusexperiments__isnull=False)

@@ -342,7 +343,7 @@ class NimbusConfigurationType(graphene.ObjectType):
             .order_by("email")
         )

-    def resolve_targeting_configs(root, info):
+    def resolve_targeting_configs(self, info):
         return [
             NimbusExperimentTargetingConfigType(
                 label=choice.label,

@@ -361,29 +362,29 @@ class NimbusConfigurationType(graphene.ObjectType):
             for choice in NimbusExperiment.TargetingConfig
         ]

-    def resolve_hypothesis_default(root, info):
+    def resolve_hypothesis_default(self, info):
         return NimbusExperiment.HYPOTHESIS_DEFAULT

-    def resolve_max_primary_outcomes(root, info):
+    def resolve_max_primary_outcomes(self, info):
         return NimbusExperiment.MAX_PRIMARY_OUTCOMES

-    def resolve_documentation_link(root, info):
-        return root._text_choices_to_label_value_list(NimbusExperiment.DocumentationLink)
+    def resolve_documentation_link(self, info):
+        return self._text_choices_to_label_value_list(NimbusExperiment.DocumentationLink)

-    def resolve_locales(root, info):
+    def resolve_locales(self, info):
         return Locale.objects.all().order_by("name")

-    def resolve_countries(root, info):
+    def resolve_countries(self, info):
         return Country.objects.all().order_by("name")

-    def resolve_languages(root, info):
+    def resolve_languages(self, info):
         return Language.objects.all().order_by("name")

-    def resolve_projects(root, info):
+    def resolve_projects(self, info):
         return Project.objects.all().order_by("name")

-    def resolve_types(root, info):
-        return root._text_choices_to_label_value_list(NimbusExperiment.Type)
+    def resolve_types(self, info):
+        return self._text_choices_to_label_value_list(NimbusExperiment.Type)


 class NimbusExperimentType(DjangoObjectType):

@@ -454,9 +455,9 @@ class NimbusExperimentType(DjangoObjectType):
         return self.projects.all()

     def resolve_reference_branch(self, info):
-        if self.reference_branch:
-            return self.reference_branch
-        return NimbusBranch(name=NimbusConstants.DEFAULT_REFERENCE_BRANCH_NAME)
+        return self.reference_branch or NimbusBranch(
+            name=NimbusConstants.DEFAULT_REFERENCE_BRANCH_NAME
+        )

     def resolve_treatment_branches(self, info):
         if self.branches.exists():

@@ -11,7 +11,7 @@ from experimenter.experiments.models import NimbusExperiment

 class NimbusExperimentCsvRenderer(CSVRenderer):
     header = NimbusExperimentCsvSerializer.Meta.fields
-    labels = dict(((field, field.replace("_", " ").title()) for field in header))
+    labels = {field: field.replace("_", " ").title() for field in header}


 class NimbusExperimentCsvListView(ListAPIView):
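The labels change above swaps dict() over a generator of pairs for a dict comprehension. A tiny sketch of the pattern with illustrative field names:

# Dict comprehension instead of dict() over (key, value) pairs
header = ["proposed_start_date", "firefox_min_version"]
labels = {field: field.replace("_", " ").title() for field in header}
print(labels)  # {'proposed_start_date': 'Proposed Start Date', 'firefox_min_version': 'Firefox Min Version'}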
@@ -46,7 +46,10 @@ class Migration(migrations.Migration):
                         max_length=255,
                     ),
                 ),
-                ("pref_key", models.CharField(blank=True, max_length=255, null=True)),
+                (
+                    "pref_key",
+                    models.CharField(blank=True, max_length=255, null=True),
+                ),
                 (
                     "pref_type",
                     models.CharField(

@@ -61,7 +64,8 @@ class Migration(migrations.Migration):
                 (
                     "pref_branch",
                     models.CharField(
-                        choices=[("default", "default"), ("user", "user")], max_length=255
+                        choices=[("default", "default"), ("user", "user")],
+                        max_length=255,
                     ),
                 ),
                 ("firefox_version", models.CharField(max_length=255)),

@@ -81,9 +85,15 @@ class Migration(migrations.Migration):
                 ("name", models.CharField(max_length=255, unique=True)),
                 ("slug", models.SlugField(max_length=255, unique=True)),
                 ("objectives", models.TextField(default="")),
-                ("analysis", models.TextField(blank=True, default="", null=True)),
+                (
+                    "analysis",
+                    models.TextField(blank=True, default="", null=True),
+                ),
                 ("dashboard_url", models.URLField(blank=True, null=True)),
-                ("dashboard_image_url", models.URLField(blank=True, null=True)),
+                (
+                    "dashboard_image_url",
+                    models.URLField(blank=True, null=True),
+                ),
                 (
                     "population_percent",
                     models.DecimalField(decimal_places=4, default="0", max_digits=7),

@@ -97,7 +107,10 @@ class Migration(migrations.Migration):
                     ),
                 ),
             ],
-            options={"verbose_name": "Experiment", "verbose_name_plural": "Experiments"},
+            options={
+                "verbose_name": "Experiment",
+                "verbose_name_plural": "Experiments",
+            },
         ),
         migrations.CreateModel(
             name="ExperimentChangeLog",

@@ -182,7 +195,10 @@ class Migration(migrations.Migration):
                 ("is_control", models.BooleanField(default=False)),
                 ("description", models.TextField(default="")),
                 ("ratio", models.PositiveIntegerField(default=1)),
-                ("value", django.contrib.postgres.fields.jsonb.JSONField(default=False)),
+                (
+                    "value",
+                    django.contrib.postgres.fields.jsonb.JSONField(default=False),
+                ),
                 (
                     "experiment",
                     models.ForeignKey(

@@ -199,6 +215,9 @@ class Migration(migrations.Migration):
         ),
         migrations.AlterUniqueTogether(
             name="experimentvariant",
-            unique_together=set([("is_control", "experiment"), ("slug", "experiment")]),
+            unique_together={
+                ("is_control", "experiment"),
+                ("slug", "experiment"),
+            },
         ),
     ]

@@ -15,7 +15,7 @@ def update_dates(apps, schema_editor):  # pragma: no cover
                 experiment=experiment, old_status="Accepted", new_status="Launched"
             ).changed_on
             experiment.proposed_start_date = start_date
-        except:
+        except Exception:
             pass

         try:
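The bare except above becomes except Exception, which still swallows ordinary errors but lets control-flow exceptions such as KeyboardInterrupt and SystemExit propagate. A short sketch of the distinction with a made-up helper:

# "except Exception:" is narrower than a bare "except:"; narrower still would be ValueError here
def safe_int(value):
    try:
        return int(value)
    except Exception:
        return None

print(safe_int("42"), safe_int("n/a"))  # 42 None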
@@ -23,7 +23,7 @@ def update_dates(apps, schema_editor):  # pragma: no cover
                 experiment=experiment, old_status="Launched", new_status="Complete"
             ).changed_on
             experiment.proposed_end_date = end_date
-        except:
+        except Exception:
             pass
         experiment.save()

@@ -32,7 +32,7 @@ def update_versions(apps, schema_editor):  # pragma: no cover
     Experiment = apps.get_model("experiments", "Experiment")

     for experiment in Experiment.objects.all():
-        experiment.firefox_version = "{}.0".format(experiment.firefox_version)
+        experiment.firefox_version = f"{experiment.firefox_version}.0"
         experiment.save()


@@ -40,7 +40,7 @@ def add_project_to_name(apps, schema_editor):  # pragma: no cover
     Experiment = apps.get_model("experiments", "Experiment")

     for experiment in Experiment.objects.all():
-        experiment.name = "{} {}".format(experiment.project.name, experiment.name)
+        experiment.name = f"{experiment.project.name} {experiment.name}"
         experiment.save()


@@ -17,6 +17,6 @@ class Migration(migrations.Migration):
             field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
         ),
         migrations.AlterUniqueTogether(
-            name="experimentvariant", unique_together=set([("slug", "experiment")])
+            name="experimentvariant", unique_together={("slug", "experiment")}
         ),
     ]

@@ -9,8 +8,8 @@ class Migration(migrations.Migration):

     dependencies = [("experiments", "0068_experiment_related_to")]

-    def format_to_new_changelog(apps, schema_editor):
-        ExperimentChangeLog = apps.get_model("experiments", "ExperimentChangeLog")
+    def format_to_new_changelog(self, schema_editor):
+        ExperimentChangeLog = self.get_model("experiments", "ExperimentChangeLog")
         for changeLog in ExperimentChangeLog.objects.all():
             changed_values = {}
             # ensure change log has new_values

@@ -12,11 +12,11 @@ class Migration(migrations.Migration):

     dependencies = [("experiments", "0071_auto_20190911_1515")]

-    def prune_new_changelog(apps, schema_editor):
+    def prune_new_changelog(self, schema_editor):

-        ExperimentChangeLog = apps.get_model("experiments", "ExperimentChangeLog")
+        ExperimentChangeLog = self.get_model("experiments", "ExperimentChangeLog")

-        Experiment = apps.get_model("experiments", "experiment")
+        Experiment = self.get_model("experiments", "experiment")

         for experiment in Experiment.objects.all():
             change_logs = experiment.changes.filter(old_status=None).order_by(

@@ -36,7 +36,7 @@ class Migration(migrations.Migration):
             current_date = None
             for change in changes:
                 if change.changed_on.date() != current_date:
-                    seen_edits = set([change.changed_by.email])
+                    seen_edits = {change.changed_by.email}
                     current_date = change.changed_on.date()
                 elif change.changed_by.email in seen_edits:
                     change.delete()

@@ -9,9 +9,9 @@ class Migration(migrations.Migration):
         ("experiments", "0120_add_feature_config"),
     ]

-    def prune_new_changelog(apps, schema_editor):
+    def prune_new_changelog(self, schema_editor):

-        ExperimentChangeLog = apps.get_model("experiments", "ExperimentChangeLog")
+        ExperimentChangeLog = self.get_model("experiments", "ExperimentChangeLog")

         ExperimentChangeLog.objects.filter(
             message="Added Version(s)",

@@ -9,9 +9,9 @@ class Migration(migrations.Migration):
         ("experiments", "0121_prune_add_version_changelogs"),
     ]

-    def prune_new_changelog(apps, schema_editor):
+    def prune_new_changelog(self, schema_editor):

-        ExperimentChangeLog = apps.get_model("experiments", "ExperimentChangeLog")
+        ExperimentChangeLog = self.get_model("experiments", "ExperimentChangeLog")

         ExperimentChangeLog.objects.filter(
             message="Added Version(s)",

@@ -7,9 +9,9 @@ class Migration(migrations.Migration):
         ("experiments", "0132_auto_20201103_1710"),
     ]

-    def prune_new_changelog(apps, schema_editor):
+    def prune_new_changelog(self, schema_editor):

-        ExperimentChangeLog = apps.get_model("experiments", "ExperimentChangeLog")
+        ExperimentChangeLog = self.get_model("experiments", "ExperimentChangeLog")

         ExperimentChangeLog.objects.filter(
             message="Updated Population Percent", changed_values={}

@@ -7,7 +7,7 @@ def restore_feature_configs(apps, schema_editor):
     NimbusExperiment = apps.get_model("experiments", "NimbusExperiment")
     NimbusFeatureConfig = apps.get_model("experiments", "NimbusFeatureConfig")
     for experiment in NimbusExperiment.objects.all():
-        feature_slug = (
+        if feature_slug := (
             experiment.changes.all()
             .exclude(experiment_data__feature_config=None)
             .exclude(experiment_data__feature_config__slug="no-feature-firefox-desktop")

@@ -16,9 +16,7 @@ def restore_feature_configs(apps, schema_editor):
             .order_by("-changed_on")
             .values_list("experiment_data__feature_config__slug", flat=True)
             .first()
-        )
-
-        if feature_slug:
+        ):
             feature_config = NimbusFeatureConfig.objects.get(slug=feature_slug)
             experiment.feature_config = feature_config
             experiment.save()

@@ -10,9 +10,7 @@ def update_languages_field_for_mobile_client(apps, schema_editor):
     ):
         for locale in experiment.locales.all():
             locale_code = locale.code[:2]
-            language = Language.objects.filter(code=locale_code).first()
-            if language:
-
+            if language := Language.objects.filter(code=locale_code).first():
                 experiment.languages.add(language.id)
         experiment.locales.clear()


@@ -11,8 +11,7 @@ def update_results_data_schema(apps, schema_editor):
         if data is not None:
             for key, value in data.items():
                 if value is not None and key in windows and "all" not in value:
-                    data[key] = {}
-                    data[key]["all"] = value
+                    data[key] = {"all": value}
                 else:
                     data[key] = value

@@ -10,11 +10,10 @@ def analysis_add_basis(apps, schema_editor):
     windows = ["daily", "weekly", "overall"]
     default_analysis_basis = "enrollments"
     for experiment in NimbusExperiment.objects.all():
-        new_data = {}
         results = experiment.results_data
         if results is not None and "v2" not in results:
             data = results["v1"]
-            new_data["v1"] = copy.deepcopy(data)
+            new_data = {"v1": copy.deepcopy(data)}
             if data is not None:
                 for key, value in data.items():
                     if (

@@ -22,8 +21,7 @@ def analysis_add_basis(apps, schema_editor):
                         and key in windows
                         and default_analysis_basis not in value
                     ):
-                        data[key] = {}
-                        data[key][default_analysis_basis] = value
+                        data[key] = {default_analysis_basis: value}
                     else:
                         data[key] = value

@@ -334,29 +334,25 @@ class NimbusExperiment(NimbusConstants, TargetingConstants, FilterMixin, models.
             sticky_expressions.append(self.targeting_config.targeting)

         is_desktop = self.application == self.Application.DESKTOP
-        if is_desktop:
-            if self.channel:
-                expressions.append(f'browserSettings.update.channel == "{self.channel}"')
+        if is_desktop and self.channel:
+            expressions.append(f'browserSettings.update.channel == "{self.channel}"')

         sticky_expressions.extend(self._get_targeting_min_version())
         expressions.extend(self._get_targeting_max_version())

-        locales = self.locales.all()
-        if locales:
+        if locales := self.locales.all():
             locales = [locale.code for locale in sorted(locales, key=lambda l: l.code)]

             sticky_expressions.append(f"locale in {locales}")

-        languages = self.languages.all()
-        if languages:
+        if languages := self.languages.all():
             languages = [
                 language.code for language in sorted(languages, key=lambda l: l.code)
             ]

             sticky_expressions.append(f"language in {languages}")

-        countries = self.countries.all()
-        if countries:
+        if countries := self.countries.all():
             countries = [
                 country.code for country in sorted(countries, key=lambda c: c.code)
             ]

@@ -365,10 +361,10 @@ class NimbusExperiment(NimbusConstants, TargetingConstants, FilterMixin, models.
         if self.is_sticky and sticky_expressions:
             sticky_clause = "is_already_enrolled"
             if is_desktop:
-                sticky_clause = "experiment.slug in activeExperiments"
                 if self.is_rollout:
                     sticky_clause = "experiment.slug in activeRollouts"
-
+                else:
+                    sticky_clause = "experiment.slug in activeExperiments"
             sticky_expressions_joined = " && ".join(
                 [f"({expression})" for expression in sticky_expressions]
             )

@@ -381,10 +377,11 @@ class NimbusExperiment(NimbusConstants, TargetingConstants, FilterMixin, models.
             expressions.extend(f"!('{pref}'|preferenceIsUserSet)" for pref in prefs)

         # If there is no targeting defined all clients should match, so we return "true"
-        if len(expressions) == 0:
-            return "true"
-
-        return " && ".join([f"({expression})" for expression in expressions])
+        return (
+            " && ".join([f"({expression})" for expression in expressions])
+            if expressions
+            else "true"
+        )

     @property
     def application_config(self):

@@ -416,14 +413,12 @@ class NimbusExperiment(NimbusConstants, TargetingConstants, FilterMixin, models.
             return self._start_date

         if self.is_started:
-            start_changelogs = [
+            if start_changelogs := [
                 c
                 for c in self.changes.all()
                 if c.old_status == NimbusExperiment.Status.DRAFT
                 and c.new_status == NimbusExperiment.Status.LIVE
-            ]
-
-            if start_changelogs:
+            ]:
                 start_date = sorted(start_changelogs, key=lambda c: c.changed_on)[
                     -1
                 ].changed_on.date()

@@ -443,14 +438,12 @@ class NimbusExperiment(NimbusConstants, TargetingConstants, FilterMixin, models.

         if self.status == self.Status.COMPLETE:
             changes = self.changes.all()
-            end_changelogs = [
+            if end_changelogs := [
                 c
                 for c in changes
                 if c.old_status == self.Status.LIVE
                 and c.new_status == self.Status.COMPLETE
-            ]
-
-            if end_changelogs:
+            ]:
                 end_date = sorted(end_changelogs, key=lambda c: c.changed_on)[
                     -1
                 ].changed_on.date()

@@ -474,7 +467,7 @@ class NimbusExperiment(NimbusConstants, TargetingConstants, FilterMixin, models.
             return (self._enrollment_end_date - self._start_date).days

         changes = self.changes.all()
-        paused_changelogs = [
+        if paused_changelogs := [
             c
             for c in changes
             if c.experiment_data is not None

@@ -483,9 +476,7 @@ class NimbusExperiment(NimbusConstants, TargetingConstants, FilterMixin, models.
             and c.new_status == NimbusExperiment.Status.LIVE
             and c.new_status_next is None
            and c.new_publish_status == NimbusExperiment.PublishStatus.IDLE
-        ]
-
-        if paused_changelogs:
+        ]:
             paused_change = sorted(paused_changelogs, key=lambda c: c.changed_on)[-1]
             self._enrollment_end_date = paused_change.changed_on.date()
             self.save()

@@ -505,10 +496,7 @@ class NimbusExperiment(NimbusConstants, TargetingConstants, FilterMixin, models.

     @property
     def computed_end_date(self):
-        if self.end_date:
-            return self.end_date
-        else:
-            return self.proposed_end_date
+        return self.end_date or self.proposed_end_date

     @property
     def enrollment_duration(self):

@@ -590,9 +578,10 @@ class NimbusExperiment(NimbusConstants, TargetingConstants, FilterMixin, models.
             self.application_config.slug,
         ]

-        for feature_config in self.feature_configs.all().order_by("slug"):
-            keys.append(feature_config.slug)
-
+        keys.extend(
+            feature_config.slug
+            for feature_config in self.feature_configs.all().order_by("slug")
+        )
         if self.channel:
             keys.append(self.channel)
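The last hunk above replaces an append loop with list.extend() over a generator expression, which accepts any iterable. A tiny illustrative sketch with made-up slugs:

# list.extend() consumes a generator, so no intermediate list or explicit loop is needed
keys = ["firefox-desktop"]
feature_slugs = ["feature-b", "feature-a"]
keys.extend(slug for slug in sorted(feature_slugs))
print(keys)  # ['firefox-desktop', 'feature-a', 'feature-b']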
@@ -833,10 +822,7 @@ class NimbusBranchScreenshot(models.Model):
         ordering = ["id"]

     def delete(self, *args, **kwargs):
-        old_image_name = None
-        if self.image and self.image.name:
-            old_image_name = self.image.name
-
+        old_image_name = self.image.name if self.image and self.image.name else None
         super().delete(*args, **kwargs)

         if old_image_name and self.image.storage.exists(old_image_name):

@@ -1104,13 +1090,10 @@ class NimbusChangeLog(FilterMixin, models.Model):
         COMPLETED = "Experiment is complete"

     def __str__(self):
-        if self.message:
-            return self.message
-        else:
-            return (
-                f"{self.old_status} > {self.new_status} "
-                f"by {self.changed_by} on {self.changed_on}"
-            )
+        return (
+            self.message
+            or f"{self.old_status} > {self.new_status} by {self.changed_by} on {self.changed_on}"
+        )


 class NimbusEmail(models.Model):

@@ -1112,7 +1112,7 @@ class TestUpdateExperimentMutationMultiFeature(GraphQLTestCase):
         experiment = NimbusExperiment.objects.get(id=experiment.id)
         self.assertEqual(
             set(experiment.feature_configs.all().values_list("id", flat=True)),
-            set([feature1.id, feature2.id]),
+            {feature1.id, feature2.id},
         )

         self.assertEqual(experiment.branches.count(), 2)

@@ -1497,9 +1497,7 @@ class TestNimbusConfigQuery(GraphQLTestCase):
         ]
         self.assertEqual(
             set(channels),
-            set(
-                [channel.name for channel in application_config.channel_app_id.keys()]
-            ),
+            {channel.name for channel in application_config.channel_app_id.keys()},
         )

         self.assertEqual(config["owners"], [{"username": experiment.owner.username}])

@@ -54,9 +54,7 @@ class TestNimbusConfigurationSerializer(TestCase):
         ]
         self.assertEqual(
             set(channels),
-            set(
-                [channel.name for channel in application_config.channel_app_id.keys()]
-            ),
+            {channel.name for channel in application_config.channel_app_id.keys()},
         )

         self.assertEqual(config["owners"], [{"username": experiment.owner.username}])

@@ -258,7 +258,7 @@ class TestNimbusExperimentBranchMixinSingleFeature(TestCase):
                 "reference_branch": {"name": NimbusConstants.ERROR_DUPLICATE_BRANCH_NAME},
                 "treatment_branches": [
                     {"name": NimbusConstants.ERROR_DUPLICATE_BRANCH_NAME}
-                    for i in data["treatment_branches"]
+                    for _ in data["treatment_branches"]
                 ],
             },
         )

@@ -315,7 +315,7 @@ class TestNimbusExperimentBranchMixinMultiFeature(TestCase):
         experiment = NimbusExperiment.objects.get(id=experiment.id)
         self.assertEqual(
             set(experiment.feature_configs.all().values_list("id", flat=True)),
-            set([feature_config1.id, feature_config2.id]),
+            {feature_config1.id, feature_config2.id},
         )

         self.assertEqual(experiment.reference_branch.name, "control")

@@ -327,7 +327,7 @@ class TestNimbusExperimentBranchMixinMultiFeature(TestCase):
                     "feature_config__id", flat=True
                 )
             ),
-            set([feature_config1.id, feature_config2.id]),
+            {feature_config1.id, feature_config2.id},
        )

        self.assertEqual(len(experiment.treatment_branches), 1)

@@ -340,7 +340,7 @@ class TestNimbusExperimentBranchMixinMultiFeature(TestCase):
                 .feature_values.all()
                 .values_list("feature_config__id", flat=True)
             ),
-            set([feature_config1.id, feature_config2.id]),
+            {feature_config1.id, feature_config2.id},
         )

     def test_serializer_replace_branches(self):

@@ -387,7 +387,7 @@ class TestNimbusExperimentBranchMixinMultiFeature(TestCase):
         experiment = NimbusExperiment.objects.get(id=experiment.id)
         self.assertEqual(
             set(experiment.feature_configs.all().values_list("id", flat=True)),
-            set([feature_config1.id, feature_config2.id]),
+            {feature_config1.id, feature_config2.id},
         )
         self.assertEqual(
             set(experiment.branches.all().values_list("id", flat=True)).intersection(

@@ -405,7 +405,7 @@ class TestNimbusExperimentBranchMixinMultiFeature(TestCase):
                     "feature_config__id", flat=True
                 )
             ),
-            set([feature_config1.id, feature_config2.id]),
+            {feature_config1.id, feature_config2.id},
         )

         self.assertEqual(len(experiment.treatment_branches), 1)

@@ -418,7 +418,7 @@ class TestNimbusExperimentBranchMixinMultiFeature(TestCase):
                 .feature_values.all()
                 .values_list("feature_config__id", flat=True)
             ),
-            set([feature_config1.id, feature_config2.id]),
+            {feature_config1.id, feature_config2.id},
         )

     def test_serializer_update_branches_with_ids(self):

@@ -467,7 +467,7 @@ class TestNimbusExperimentBranchMixinMultiFeature(TestCase):
         experiment = NimbusExperiment.objects.get(id=experiment.id)
         self.assertEqual(
             set(experiment.feature_configs.all().values_list("id", flat=True)),
-            set([feature_config1.id, feature_config2.id]),
+            {feature_config1.id, feature_config2.id},
         )
         self.assertEqual(
             set(experiment.branches.all().values_list("id", flat=True)), branch_ids

@@ -482,7 +482,7 @@ class TestNimbusExperimentBranchMixinMultiFeature(TestCase):
                     "feature_config__id", flat=True
                 )
             ),
-            set([feature_config1.id, feature_config2.id]),
+            {feature_config1.id, feature_config2.id},
         )

         self.assertEqual(len(experiment.treatment_branches), 1)

@@ -495,5 +495,5 @@ class TestNimbusExperimentBranchMixinMultiFeature(TestCase):
                 .feature_values.all()
                 .values_list("feature_config__id", flat=True)
             ),
-            set([feature_config1.id, feature_config2.id]),
+            {feature_config1.id, feature_config2.id},
         )

@@ -66,7 +66,7 @@ class TestNimbusExperimentCsvSerializer(TestCase):
                 "start_date": experiment.start_date,
                 "enrollment_duration": experiment.enrollment_duration,
                 "end_date": experiment.end_date,
-                "results_url": experiment.experiment_url + "results",
+                "results_url": f"{experiment.experiment_url}results",
                 "experiment_summary": experiment.experiment_url,
                 "rollout": experiment.is_rollout,
                 "hypothesis": experiment.hypothesis,

@@ -40,14 +40,13 @@ class TestNimbusExperimentDocumentationLinkMixin(TestCase):
         experiment = NimbusExperimentFactory.create(
             status=NimbusExperiment.Status.DRAFT,
         )
-        links_before = []
-        for link in experiment.documentation_links.all():
-            links_before.append(
-                {
-                    "title": link.title,
-                    "link": link.link,
-                }
-            )
+        links_before = [
+            {
+                "title": link.title,
+                "link": link.link,
+            }
+            for link in experiment.documentation_links.all()
+        ]
         data = {
             "public_description": "changed",
             "changelog_message": "test changelog message",

@@ -68,14 +67,13 @@ class TestNimbusExperimentDocumentationLinkMixin(TestCase):
         experiment = NimbusExperimentFactory.create(
             status=NimbusExperiment.Status.DRAFT,
         )
-        links_before = []
-        for link in experiment.documentation_links.all():
-            links_before.append(
-                {
-                    "title": link.title,
-                    "link": link.link,
-                }
-            )
+        links_before = [
+            {
+                "title": link.title,
+                "link": link.link,
+            }
+            for link in experiment.documentation_links.all()
+        ]
         data = {
             "public_description": "changed reference",
             "reference_branch": {

@@ -166,7 +166,7 @@ class TestNimbusExperimentSerializer(TestCase):
             },
         )

-        self.assertEqual(set(feature_ids_data), set([feature1.slug, feature2.slug]))
+        self.assertEqual(set(feature_ids_data), {feature1.slug, feature2.slug})

         self.assertEqual(
             bucket_data,

@@ -337,7 +337,7 @@ class NimbusExperimentFactory(factory.django.DjangoModelFactory):
            for project in extracted:
                self.projects.add(project)
        else:
-            for i in range(3):
+            for _ in range(3):
                self.projects.add(ProjectFactory.create())

    @factory.post_generation

@@ -456,10 +456,9 @@ class NimbusExperimentFactory(factory.django.DjangoModelFactory):
            if (
                experiment.status == experiment.Status.COMPLETE
                and experiment.status_next is None
-            ):
-                if end_date is not None:
-                    experiment._end_date = end_date
-                    current_datetime = end_date
+            ) and end_date is not None:
+                experiment._end_date = end_date
+                current_datetime = end_date

            experiment.save()

@@ -536,7 +535,7 @@ class NimbusBranchScreenshotFactory(factory.django.DjangoModelFactory):
     branch = factory.SubFactory(NimbusBranchFactory)
     description = factory.LazyAttribute(lambda o: faker.text())
     image = factory.LazyAttribute(
-        lambda o: SimpleUploadedFile(name="%s.png" % faker.slug(), content=TINY_PNG)
+        lambda o: SimpleUploadedFile(name=f"{faker.slug()}.png", content=TINY_PNG)
     )

     class Meta:

@@ -250,10 +250,10 @@ class TestNimbusExperimentExport(TestCase):
             NimbusExperimentFactory.Lifecycles.ENDING_APPROVE_APPROVE
         )
         experiment.reference_branch = None
-        branches = []
-        for b in experiment.branches.all():
-            branches.append(dict(NimbusBranchChangeLogSerializer(b).data))
-
+        branches = [
+            dict(NimbusBranchChangeLogSerializer(b).data)
+            for b in experiment.branches.all()
+        ]
         changes = []
         num_changes = 3
         for _ in range(num_changes):

@@ -22,9 +22,12 @@ BRANCH_DATA = "branch_data"
 STATISTICS_FOLDER = "statistics"
 METADATA_FOLDER = "metadata"
 ERRORS_FOLDER = "errors"
-ALL_STATISTICS = set(
-    [Statistic.BINOMIAL, Statistic.MEAN, Statistic.COUNT, Statistic.PERCENT]
-)
+ALL_STATISTICS = {
+    Statistic.BINOMIAL,
+    Statistic.MEAN,
+    Statistic.COUNT,
+    Statistic.PERCENT,
+}


 class AnalysisWindow:

@@ -62,10 +65,10 @@ def get_results_metrics_map(
     # A mapping of metric label to relevant statistic. This is
     # used to see which statistic will be used for each metric.
     RESULTS_METRICS_MAP = {
-        Metric.RETENTION: set([Statistic.BINOMIAL]),
-        Metric.SEARCH: set([Statistic.MEAN]),
-        Metric.DAYS_OF_USE: set([Statistic.MEAN]),
-        Metric.USER_COUNT: set([Statistic.COUNT, Statistic.PERCENT]),
+        Metric.RETENTION: {Statistic.BINOMIAL},
+        Metric.SEARCH: {Statistic.MEAN},
+        Metric.DAYS_OF_USE: {Statistic.MEAN},
+        Metric.USER_COUNT: {Statistic.COUNT, Statistic.PERCENT},
     }
     primary_metrics_set = set()
     primary_outcome_metrics = list(

@@ -104,7 +107,7 @@ def get_results_metrics_map(
         other_metrics_map, other_metrics = get_other_metrics_names_and_map(
             data, RESULTS_METRICS_MAP
         )
-        RESULTS_METRICS_MAP.update(other_metrics_map)
+        RESULTS_METRICS_MAP |= other_metrics_map

     return RESULTS_METRICS_MAP, primary_metrics_set, other_metrics
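The |= above is the dict union-assignment operator (PEP 584), available from Python 3.9 — consistent with the python_version: '3.9' floor declared in the .sourcery.yaml added by this commit. A short sketch of the behaviour with made-up metric names:

# dict |= other behaves like dict.update(other): right-hand keys win on conflict
results_metrics_map = {"retention": {"binomial"}}
other_metrics_map = {"searches": {"mean"}}
results_metrics_map |= other_metrics_map
print(results_metrics_map)  # {'retention': {'binomial'}, 'searches': {'mean'}}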
@ -138,7 +141,7 @@ def get_other_metrics_names_and_map(data, RESULTS_METRICS_MAP):
|
|||
|
||||
# Turn other_metrics_map into the format needed
|
||||
# by get_result_metrics_map()
|
||||
other_metrics_map = {k: set([v]) for k, v in other_metrics_map.items()}
|
||||
other_metrics_map = {k: {v} for k, v in other_metrics_map.items()}
|
||||
|
||||
return other_metrics_map, other_metrics_names
|
||||
|
||||
|
|
|
@ -191,7 +191,6 @@ class ResultsObjectModelBase(BaseModel):
|
|||
super(ResultsObjectModelBase, self).__init__()
|
||||
|
||||
for jetstream_data_point in data:
|
||||
branch = jetstream_data_point.branch
|
||||
metric = jetstream_data_point.metric
|
||||
statistic = jetstream_data_point.statistic
|
||||
|
||||
|
@ -210,6 +209,7 @@ class ResultsObjectModelBase(BaseModel):
|
|||
window_index = 1 if window == "overall" else jetstream_data_point.window_index
|
||||
|
||||
if metric in result_metrics and statistic in result_metrics[metric]:
|
||||
branch = jetstream_data_point.branch
|
||||
branch_obj = getattr(self, branch)
|
||||
branch_obj.is_control = experiment.reference_branch.slug == branch
|
||||
group_obj = getattr(
|
||||
|
|
|
@ -918,9 +918,11 @@ class TestFetchJetstreamDataTask(TestCase):
|
|||
def read(self):
|
||||
if "metadata" in self.name:
|
||||
return "{}"
|
||||
if "errors" in self.name:
|
||||
return "[]"
|
||||
return json.dumps(DAILY_DATA + SEGMENT_DATA)
|
||||
return (
|
||||
"[]"
|
||||
if "errors" in self.name
|
||||
else json.dumps(DAILY_DATA + SEGMENT_DATA)
|
||||
)
|
||||
|
||||
def open_file(filename):
|
||||
return File(filename)
|
||||
|
@ -974,9 +976,7 @@ class TestFetchJetstreamDataTask(TestCase):
|
|||
def read(self):
|
||||
if "metadata" in self.name:
|
||||
return "{}"
|
||||
if "errors" in self.name:
|
||||
return "[]"
|
||||
return json.dumps(DAILY_DATA)
|
||||
return "[]" if "errors" in self.name else json.dumps(DAILY_DATA)
|
||||
|
||||
def open_file(filename):
|
||||
return File(filename)
|
||||
|
|
|
@ -67,9 +67,7 @@ def nimbus_check_kinto_push_queue_by_collection(collection):
|
|||
should_rollback = False
|
||||
if kinto_client.has_pending_review():
|
||||
logger.info(f"{collection} has pending review")
|
||||
should_abort = handle_pending_review(applications)
|
||||
|
||||
if should_abort:
|
||||
if should_abort := handle_pending_review(applications):
|
||||
return
|
||||
|
||||
should_rollback = True
|
||||
|
@ -105,9 +103,7 @@ def nimbus_check_kinto_push_queue_by_collection(collection):
|
|||
|
||||
|
||||
def handle_pending_review(applications):
|
||||
experiment = NimbusExperiment.objects.waiting(applications).first()
|
||||
|
||||
if experiment:
|
||||
if experiment := NimbusExperiment.objects.waiting(applications).first():
|
||||
if experiment.should_timeout:
|
||||
experiment.publish_status = NimbusExperiment.PublishStatus.REVIEW
|
||||
experiment.save()
|
||||
|
@ -126,9 +122,7 @@ def handle_pending_review(applications):
|
|||
|
||||
def handle_rejection(applications, kinto_client):
|
||||
collection_data = kinto_client.get_rejected_collection_data()
|
||||
experiment = NimbusExperiment.objects.waiting(applications).first()
|
||||
|
||||
if experiment:
|
||||
if experiment := NimbusExperiment.objects.waiting(applications).first():
|
||||
experiment.publish_status = NimbusExperiment.PublishStatus.IDLE
|
||||
experiment.status_next = None
|
||||
experiment.is_paused = False
|
||||
|
|
|
@ -128,11 +128,11 @@ def make_bugzilla_call(url, method, data=None):
|
|||
response = method(url, data)
|
||||
return response.json()
|
||||
except requests.exceptions.RequestException as e:
|
||||
logging.exception("Error calling Bugzilla API: {}".format(e))
|
||||
raise BugzillaError(*e.args)
|
||||
logging.exception(f"Error calling Bugzilla API: {e}")
|
||||
raise BugzillaError(*e.args) from e
|
||||
except ValueError as e:
|
||||
logging.exception("Error parsing JSON Bugzilla response: {}".format(e))
|
||||
raise BugzillaError(*e.args)
|
||||
logging.exception(f"Error parsing JSON Bugzilla response: {e}")
|
||||
raise BugzillaError(*e.args) from e
|
||||
|
||||
|
||||
def format_creation_bug_body(experiment, extra_fields):
|
||||
|
@ -150,13 +150,13 @@ def format_creation_bug_body(experiment, extra_fields):
|
|||
"priority": "P3",
|
||||
"url": experiment.experiment_url,
|
||||
}
|
||||
bug_data.update(extra_fields)
|
||||
bug_data |= extra_fields
|
||||
return bug_data
|
||||
|
||||
|
||||
def format_summary(experiment):
|
||||
|
||||
truncated_name = experiment.name[0:EXPERIMENT_NAME_MAX_LEN]
|
||||
truncated_name = experiment.name[:EXPERIMENT_NAME_MAX_LEN]
|
||||
|
||||
if truncated_name != experiment.name:
|
||||
truncated_name += "..."
|
||||
|
@ -188,9 +188,7 @@ def format_normandy_experiment_request(experiment):
|
|||
|
||||
extra_fields = {"assigned_to": assigned_to, "blocks": blocks}
|
||||
|
||||
bug_data = format_creation_bug_body(experiment, extra_fields)
|
||||
|
||||
return bug_data
|
||||
return format_creation_bug_body(experiment, extra_fields)
|
||||
|
||||
|
||||
def get_bugzilla_id(bug_url):
|
||||
|
|
|
@@ -127,9 +127,7 @@ def add_start_date_comment_task(experiment_id):
     experiment = Experiment.objects.get(id=experiment_id)
     metrics.incr("add_start_data_comment.started")
     logger.info("Adding Bugzilla Start Date Comment")
-    comment = "Start Date: {} End Date: {}".format(
-        experiment.start_date, experiment.end_date
-    )
+    comment = f"Start Date: {experiment.start_date} End Date: {experiment.end_date}"
     try:
         bugzilla_id = experiment.bugzilla_id
         bugzilla.add_experiment_comment(bugzilla_id, comment)


@@ -257,9 +257,7 @@ class TestAddExperimentComment(MockBugzillaMixin, TestCase):
             bugzilla_id="123",
             type=Experiment.TYPE_PREF,
         )
-        comment = "Start Date: {} End Date: {}".format(
-            experiment.start_date, experiment.end_date
-        )
+        comment = f"Start Date: {experiment.start_date} End Date: {experiment.end_date}"

         comment_id = add_experiment_comment(experiment.bugzilla_id, comment)

@@ -278,9 +276,7 @@ class TestAddExperimentComment(MockBugzillaMixin, TestCase):
             type=Experiment.TYPE_ADDON,
         )

-        comment = "Start Date: {} End Date: {}".format(
-            experiment.start_date, experiment.end_date
-        )
+        comment = f"Start Date: {experiment.start_date} End Date: {experiment.end_date}"

         comment_id = add_experiment_comment(experiment.bugzilla_id, comment)


@@ -299,9 +299,7 @@ class TestUpdateTask(MockRequestMixin, MockBugzillaMixin, TestCase):
 class TestUpdateExperimentSubTask(MockNormandyMixin, MockBugzillaMixin, TestCase):
     def test_add_start_date_comment_task(self):
         experiment = ExperimentFactory.create(normandy_id=12345)
-        comment = "Start Date: {} End Date: {}".format(
-            experiment.start_date, experiment.end_date
-        )
+        comment = f"Start Date: {experiment.start_date} End Date: {experiment.end_date}"
         expected_call_data = {"comment": comment}

         tasks.add_start_date_comment_task(experiment.id)

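
These hunks only swap str.format() for f-strings; the resulting strings are identical. A tiny sketch with made-up values:

    # Both forms interpolate the same values; the f-string reads the names at the call site.
    start, end = "2023-01-01", "2023-02-01"

    old_style = "Start Date: {} End Date: {}".format(start, end)
    new_style = f"Start Date: {start} End Date: {end}"

    assert old_style == new_style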
@@ -20,10 +20,7 @@ class JSTimestampField(serializers.Field):
     """

     def to_representation(self, obj):
-        if obj:
-            return time.mktime(obj.timetuple()) * 1000
-        else:
-            return None
+        return time.mktime(obj.timetuple()) * 1000 if obj else None


 class PrefTypeField(serializers.Field):

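
The if/else pair collapses into one conditional expression with the same behaviour. A standalone sketch of the refactored field logic; the function name is illustrative:

    import time
    from datetime import datetime

    def to_js_timestamp(obj):
        # Milliseconds since the epoch, or None for falsy input, as in the serializer field.
        return time.mktime(obj.timetuple()) * 1000 if obj else None

    print(to_js_timestamp(datetime(2023, 1, 18)))
    print(to_js_timestamp(None))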
@@ -34,22 +34,23 @@ class PrefValidationMixin(object):
         if pref_type == "Firefox Pref Type":
             errors["pref_type"] = "Please select a pref type"

-        pref_value_error = self.validate_pref_value(pref_type, pref_value, field_name)
-        if pref_value_error:
+        if pref_value_error := self.validate_pref_value(
+            pref_type, pref_value, field_name
+        ):
             errors[field_name] = pref_value_error
         return errors

     def validate_pref_value(self, pref_type, pref_value, field_name):
-        if pref_type == "integer":
+        if pref_type == "boolean":
+            if pref_value not in ["true", "false"]:
+                return {field_name: "The pref value must be a boolean."}
+
+        elif pref_type == "integer":
             try:
                 int(pref_value)
             except ValueError:
                 return {field_name: "The pref value must be an integer."}

-        if pref_type == "boolean":
-            if pref_value not in ["true", "false"]:
-                return {field_name: "The pref value must be a boolean."}
-
         if pref_type == "json string":
             try:
                 json.loads(pref_value)

@@ -58,17 +59,15 @@ class PrefValidationMixin(object):
         return {}

     def is_pref_valid(self, preferences):
-        unique_names = len(
-            set([slugify(pref["pref_name"]) for pref in preferences])
-        ) == len(preferences)
-
-        return unique_names
+        return len({slugify(pref["pref_name"]) for pref in preferences}) == len(
+            preferences
+        )


 class VariantsListSerializer(serializers.ListSerializer):
     def to_representation(self, data):
         data = super().to_representation(data)
-        initial_fields = set(self.child.fields) - set(["id"])
+        initial_fields = set(self.child.fields) - {"id"}

         if data == []:
             blank_variant = {}

@@ -127,9 +126,7 @@ class ExperimentDesignVariantPrefSerializer(ExperimentDesignVariantBaseSerialize
 class PreferenceListSerializer(serializers.ListSerializer):
     def to_representation(self, data):
         data = super().to_representation(data)
-        if data == []:
-            return [{}]
-        return data
+        return [{}] if data == [] else data


 class ExperimentDesignBasePreferenceSerializer(serializers.ModelSerializer):

@@ -184,7 +181,7 @@ class ExperimentDesignBranchMultiPrefSerializer(
         error_list = []
         for pref in preferences:
             errors = {}
-            errors.update(self.validate_pref_branch(pref["pref_branch"]))
+            errors |= self.validate_pref_branch(pref["pref_branch"])
             errors.update(self.validate_multi_preference(pref))
             error_list.append(errors)

@@ -202,23 +199,17 @@ class ExperimentDesignBaseSerializer(
         fields = ("variants",)

     def validate(self, data):
-        variants = data.get("variants")
-
-        if variants:
-            if sum([variant["ratio"] for variant in variants]) != 100:
-                error_list = []
-                for variant in variants:
-                    error_list.append({"ratio": ["All branch sizes must add up to 100."]})
-
+        if variants := data.get("variants"):
+            if sum(variant["ratio"] for variant in variants) != 100:
+                error_list = [
+                    {"ratio": ["All branch sizes must add up to 100."]} for _ in variants
+                ]
                 raise serializers.ValidationError({"variants": error_list})

             if not self.is_variant_valid(variants):
-                error_list = []
-                for variant in variants:
-                    error_list.append(
-                        {"name": [("All branches must have a unique name")]}
-                    )
-
+                error_list = [
+                    {"name": [("All branches must have a unique name")]} for _ in variants
+                ]
                 raise serializers.ValidationError({"variants": error_list})

         return data

@@ -248,22 +239,20 @@ class ExperimentDesignBaseSerializer(
                     ExperimentVariant(**variant_data).save()

                 # Delete removed variants
-                submitted_variant_ids = set(
-                    [v.get("id") for v in variants_data if v.get("id")]
-                )
-                removed_ids = existing_variant_ids - submitted_variant_ids
-
-                if removed_ids:
+                submitted_variant_ids = {
+                    v.get("id") for v in variants_data if v.get("id")
+                }
+                if removed_ids := existing_variant_ids - submitted_variant_ids:
                     ExperimentVariant.objects.filter(id__in=removed_ids).delete()

                 return instance
-        except IntegrityError:
+        except IntegrityError as e:
             error_string = (
                 "Error: unable to save this change, please contact an experimenter admin"
             )
             error = [{"name": error_string}] * len(variants_data)
-            raise serializers.ValidationError({"variants": error})
+            raise serializers.ValidationError({"variants": error}) from e

     def update(self, instance, validated_data):
         instance = self.update_instance(instance, validated_data)

@@ -334,9 +323,7 @@ class ExperimentDesignPrefRolloutSerializer(
                 id=pref_id, defaults=preference_data
             )

-        removed_ids = existing_preference_ids - set(submitted_preference_ids)
-
-        if removed_ids:
+        if removed_ids := existing_preference_ids - set(submitted_preference_ids):
             RolloutPreference.objects.filter(id__in=removed_ids).delete()

         self.update_changelog(instance, validated_data_copy)

@@ -381,9 +368,7 @@ class ExperimentDesignMultiPrefSerializer(ExperimentDesignBaseSerializer):
             pref_id = pref.get("id")
             submitted_pref_ids.append(pref_id)

-        removed_ids = set(existing_pref_ids) - set(submitted_pref_ids)
-
-        if removed_ids:
+        if removed_ids := set(existing_pref_ids) - set(submitted_pref_ids):
             VariantPreferences.objects.filter(id__in=removed_ids).delete()

         self.update_changelog(instance, validated_data)

@@ -424,22 +409,18 @@ class ExperimentDesignPrefSerializer(PrefValidationMixin, ExperimentDesignBaseSe

         variants = data["variants"]

-        if not len(set(variant["value"] for variant in variants)) == len(variants):
-            error_list = []
-            for variant in variants:
-                error_list.append(
-                    {"value": ["All branches must have a unique pref value."]}
-                )
-
+        if len({variant["value"] for variant in variants}) != len(variants):
+            error_list = [
+                {"value": ["All branches must have a unique pref value."]}
+                for _ in variants
+            ]
             raise serializers.ValidationError({"variants": error_list})

-        error_list = []
         pref_type = data.get("pref_type", "")
-        for variant in variants:
-            error_list.append(
-                self.validate_pref_value(pref_type, variant["value"], "value")
-            )
-
+        error_list = [
+            self.validate_pref_value(pref_type, variant["value"], "value")
+            for variant in variants
+        ]
         if any(error_list):
             raise serializers.ValidationError({"variants": error_list})
         return data

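
Several of the serializer hunks above replace an append-in-a-loop accumulator with a list comprehension, and a get-then-check with a walrus assignment. A minimal sketch of the error-list pattern using placeholder data:

    # Build one error entry per branch; both forms produce the same list.
    variants = [{"ratio": 40}, {"ratio": 70}]

    errors = []
    for _ in variants:
        errors.append({"ratio": ["All branch sizes must add up to 100."]})

    errors_comprehension = [
        {"ratio": ["All branch sizes must add up to 100."]} for _ in variants
    ]
    assert errors == errors_comprehension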
@@ -658,30 +639,36 @@ class ExperimentTimelinePopSerializer(
     def validate(self, data):
         data = super().validate(data)

-        if data["proposed_enrollment"] and data["proposed_duration"]:
-            if data["proposed_enrollment"] >= data["proposed_duration"]:
-                raise serializers.ValidationError(
-                    {
-                        "proposed_enrollment": (
-                            "Enrollment duration is optional,"
-                            " but if set, must be lower than the delivery "
-                            "duration. If enrollment duration is not "
-                            "specified - users are enrolled for the"
-                            "entire delivery."
-                        )
-                    }
-                )
+        if (
+            data["proposed_enrollment"]
+            and data["proposed_duration"]
+            and data["proposed_enrollment"] >= data["proposed_duration"]
+        ):
+            raise serializers.ValidationError(
+                {
+                    "proposed_enrollment": (
+                        "Enrollment duration is optional,"
+                        " but if set, must be lower than the delivery "
+                        "duration. If enrollment duration is not "
+                        "specified - users are enrolled for the"
+                        "entire delivery."
+                    )
+                }
+            )

-        if data["firefox_min_version"] and data["firefox_max_version"]:
-            if float(data["firefox_min_version"]) > float(data["firefox_max_version"]):
-                raise serializers.ValidationError(
-                    {
-                        "firefox_max_version": (
-                            "The max version must be larger "
-                            "than or equal to the min version."
-                        )
-                    }
-                )
+        if (
+            data["firefox_min_version"]
+            and data["firefox_max_version"]
+            and float(data["firefox_min_version"]) > float(data["firefox_max_version"])
+        ):
+            raise serializers.ValidationError(
+                {
+                    "firefox_max_version": (
+                        "The max version must be larger "
+                        "than or equal to the min version."
+                    )
+                }
+            )

         return data


@@ -717,11 +704,9 @@ class ExperimentCloneSerializer(serializers.ModelSerializer):
         fields = ("name", "clone_url")

     def validate_name(self, value):
-        existing_slug_or_name = Experiment.objects.filter(
+        if existing_slug_or_name := Experiment.objects.filter(
             Q(slug=slugify(value)) | Q(name=value)
-        )
-
-        if existing_slug_or_name:
+        ):
             raise serializers.ValidationError("This experiment name already exists.")

         if slugify(value):

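
Both timeline validations above fold a nested `if a: if b:` into one compound condition; `and` short-circuits, so the comparison only runs when both values are present. A small self-contained sketch with an illustrative helper name:

    def needs_enrollment_error(enrollment, duration):
        # Mirrors the collapsed condition: only flag when both values exist
        # and enrollment is not shorter than the delivery duration.
        return bool(enrollment and duration and enrollment >= duration)

    assert needs_enrollment_error(30, 14) is True
    assert needs_enrollment_error(None, 14) is False
    assert needs_enrollment_error(7, 14) is False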
@@ -108,7 +108,7 @@ class ExperimentTimelinePopulationView(RetrieveUpdateAPIView):

 class ExperimentCSVRenderer(CSVRenderer):
     header = ExperimentCSVSerializer.Meta.fields
-    labels = dict(((field, field.replace("_", " ").title()) for field in header))
+    labels = {field: field.replace("_", " ").title() for field in header}


 class ExperimentCSVListView(ListAPIView):

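
The renderer's labels move from dict() over a generator of pairs to a dict comprehension; the mapping is unchanged. A runnable sketch with a made-up header list:

    header = ["firefox_min_version", "firefox_channel"]

    labels_old = dict(((field, field.replace("_", " ").title()) for field in header))
    labels_new = {field: field.replace("_", " ").title() for field in header}

    assert labels_old == labels_new == {
        "firefox_min_version": "Firefox Min Version",
        "firefox_channel": "Firefox Channel",
    }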
@@ -233,8 +233,8 @@ def generate_change_log(
             }

     else:
-        old_val = None
         for field in changed_data:
+            old_val = None
             new_val = None
             if field in new_serialized_vals:
                 if field in ("countries", "locales"):

@@ -24,7 +24,7 @@ class ExperimentConstants(object):

     @classmethod
     def FEATURE_TYPE_CHOICES(cls):  # pragma: no cover
-        choices = (
+        return (
             (cls.TYPE_PREF, "Pref-Flip Experiment"),
             (cls.TYPE_ADDON, "Add-On Experiment"),
             (cls.TYPE_GENERIC, "Generic Experiment"),

@@ -32,8 +32,6 @@ class ExperimentConstants(object):
             (cls.TYPE_MESSAGE, "Message Router Content Experiment"),
         )

-        return choices
-
     # Message stuff
     MESSAGE_DEFAULT_LOCALES = ("en-AU", "en-GB", "en-CA", "en-NZ", "en-ZA", "en-US")
     MESSAGE_DEFAULT_COUNTRIES = ("US", "CA", "GB", "DE", "FR")

@@ -135,19 +135,22 @@ def format_and_send_html_email(

 def send_period_ending_emails_task(experiment):
     # send experiment ending soon emails if end date is 5 days out
-    if experiment.ending_soon:
-        if not ExperimentEmail.objects.filter(
+    if (
+        experiment.ending_soon
+        and not ExperimentEmail.objects.filter(
             experiment=experiment, type=ExperimentConstants.EXPERIMENT_ENDS
-        ).exists():
-            send_experiment_ending_email(experiment)
-            logging.info("Sent ending email for Experiment: {}".format(experiment))
+        ).exists()
+    ):
+        send_experiment_ending_email(experiment)
+        logging.info(f"Sent ending email for Experiment: {experiment}")
     # send enrollment ending emails if enrollment end
     # date is 5 days out
-    if experiment.enrollment_end_date and experiment.enrollment_ending_soon:
-        if not ExperimentEmail.objects.filter(
+    if (
+        experiment.enrollment_end_date
+        and experiment.enrollment_ending_soon
+        and not ExperimentEmail.objects.filter(
             experiment=experiment, type=ExperimentConstants.EXPERIMENT_PAUSES
-        ).exists():
-            send_enrollment_pause_email(experiment)
-            logging.info(
-                "Sent enrollment pause email for Experiment: {}".format(experiment)
-            )
+        ).exists()
+    ):
+        send_enrollment_pause_email(experiment)
+        logging.info(f"Sent enrollment pause email for Experiment: {experiment}")

@@ -182,9 +182,7 @@ class ExperimentFilterset(filters.FilterSet):
     )

     def archived_filter(self, queryset, name, value):
-        if not value:
-            return queryset.exclude(archived=True)
-        return queryset
+        return queryset if value else queryset.exclude(archived=True)

     def experiment_date_field_filter(self, queryset, name, value):
         # this custom method isn't doing anything. There has to

@@ -199,30 +197,25 @@ class ExperimentFilterset(filters.FilterSet):
     )

     def date_range_filter(self, queryset, name, value):
-        date_type = self.form.cleaned_data["experiment_date_field"]
-        if date_type:
-            experiment_date_field = {
-                Experiment.EXPERIMENT_STARTS: "start_date",
-                Experiment.EXPERIMENT_PAUSES: "enrollment_end_date",
-                Experiment.EXPERIMENT_ENDS: "end_date",
-            }[date_type]
+        if not (date_type := self.form.cleaned_data["experiment_date_field"]):
+            return queryset
+        experiment_date_field = {
+            Experiment.EXPERIMENT_STARTS: "start_date",
+            Experiment.EXPERIMENT_PAUSES: "enrollment_end_date",
+            Experiment.EXPERIMENT_ENDS: "end_date",
+        }[date_type]

-            results = []
+        results = []

-            for experiment in queryset.all():
-                date = getattr(experiment, experiment_date_field)
+        for experiment in queryset.all():
+            if date := getattr(experiment, experiment_date_field):
+                if value.start and date < value.start.date():
+                    continue
+                if value.stop and date > value.stop.date():
+                    continue
+                results.append(experiment.id)

-                # enrollment end dates are optional, so there won't always
-                # be a pause date for an experiment
-                if date:
-                    if value.start and date < value.start.date():
-                        continue
-                    if value.stop and date > value.stop.date():
-                        continue
-                    results.append(experiment.id)
-
-            return queryset.filter(pk__in=results)
-        return queryset
+        return queryset.filter(pk__in=results)

     def in_qa_filter(self, queryset, name, value):
         if value:

@@ -231,10 +224,7 @@ class ExperimentFilterset(filters.FilterSet):
         return queryset

     def surveys_filter(self, queryset, name, value):
-        if value:
-            return queryset.filter(survey_required=True)
-
-        return queryset
+        return queryset.filter(survey_required=True) if value else queryset

     def subscribed_filter(self, queryset, name, value):
         if value:

@@ -294,15 +284,12 @@ class ExperimentFilterset(filters.FilterSet):
         )

     def get_project_display_value(self):
-        project_ids = self.data.getlist("projects")
-
-        if project_ids:
+        if project_ids := self.data.getlist("projects"):
             if "null" in project_ids:
                 project_ids.remove("null")
-            project_name_list = Project.objects.filter(id__in=project_ids).values_list(
-                "name"
-            )
-            if project_name_list:
+            if project_name_list := Project.objects.filter(
+                id__in=project_ids
+            ).values_list("name"):
                 return ", ".join(project_name[0] for project_name in project_name_list)
             return "No Projects"

@@ -32,8 +32,8 @@ class JSONField(forms.CharField):
         if cleaned_value:
             try:
                 json.loads(cleaned_value)
-            except json.JSONDecodeError:
-                raise forms.ValidationError("This is not valid JSON.")
+            except json.JSONDecodeError as e:
+                raise forms.ValidationError("This is not valid JSON.") from e

         return cleaned_value

@@ -43,16 +43,16 @@ class DSIssueURLField(forms.URLField):
         cleaned_value = super().clean(value)

         if cleaned_value:
-            err_str = (
-                "Please Provide a Valid URL ex: {ds_url}DS-12345 or {ds_url}DO-12345"
-            )
-
             ds = re.match(
                 re.escape(settings.DS_ISSUE_HOST) + r"(DS|DO)-(\w+.*)", cleaned_value
             )

             if ds is None:
+
+                err_str = (
+                    "Please Provide a Valid URL ex: {ds_url}DS-12345 or {ds_url}DO-12345"
+                )
+
                 raise forms.ValidationError(err_str.format(ds_url=settings.DS_ISSUE_HOST))
         return cleaned_value

@@ -61,13 +61,12 @@ class BugzillaURLField(forms.URLField):
     def clean(self, value):
         cleaned_value = super().clean(value)

-        if cleaned_value:
+        if cleaned_value and (
+            settings.BUGZILLA_HOST not in cleaned_value
+            or get_bugzilla_id(cleaned_value) is None
+        ):
             err_str = "Please Provide a Valid URL ex: {}show_bug.cgi?id=1234"
-            if (
-                settings.BUGZILLA_HOST not in cleaned_value
-                or get_bugzilla_id(cleaned_value) is None
-            ):
-                raise forms.ValidationError(err_str.format(settings.BUGZILLA_HOST))
+            raise forms.ValidationError(err_str.format(settings.BUGZILLA_HOST))

         return cleaned_value

@@ -678,7 +677,7 @@ class ExperimentReviewForm(ExperimentConstants, ChangeLogMixin, forms.ModelForm)
         reviews = set(self.fields) - set(self.instance.get_all_required_reviews())

         if self.instance.is_rollout:
-            reviews -= set(["review_science", "review_bugzilla", "review_engineering"])
+            reviews -= {"review_science", "review_bugzilla", "review_engineering"}

         return [self[r] for r in sorted(reviews)]

@@ -819,10 +818,7 @@ class ExperimentArchiveForm(ExperimentConstants, ChangeLogMixin, forms.ModelForm
         return not self.instance.archived

     def get_changelog_message(self):
-        message = "Archived Delivery"
-        if not self.instance.archived:
-            message = "Unarchived Delivery"
-        return message
+        return "Archived Delivery" if self.instance.archived else "Unarchived Delivery"

     def save(self, *args, **kwargs):
         experiment = Experiment.objects.get(id=self.instance.id)

@@ -923,8 +919,8 @@ class NormandyIdForm(ChangeLogMixin, forms.ModelForm):
             return [
                 int(i.strip()) for i in self.cleaned_data["other_normandy_ids"].split(",")
             ]
-        except ValueError:
-            raise forms.ValidationError("IDs must be numbers separated by commas.")
+        except ValueError as e:
+            raise forms.ValidationError("IDs must be numbers separated by commas.") from e

     class Meta:
         model = Experiment

@@ -378,13 +378,13 @@ class Experiment(ExperimentConstants, models.Model):

     @property
     def format_ndt_normandy_urls(self):
-        # returns a list of dictionaries containing D.C. and Normandy
-        # urls for the main normandy id and other normandy ids if they exist
-        normandy_recipe_url = settings.NORMANDY_API_RECIPE_URL
-        ndt_recipe_url = settings.NORMANDY_DEVTOOLS_RECIPE_URL
         urls = []

         if self.normandy_id:
+            # returns a list of dictionaries containing D.C. and Normandy
+            # urls for the main normandy id and other normandy ids if they exist
+            normandy_recipe_url = settings.NORMANDY_API_RECIPE_URL
+            ndt_recipe_url = settings.NORMANDY_DEVTOOLS_RECIPE_URL
             urls.append(
                 {
                     "id": self.normandy_id,

@@ -394,15 +394,14 @@ class Experiment(ExperimentConstants, models.Model):
             )

         if self.other_normandy_ids:
-            for norm_id in self.other_normandy_ids:
-                urls.append(
-                    {
-                        "id": norm_id,
-                        "normandy_url": normandy_recipe_url.format(id=norm_id),
-                        "ndt_url": ndt_recipe_url.format(id=norm_id),
-                    }
-                )
-
+            urls.extend(
+                {
+                    "id": norm_id,
+                    "normandy_url": normandy_recipe_url.format(id=norm_id),
+                    "ndt_url": ndt_recipe_url.format(id=norm_id),
+                }
+                for norm_id in self.other_normandy_ids
+            )
         return urls

     @property

@@ -460,8 +459,7 @@ class Experiment(ExperimentConstants, models.Model):

     @property
     def enrollment_end_date(self):
-        changes = self.changes.filter(message="Enrollment Complete")
-        if changes:
+        if changes := self.changes.filter(message="Enrollment Complete"):
             return changes[0].changed_on.date()
         if self.proposed_enrollment:
             return self._compute_end_date(self.proposed_enrollment)

@@ -473,22 +471,15 @@ class Experiment(ExperimentConstants, models.Model):
     @property
     def observation_duration(self):
         if self.enrollment_end_date:
-            duration = (self.end_date - self.enrollment_end_date).days
-            return duration
+            return (self.end_date - self.enrollment_end_date).days
         return 0

     def _format_date(self, date):
         return date.strftime("%b %d, %Y")

     def _format_date_string(self, start_date, end_date):
-        start_text = "Unknown"
-        if start_date:
-            start_text = self._format_date(start_date)
-
-        end_text = "Unknown"
-        if end_date:
-            end_text = self._format_date(end_date)
-
+        start_text = self._format_date(start_date) if start_date else "Unknown"
+        end_text = self._format_date(end_date) if end_date else "Unknown"
         day_text = "days"
         duration_text = "Unknown"
         if start_date and end_date:

@@ -558,10 +549,10 @@ class Experiment(ExperimentConstants, models.Model):
         date_ordered_changes = []
         for date, users in sorted(self.grouped_changes.items(), reverse=True):

-            date_changes = []
-            for user, user_changes in users.items():
-                date_changes.append((user, set([c for c in list(user_changes)])))
-
+            date_changes = [
+                (user, set(list(list(user_changes))))
+                for user, user_changes in users.items()
+            ]
             date_ordered_changes.append((date, date_changes))

         return date_ordered_changes

@@ -634,11 +625,10 @@ class Experiment(ExperimentConstants, models.Model):
     def display_platforms_or_versions(self):
         if self.windows_versions:
             return ", ".join(self.windows_versions)
-        else:
-            if set(ExperimentConstants.PLATFORMS_LIST) == set(self.platforms):
-                return ExperimentConstants.PLATFORM_ALL
-            return ", ".join(self.platforms)
+        if set(ExperimentConstants.PLATFORMS_LIST) == set(self.platforms):
+            return ExperimentConstants.PLATFORM_ALL
+
+        return ", ".join(self.platforms)

     @property
     def completed_overview(self):

@@ -684,7 +674,7 @@ class Experiment(ExperimentConstants, models.Model):
     @property
     def completed_addon(self):
         if self.is_branched_addon:
-            return all([v.addon_release_url for v in self.variants.all()])
+            return all(v.addon_release_url for v in self.variants.all())
         else:
             return self.addon_release_url

@@ -721,7 +711,7 @@ class Experiment(ExperimentConstants, models.Model):
             "results_impact_notes",
         )

-        return any([getattr(self, field) for field in results_fields])
+        return any(getattr(self, field) for field in results_fields)

     @property
     def additional_results(self):

@@ -840,7 +830,7 @@ class Experiment(ExperimentConstants, models.Model):
             # review advisory is an exception that is not required
             required_reviews.remove("review_advisory")

-        return all([getattr(self, r) for r in required_reviews])
+        return all(getattr(self, r) for r in required_reviews)

     @property
     def completed_all_sections(self):

@@ -1073,10 +1063,7 @@ class ExperimentVariant(models.Model):

     @property
     def type(self):
-        if self.is_control:
-            return "Control"
-        else:
-            return "Treatment"
+        return "Control" if self.is_control else "Treatment"


 class Preference(models.Model):

@@ -1207,10 +1194,7 @@ class ExperimentChangeLog(models.Model):
         ordering = ("changed_on",)

     def __str__(self):
-        if self.message:
-            return self.message
-        else:
-            return self.pretty_status
+        return self.message or self.pretty_status

     @property
     def pretty_status(self):

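
Across the model hunks, list arguments inside any()/all() become generator expressions, and the __str__ fallback becomes an `x or y` expression. A short self-contained sketch of both patterns with placeholder values:

    # any()/all() accept a generator directly; no intermediate list is needed.
    fields = ["", "note", ""]
    assert any([bool(f) for f in fields]) == any(bool(f) for f in fields)

    def label(message, fallback="Draft"):
        # Mirrors `return self.message or self.pretty_status`: use the fallback
        # whenever message is falsy.
        return message or fallback

    assert label("") == "Draft"
    assert label("Launched") == "Launched"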
@@ -39,10 +39,7 @@ def pagination_url(context, page, **kwargs):
         data.pop("page", None)
     else:
         data["page"] = page
-    if data:
-        return f"?{data.urlencode()}"
-    else:
-        return "."
+    return f"?{data.urlencode()}" if data else "."


 @register.filter

@@ -18,7 +18,7 @@ class TestExperimentListView(TestCase):
     def test_list_view_serializes_experiments(self):
         experiments = []

-        for i in range(3):
+        for _ in range(3):
             experiment = ExperimentFactory.create_with_variants()
             experiments.append(experiment)

@@ -37,11 +37,11 @@ class TestExperimentListView(TestCase):
         pending_experiments = []

         # new experiments should be excluded
-        for i in range(2):
+        for _ in range(2):
             ExperimentFactory.create_with_variants()

         # pending experiments should be included
-        for i in range(3):
+        for _ in range(3):
             experiment = ExperimentFactory.create_with_variants()
             experiment.status = experiment.STATUS_REVIEW
             experiment.save()

@@ -57,13 +57,13 @@ class TestExperimentDesignVariantBaseSerializer(TestCase):
     def test_serializer_puts_control_branch_first_and_sorts_rest_by_id(self):
         ExperimentVariantFactory.create(is_control=True)
         sorted_treatment_ids = sorted(
-            [ExperimentVariantFactory.create(is_control=False).id for i in range(3)]
+            [ExperimentVariantFactory.create(is_control=False).id for _ in range(3)]
         )
         serializer = ExperimentDesignVariantBaseSerializer(
             ExperimentVariant.objects.all().order_by("-id"), many=True
         )
         self.assertTrue(serializer.data[0]["is_control"])
-        self.assertFalse(any([b["is_control"] for b in serializer.data[1:]]))
+        self.assertFalse(any(b["is_control"] for b in serializer.data[1:]))
         self.assertEqual(sorted_treatment_ids, [b["id"] for b in serializer.data[1:]])


@@ -496,7 +496,7 @@ class TestExperimentDesignBaseSerializer(MockRequestMixin, TestCase):

         self.assertEqual(experiment.variants.all().count(), 2)
         self.assertEqual(
-            set(experiment.variants.all()), set([control_variant, treatment2_variant])
+            set(experiment.variants.all()), {control_variant, treatment2_variant}
         )

     def test_serializer_adds_new_variant(self):

@@ -543,7 +543,7 @@ class TestExperimentDesignBaseSerializer(MockRequestMixin, TestCase):
         new_variant = ExperimentVariant.objects.get(name=treatment2_variant_data["name"])
         self.assertEqual(
             set(experiment.variants.all()),
-            set([control_variant, treatment1_variant, new_variant]),
+            {control_variant, treatment1_variant, new_variant},
         )

     def test_serializer_rejects_ratio_not_100(self):

@@ -756,7 +756,7 @@ class TestExperimentDesignPrefSerializer(MockRequestMixin, TestCase):
         serializer = ExperimentDesignPrefSerializer(instance=experiment, data=data)

         self.assertFalse(serializer.is_valid())
-        self.assertEqual(set(serializer.errors), set(["pref_type"]))
+        self.assertEqual(set(serializer.errors), {"pref_type"})
         experiment = Experiment.objects.get(id=experiment.id)
         self.assertEqual(experiment.changes.count(), 0)

@@ -773,7 +773,7 @@ class TestExperimentDesignPrefSerializer(MockRequestMixin, TestCase):
         serializer = ExperimentDesignPrefSerializer(instance=experiment, data=data)

         self.assertFalse(serializer.is_valid())
-        self.assertEqual(set(serializer.errors), set(["pref_branch"]))
+        self.assertEqual(set(serializer.errors), {"pref_branch"})

     def test_serializer_rejects_inconsistent_pref_type_bool(self):
         experiment = ExperimentFactory.create(type=ExperimentConstants.TYPE_PREF)

@@ -209,7 +209,7 @@ class ExperimentFactory(ExperimentConstants, factory.django.DjangoModelFactory):
             return

         if extracted is None:
-            extracted = [ProjectFactory.create() for i in range(3)]
+            extracted = [ProjectFactory.create() for _ in range(3)]

         self.projects.add(*extracted)


@@ -225,7 +225,7 @@ class BaseExperimentVariantFactory(factory.django.DjangoModelFactory):

     @factory.lazy_attribute
     def addon_release_url(self):
-        return "https://www.example.com/{}-release.xpi".format(slugify(self.name))
+        return f"https://www.example.com/{slugify(self.name)}-release.xpi"

     class Meta:
         model = ExperimentVariant

@@ -55,14 +55,12 @@ class TestIntentToShipEmail(TestCase):
         self.assertEqual(sent_email.from_email, sender)
         self.assertEqual(
             set(sent_email.recipients()),
-            set(
-                [
-                    release_drivers,
-                    experiment.owner.email,
-                    experiment.analysis_owner,
-                    "smith@example.com",
-                ]
-            ),
+            {
+                release_drivers,
+                experiment.owner.email,
+                experiment.analysis_owner,
+                "smith@example.com",
+            },
         )
         self.assertTrue(
             experiment.emails.filter(

@@ -116,14 +114,12 @@ class TestIntentToShipEmail(TestCase):
         self.assertEqual(sent_email.from_email, sender)
         self.assertEqual(
             set(sent_email.recipients()),
-            set(
-                [
-                    release_drivers,
-                    experiment.owner.email,
-                    experiment.analysis_owner,
-                    "smith@example.com",
-                ]
-            ),
+            {
+                release_drivers,
+                experiment.owner.email,
+                experiment.analysis_owner,
+                "smith@example.com",
+            },
         )

     def format_locales(self, experiment):

@@ -28,7 +28,7 @@ class TestExperimentFilterset(MockRequestMixin, TestCase):
             queryset=Experiment.objects.all(),
         )
         self.assertTrue(filter.is_valid())
-        self.assertEqual(set(filter.qs), set([pref, addon]))
+        self.assertEqual(set(filter.qs), {pref, addon})
         self.assertEqual(filter.get_type_display_value(), "Pref-Flip, Add-On")

     def test_filters_by_no_project_type(self):

@@ -65,10 +65,10 @@ class TestExperimentFilterset(MockRequestMixin, TestCase):
         self.assertIn(project2.name, display_value)

     def test_filters_out_archived_by_default(self):
-        for i in range(3):
+        for _ in range(3):
             ExperimentFactory.create_with_status(Experiment.STATUS_DRAFT, archived=False)

-        for i in range(3):
+        for _ in range(3):
             ExperimentFactory.create_with_status(Experiment.STATUS_DRAFT, archived=True)

         filter = ExperimentFilterset(data={}, queryset=Experiment.objects.all())

@@ -76,10 +76,10 @@ class TestExperimentFilterset(MockRequestMixin, TestCase):
         self.assertEqual(set(filter.qs), set(Experiment.objects.filter(archived=False)))

     def test_allows_archived_if_True(self):
-        for i in range(3):
+        for _ in range(3):
             ExperimentFactory.create_with_status(Experiment.STATUS_DRAFT, archived=False)

-        for i in range(3):
+        for _ in range(3):
             ExperimentFactory.create_with_status(Experiment.STATUS_DRAFT, archived=True)

         filter = ExperimentFilterset(

@@ -91,7 +91,7 @@ class TestExperimentFilterset(MockRequestMixin, TestCase):
     def test_filters_by_owner(self):
         owner = UserFactory.create()

-        for i in range(3):
+        for _ in range(3):
             ExperimentFactory.create_with_status(Experiment.STATUS_DRAFT, owner=owner)
             ExperimentFactory.create_with_status(Experiment.STATUS_DRAFT)

@@ -103,7 +103,7 @@ class TestExperimentFilterset(MockRequestMixin, TestCase):
         self.assertEqual(filter.get_owner_display_value(), str(owner))

     def test_filters_by_status(self):
-        for i in range(3):
+        for _ in range(3):
             ExperimentFactory.create_with_status(Experiment.STATUS_DRAFT)
             ExperimentFactory.create_with_status(Experiment.STATUS_REVIEW)

@@ -136,13 +136,13 @@ class TestExperimentFilterset(MockRequestMixin, TestCase):
         filter = ExperimentFilterset(
             {"firefox_version": "59.0"}, queryset=Experiment.objects.all()
         )
-        self.assertEqual(set(filter.qs), set([exp_1, exp_2, exp_3]))
+        self.assertEqual(set(filter.qs), {exp_1, exp_2, exp_3})

     def test_filters_by_firefox_channel(self):
         include_channel = Experiment.CHANNEL_CHOICES[1][0]
         exclude_channel = Experiment.CHANNEL_CHOICES[2][0]

-        for i in range(3):
+        for _ in range(3):
             ExperimentFactory.create_with_variants(firefox_channel=include_channel)
             ExperimentFactory.create_with_variants(firefox_channel=exclude_channel)

@@ -219,8 +219,8 @@ class TestExperimentFilterset(MockRequestMixin, TestCase):
             **{settings.OPENIDC_EMAIL_HEADER: user_email},
         ).context[0]

-        self.assertEqual(set(first_response_context["experiments"]), set([exp_1, exp_2]))
-        self.assertEqual(set(second_response_context["experiments"]), set([exp_3]))
+        self.assertEqual(set(first_response_context["experiments"]), {exp_1, exp_2})
+        self.assertEqual(set(second_response_context["experiments"]), {exp_3})

     def test_filters_by_review_in_qa(self):
         exp_1 = ExperimentFactory.create_with_variants(

@@ -231,7 +231,7 @@ class TestExperimentFilterset(MockRequestMixin, TestCase):

         filter = ExperimentFilterset({"in_qa": "on"}, queryset=Experiment.objects.all())

-        self.assertEqual(set(filter.qs), set([exp_1]))
+        self.assertEqual(set(filter.qs), {exp_1})

     def test_filters_experiments_with_surveys(self):
         exp_1 = ExperimentFactory.create_with_variants(survey_required=True)

@@ -242,7 +242,7 @@ class TestExperimentFilterset(MockRequestMixin, TestCase):

         filter = ExperimentFilterset({"surveys": "on"}, queryset=Experiment.objects.all())

-        self.assertEqual(set(filter.qs), set([exp_1, exp_2]))
+        self.assertEqual(set(filter.qs), {exp_1, exp_2})

     def test_filters_for_subscribed_experiments(self):
         exp_1 = ExperimentFactory.create(name="Experiment", slug="experiment")

@@ -288,7 +288,7 @@ class TestExperimentFilterset(MockRequestMixin, TestCase):
             {"longrunning": "on"}, request=self.request, queryset=Experiment.objects.all()
         )

-        self.assertEqual(set(filter.qs), set([exp_1, exp_2]))
+        self.assertEqual(set(filter.qs), {exp_1, exp_2})

     def test_filters_for_results_completed(self):
         exp1 = ExperimentFactory.create(results_url="https://example.com")

@@ -358,7 +358,7 @@ class TestExperimentFilterset(MockRequestMixin, TestCase):
             }
         )

-        self.assertEqual(set(filter.qs), set([self.exp_1]))
+        self.assertEqual(set(filter.qs), {self.exp_1})
         self.assertEqual(
             filter.get_display_start_date_info(),
             "starting between 2019-04-01 and 2019-05-01",

@@ -375,7 +375,7 @@ class TestExperimentFilterset(MockRequestMixin, TestCase):
             }
         )

-        self.assertEqual(set(filter.qs), set([self.exp_1, self.exp_2]))
+        self.assertEqual(set(filter.qs), {self.exp_1, self.exp_2})
         self.assertEqual(
             filter.get_display_start_date_info(),
             "pausing between 2019-04-01 and 2019-05-01",

@@ -392,7 +392,7 @@ class TestExperimentFilterset(MockRequestMixin, TestCase):
             }
         )

-        self.assertEqual(set(filter.qs), set([self.exp_2, self.exp_4]))
+        self.assertEqual(set(filter.qs), {self.exp_2, self.exp_4})
         self.assertEqual(
             filter.get_display_start_date_info(),
             "ending between 2019-04-01 and 2019-05-01",

@@ -409,7 +409,7 @@ class TestExperimentFilterset(MockRequestMixin, TestCase):
             }
         )

-        self.assertEqual(set(filter.qs), set([self.exp_1, self.exp_3]))
+        self.assertEqual(set(filter.qs), {self.exp_1, self.exp_3})
         self.assertEqual(
             filter.get_display_start_date_info(), "starting after 2019-04-01"
         )

@@ -425,7 +425,7 @@ class TestExperimentFilterset(MockRequestMixin, TestCase):
             }
         )

-        self.assertEqual(set(filter.qs), set([self.exp_1, self.exp_2, self.exp_4]))
+        self.assertEqual(set(filter.qs), {self.exp_1, self.exp_2, self.exp_4})
         self.assertEqual(
             filter.get_display_start_date_info(), "starting before 2019-05-01"
         )

@@ -441,15 +441,13 @@ class TestExperimentFilterset(MockRequestMixin, TestCase):
             }
         )

-        self.assertEqual(
-            set(filter.qs), set([self.exp_1, self.exp_2, self.exp_3, self.exp_4])
-        )
+        self.assertEqual(set(filter.qs), {self.exp_1, self.exp_2, self.exp_3, self.exp_4})

     def test_filters_by_analysis_owner(self):
         user = UserFactory.create()
         experiment = ExperimentFactory.create(analysis_owner=user)
         filter = ExperimentFilterset(data={"analysis_owner": user.id})
-        self.assertEqual(set(filter.qs), set([experiment]))
+        self.assertEqual(set(filter.qs), {experiment})

     def test_filter_by_analysis_owner_invalid_for_non_analysis_owner(self):
         user = UserFactory.create()

@@ -684,19 +684,17 @@ class TestExperimentReviewForm(
         form = ExperimentReviewForm(request=self.request, data={}, instance=experiment)

         self.assertEqual(
-            set([f.name for f in form.required_reviews]),
-            set(
-                [
-                    "review_science",
-                    "review_advisory",
-                    "review_engineering",
-                    "review_qa_requested",
-                    "review_intent_to_ship",
-                    "review_bugzilla",
-                    "review_qa",
-                    "review_relman",
-                ]
-            ),
+            {f.name for f in form.required_reviews},
+            {
+                "review_science",
+                "review_advisory",
+                "review_engineering",
+                "review_qa_requested",
+                "review_intent_to_ship",
+                "review_bugzilla",
+                "review_qa",
+                "review_relman",
+            },
         )

     def test_required_reviews_when_a_risk_partner_related_is_true(self):

@@ -753,16 +751,14 @@ class TestExperimentReviewForm(
         form = ExperimentReviewForm(self.request, instance=experiment)

         self.assertEqual(
-            set([f.name for f in form.required_reviews]),
-            set(
-                [
-                    "review_qa",
-                    "review_intent_to_ship",
-                    "review_qa_requested",
-                    "review_advisory",
-                    "review_relman",
-                ]
-            ),
+            {f.name for f in form.required_reviews},
+            {
+                "review_qa",
+                "review_intent_to_ship",
+                "review_qa_requested",
+                "review_advisory",
+                "review_relman",
+            },
         )

     def test_optional_reviews_for_rollout(self):

@@ -771,18 +767,16 @@ class TestExperimentReviewForm(
         form = ExperimentReviewForm(self.request, instance=experiment)

         self.assertEqual(
-            set([f.name for f in form.optional_reviews]),
-            set(
-                [
-                    "review_impacted_teams",
-                    "review_ux",
-                    "review_legal",
-                    "review_security",
-                    "review_vp",
-                    "review_comms",
-                    "review_data_steward",
-                ]
-            ),
+            {f.name for f in form.optional_reviews},
+            {
+                "review_impacted_teams",
+                "review_ux",
+                "review_legal",
+                "review_security",
+                "review_vp",
+                "review_comms",
+                "review_data_steward",
+            },
         )

     def test_cannot_check_review_relman_without_permissions(self):

@@ -741,35 +741,35 @@ class TestExperimentModel(TestCase):

         self.assertEqual(
             set(experiment.grouped_changes.keys()),
-            set([date1.date(), date2.date(), date3.date()]),
+            {date1.date(), date2.date(), date3.date()},
         )
         self.assertEqual(
-            set(experiment.grouped_changes[date1.date()].keys()), set([user1, user2])
+            set(experiment.grouped_changes[date1.date()].keys()), {user1, user2}
         )
         self.assertEqual(
-            set(experiment.grouped_changes[date2.date()].keys()), set([user2, user3])
+            set(experiment.grouped_changes[date2.date()].keys()), {user2, user3}
         )
         self.assertEqual(
             set(experiment.grouped_changes[date3.date()].keys()),
-            set([user1, user2, user3]),
+            {user1, user2, user3},
         )

         self.assertEqual(
             experiment.grouped_changes[date1.date()][user1],
-            set([change1, change2, change3]),
+            {change1, change2, change3},
         )
-        self.assertEqual(experiment.grouped_changes[date1.date()][user2], set([change4]))
+        self.assertEqual(experiment.grouped_changes[date1.date()][user2], {change4})

-        self.assertEqual(experiment.grouped_changes[date2.date()][user2], set([change5]))
+        self.assertEqual(experiment.grouped_changes[date2.date()][user2], {change5})
         self.assertEqual(
-            experiment.grouped_changes[date2.date()][user3], set([change6, change7])
+            experiment.grouped_changes[date2.date()][user3], {change6, change7}
         )

         self.assertEqual(
-            experiment.grouped_changes[date3.date()][user1], set([change8, change9])
+            experiment.grouped_changes[date3.date()][user1], {change8, change9}
         )
-        self.assertEqual(experiment.grouped_changes[date3.date()][user2], set([change10]))
-        self.assertEqual(experiment.grouped_changes[date3.date()][user3], set([change11]))
+        self.assertEqual(experiment.grouped_changes[date3.date()][user2], {change10})
+        self.assertEqual(experiment.grouped_changes[date3.date()][user3], {change11})

     def test_ordered_changes_orders_by_date(self):
         experiment = ExperimentFactory.create()

@@ -817,22 +817,22 @@ class TestExperimentModel(TestCase):
         )

         expected_changes = {
-            date1.date(): {user1: set([a, b]), user2: set([c])},
-            date2.date(): {user2: set([d]), user3: set([e, f])},
-            date3.date(): {user1: set([g, h]), user2: set([i]), user3: set([j])},
+            date1.date(): {user1: {a, b}, user2: {c}},
+            date2.date(): {user2: {d}, user3: {e, f}},
+            date3.date(): {user1: {g, h}, user2: {i}, user3: {j}},
         }

         ordered_dates = [date for date, changes in experiment.ordered_changes]
         self.assertEqual(ordered_dates, [date3.date(), date2.date(), date1.date()])

         day3_users = [user for user, user_changes in experiment.ordered_changes[0][1]]
-        self.assertEqual(set(day3_users), set([user1, user2, user3]))
+        self.assertEqual(set(day3_users), {user1, user2, user3})

         day2_users = [user for user, user_changes in experiment.ordered_changes[1][1]]
-        self.assertEqual(set(day2_users), set([user2, user3]))
+        self.assertEqual(set(day2_users), {user2, user3})

         day1_users = [user for user, user_changes in experiment.ordered_changes[2][1]]
-        self.assertEqual(set(day1_users), set([user1, user2]))
+        self.assertEqual(set(day1_users), {user1, user2})

         for date, date_changes in experiment.ordered_changes:
             for user, user_changes in date_changes:

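
The test hunks above rewrite set([...]) calls as set literals and set comprehensions; the resulting sets compare equal, so the assertions are unchanged. A tiny illustration with placeholder strings:

    pref, addon = "pref-experiment", "addon-experiment"

    assert set([pref, addon]) == {pref, addon}
    assert {t.upper() for t in (pref, addon)} == set(t.upper() for t in (pref, addon))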
@@ -32,7 +32,7 @@ class TestExperimentListView(TestCase):
         # Archived experiment is ommitted
         ExperimentFactory.create_with_status(Experiment.STATUS_DRAFT, archived=True)

-        for i in range(3):
+        for _ in range(3):
             ExperimentFactory.create_with_status(
                 random.choice(Experiment.STATUS_CHOICES)[0]
             )

@@ -57,7 +57,7 @@ class TestExperimentListView(TestCase):
         # Archived experiment is included
         ExperimentFactory.create_with_status(Experiment.STATUS_DRAFT, archived=True)

-        for i in range(3):
+        for _ in range(3):
             ExperimentFactory.create_with_status(
                 random.choice(Experiment.STATUS_CHOICES)[0]
             )

@@ -85,7 +85,7 @@ class TestExperimentListView(TestCase):
         filtered_types = (Experiment.TYPE_PREF, Experiment.TYPE_GENERIC)
         filtered_version = Experiment.VERSION_CHOICES[1][0]

-        for i in range(3):
+        for _ in range(3):
             for filtered_type in filtered_types:
                 ExperimentFactory.create_with_status(
                     firefox_channel=filtered_channel,

@@ -95,7 +95,7 @@ class TestExperimentListView(TestCase):
                     type=filtered_type,
                 )

-        for i in range(3):
+        for _ in range(3):
             ExperimentFactory.create_with_status(
                 random.choice(Experiment.STATUS_CHOICES)[0]
             )

@@ -134,7 +134,7 @@ class TestExperimentListView(TestCase):
         user_email = "user@example.com"

         number_of_experiments = settings.EXPERIMENTS_PAGINATE_BY + 1
-        for i in range(number_of_experiments):
+        for _ in range(number_of_experiments):
             ExperimentFactory.create_with_status(
                 random.choice(Experiment.STATUS_CHOICES)[0]
             )

@@ -21,22 +21,22 @@ class NormandyDecodeError(NormandyError):
     message = "Error parsing JSON Normandy Response"


-def make_normandy_call(url, params={}):
+def make_normandy_call(url, params=None):
+    if params is None:
+        params = {}
     try:
         response = requests.get(url, verify=(not settings.DEBUG), params=params)
         response.raise_for_status()
         return response.json()
     except requests.exceptions.HTTPError as e:
-        logging.exception(
-            "Normandy API returned Nonsuccessful Response Code: {}".format(e)
-        )
-        raise NonsuccessfulNormandyCall(*e.args)
+        logging.exception(f"Normandy API returned Nonsuccessful Response Code: {e}")
+        raise NonsuccessfulNormandyCall(*e.args) from e
     except requests.exceptions.RequestException as e:
-        logging.exception("Error calling Normandy API: {}".format(e))
-        raise APINormandyError(*e.args)
+        logging.exception(f"Error calling Normandy API: {e}")
+        raise APINormandyError(*e.args) from e
     except ValueError as e:
-        logging.exception("Error parsing JSON Normandy response: {}".format(e))
-        raise NormandyDecodeError(*e.args)
+        logging.exception(f"Error parsing JSON Normandy response: {e}")
+        raise NormandyDecodeError(*e.args) from e


 def get_recipe(recipe_id):

@@ -59,8 +59,7 @@ def get_recipe_state_enabler(recipe_data):

     enabled_states = recipe_data.get("enabled_states", [])
     if len(enabled_states) > 0:
-        creator = enabled_states[0].get("creator")
-        if creator:
+        if creator := enabled_states[0].get("creator"):
            enabler_email = creator.get("email")

             enabler, _ = get_user_model().objects.get_or_create(

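
Beyond the f-string and `from e` changes, make_normandy_call drops its mutable default argument: `params={}` is evaluated once at definition time and shared between calls, while the `params=None` idiom builds a fresh dict per call. A self-contained sketch of the difference; the function names here are illustrative:

    def collect_bad(value, bucket=[]):
        # The default list is created once and reused on every call.
        bucket.append(value)
        return bucket

    def collect_good(value, bucket=None):
        if bucket is None:
            # Fresh container per call, the same pattern as the refactored client.
            bucket = []
        bucket.append(value)
        return bucket

    assert collect_bad(1) == [1]
    assert collect_bad(2) == [1, 2]   # previous call's state leaked into this one
    assert collect_good(1) == [1]
    assert collect_good(2) == [2]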
@@ -44,7 +44,7 @@ def update_recipe_ids_to_experiments():

     for experiment in ready_to_ship_experiments:
         try:
-            logger.info("Updating Experiment: {}".format(experiment))
+            logger.info(f"Updating Experiment: {experiment}")
             recipe_data = normandy.get_recipe_list(experiment.slug)

             if len(recipe_data):

@@ -82,7 +82,7 @@ def update_launched_experiments():

     for experiment in launched_experiments:
         try:
-            logger.info("Updating Experiment: {}".format(experiment))
+            logger.info(f"Updating Experiment: {experiment}")
             if experiment.normandy_id:
                 recipe_data = normandy.get_recipe(experiment.normandy_id)

@@ -103,9 +103,7 @@ def update_launched_experiments():
                     send_period_ending_emails_task(experiment)

             else:
-                logger.info(
-                    "Skipping Experiment: {}. No Normandy id found".format(experiment)
-                )
+                logger.info(f"Skipping Experiment: {experiment}. No Normandy id found")
         except (IntegrityError, KeyError, normandy.NormandyError) as e:
             logger.info(f"Failed to update Experiment {experiment}: {e}")
             metrics.incr("update_launched_experiments.failed")

@@ -214,12 +212,12 @@ def update_population_percent(experiment, recipe_data, filter_objects):


 def update_firefox_versions(experiment, recipe_data, filter_objects):
+    changed_data = {}
+
     if versions := filter_objects.get("version"):

         min_version = str(float(min(versions["versions"])))
         max_version = str(float(max(versions["versions"])))
-        changed_data = {}
-
         if experiment.firefox_min_version != min_version:
             changed_data["firefox_min_version"] = min_version
         if experiment.firefox_max_version != max_version:

@@ -139,7 +139,7 @@ class TestFilterObjectLocaleSerializer(TestCase):
         experiment = ExperimentFactory.create(locales=[locale1, locale2])
         serializer = FilterObjectLocaleSerializer(experiment)
         self.assertEqual(serializer.data["type"], "locale")
-        self.assertEqual(set(serializer.data["locales"]), set(["ab", "cd"]))
+        self.assertEqual(set(serializer.data["locales"]), {"ab", "cd"})


 class TestFilterObjectCountrySerializer(TestCase):

@@ -149,7 +149,7 @@ class TestFilterObjectCountrySerializer(TestCase):
         experiment = ExperimentFactory.create(countries=[country1, country2])
         serializer = FilterObjectCountrySerializer(experiment)
         self.assertEqual(serializer.data["type"], "country")
-        self.assertEqual(set(serializer.data["countries"]), set(["ab", "cd"]))
+        self.assertEqual(set(serializer.data["countries"]), {"ab", "cd"})


 class TestExperimentRecipeAddonVariantSerializer(TestCase):

@@ -8,7 +8,7 @@ class TestNotificationModel(TestCase):
     def test_has_unread_false_when_all_read(self):
         user = UserFactory.create()

-        for i in range(3):
+        for _ in range(3):
             NotificationFactory.create(user=user, read=True)

         self.assertFalse(user.notifications.has_unread)

@@ -23,14 +23,12 @@ class TestNotificationModel(TestCase):
     def test_get_unread_returns_unread_and_marks_as_read(self):
         user = UserFactory.create()

-        unread_notifications = []
-        for i in range(3):
-            unread_notifications.append(NotificationFactory.create(user=user, read=False))
-
-        read_notifications = []
-        for i in range(3):
-            read_notifications.append(NotificationFactory.create(user=user, read=True))
-
+        unread_notifications = [
+            NotificationFactory.create(user=user, read=False) for _ in range(3)
+        ]
+        read_notifications = [
+            NotificationFactory.create(user=user, read=True) for _ in range(3)
+        ]
         self.assertTrue(user.notifications.has_unread)

         notifications = user.notifications.get_unread()

@@ -45,7 +43,7 @@ class TestNotificationModel(TestCase):

         user1_notifications = []
         user2_notifications = []
-        for i in range(3):
+        for _ in range(3):
             user1_notifications.append(NotificationFactory.create(user=user1))
             user2_notifications.append(NotificationFactory.create(user=user2))

|
|||
|
||||
class OpenIDCRestFrameworkAuthenticator(SessionAuthentication):
|
||||
def authenticate(self, request):
|
||||
authenticated_user = getattr(request._request, "user", None)
|
||||
|
||||
if authenticated_user:
|
||||
if authenticated_user := getattr(request._request, "user", None):
|
||||
return (authenticated_user, None)
|
||||
|
||||
return super().authenticate(request)
|
||||
|
|
|
@ -9,7 +9,7 @@ faker = FakerFactory.create()
|
|||
|
||||
class ProjectFactory(factory.django.DjangoModelFactory):
|
||||
name = factory.LazyAttribute(lambda o: faker.catch_phrase())
|
||||
slug = factory.LazyAttribute(lambda o: "{}_".format(slugify(o.name)))
|
||||
slug = factory.LazyAttribute(lambda o: f"{slugify(o.name)}_")
|
||||
|
||||
class Meta:
|
||||
model = Project
|
||||
|
|
|
@ -9,8 +9,8 @@ from experimenter.targeting.constants import TargetingConstants
|
|||
class TestTargetingConfigs(TestCase):
|
||||
def test_all_targeting_configs_defined_in_constants(self):
|
||||
self.assertEqual(
|
||||
set([t.value for t in TargetingConstants.TargetingConfig]),
|
||||
set(t for t in TargetingConstants.TARGETING_CONFIGS.keys()),
|
||||
{t.value for t in TargetingConstants.TargetingConfig},
|
||||
set(TargetingConstants.TARGETING_CONFIGS.keys()),
|
||||
"Targeting Configs must be defined in both "
|
||||
"TargetingConstants.TargetingConfig and TargetingConstants.TARGETING_CONFIGS",
|
||||
)
|
||||
|
@ -21,4 +21,6 @@ class TestTargetingConfigs(TestCase):
|
|||
try:
|
||||
JEXLParser().parse(targeting_config.targeting)
|
||||
except ParseError as e:
|
||||
raise Exception(f"JEXL Parse error in {targeting_config.name}: {e}")
|
||||
raise Exception(
|
||||
f"JEXL Parse error in {targeting_config.name}: {e}"
|
||||
) from e
|
||||
|
|