Use bulk database operations during tests
In some places, we cannot use _bulk_create because baker doesn't create
M2M entries in that case.

Tests are about 5 seconds (8%) faster when run with a baker hotpatched as
proposed in model-bakers/model_bakery#297.
he3lixxx committed Mar 21, 2022
1 parent 1d1d18f commit 8b7ca1b
Showing 10 changed files with 166 additions and 179 deletions.
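
For context: the pattern applied throughout this commit is model_bakery's bulk mode. Instead of one baker.make() call (and one INSERT) per object, a single call distributes per-instance values through iter() and writes all rows at once with _quantity and _bulk_create. A minimal sketch of the pattern, assuming a Django test context with the evap models on the path (illustrative, not part of the commit):

    from model_bakery import baker

    from evap.evaluation.models import Evaluation

    # One bulk INSERT instead of three round trips; baker advances each
    # iter() once per created instance, in order.
    evaluations = baker.make(
        Evaluation,
        name_en=iter(["C", "B", "A"]),  # per-instance values
        _quantity=3,                    # number of instances to create
        _bulk_create=True,              # saved via Model.objects.bulk_create()
    )

As the commit message notes, _bulk_create has to be skipped wherever M2M fields (e.g. participants) are set, because bulk_create does not write the through-table rows.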
11 changes: 3 additions & 8 deletions evap/evaluation/tests/test_auth.py
@@ -27,16 +27,11 @@ def setUpTestData(cls):
         baker.make(
             Contribution,
             evaluation=evaluation,
-            contributor=cls.external_user,
-            role=Contribution.Role.EDITOR,
-            textanswer_visibility=Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS,
-        )
-        baker.make(
-            Contribution,
-            evaluation=evaluation,
-            contributor=cls.inactive_external_user,
+            contributor=iter([cls.external_user, cls.inactive_external_user]),
             role=Contribution.Role.EDITOR,
             textanswer_visibility=Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS,
+            _quantity=2,
+            _bulk_create=True,
         )

     @override_settings(PAGE_URL="https://example.com")
41 changes: 15 additions & 26 deletions evap/evaluation/tests/test_commands.py
@@ -27,7 +27,7 @@
     TextAnswer,
     UserProfile,
 )
-from evap.evaluation.tests.tools import make_manager
+from evap.evaluation.tests.tools import make_manager, make_rating_answer_counters


 class TestAnonymizeCommand(TestCase):
@@ -109,16 +109,14 @@ def setUp(self):
         self.addCleanup(self.input_patch.stop)

     def test_no_empty_rating_answer_counters_left(self):
+        counters = []
         for question in chain(self.contributor_questions, self.general_questions):
-            choices = [choice for choice in CHOICES[question.type].values if choice != NO_ANSWER]
-            for answer in choices:
-                baker.make(
-                    RatingAnswerCounter, question=question, contribution=self.contribution, count=1, answer=answer
-                )
+            counts = [1 for choice in CHOICES[question.type].values if choice != NO_ANSWER]
+            counters.extend(make_rating_answer_counters(question, self.contribution, counts, False))
+        RatingAnswerCounter.objects.bulk_create(counters)

         old_count = RatingAnswerCounter.objects.count()

-        random.seed(0)
         management.call_command("anonymize", stdout=StringIO())

         new_count = RatingAnswerCounter.objects.count()
@@ -133,15 +131,13 @@ def test_question_with_no_answers(self):

     def test_answer_count_unchanged(self):
         answers_per_question = defaultdict(int)
-        random.seed(0)

+        counters = []
         for question in chain(self.contributor_questions, self.general_questions):
-            choices = [choice for choice in CHOICES[question.type].values if choice != NO_ANSWER]
-            for answer in choices:
-                count = random.randint(10, 100)  # nosec
-                baker.make(
-                    RatingAnswerCounter, question=question, contribution=self.contribution, count=count, answer=answer
-                )
-                answers_per_question[question] += count
+            counts = [random.randint(10, 100) for choice in CHOICES[question.type].values if choice != NO_ANSWER]
+            counters.extend(make_rating_answer_counters(question, self.contribution, counts, False))
+            answers_per_question[question] += sum(counts)
+        RatingAnswerCounter.objects.bulk_create(counters)
+
         management.call_command("anonymize", stdout=StringIO())

@@ -157,17 +153,10 @@ def test_single_result_anonymization(self):

-        answer_count_before = 0
         choices = [choice for choice in CHOICES[question.type].values if choice != NO_ANSWER]
         random.seed(0)
-        for answer in choices:
-            count = random.randint(50, 100)  # nosec
-            baker.make(
-                RatingAnswerCounter,
-                question=question,
-                contribution=single_result.general_contribution,
-                count=count,
-                answer=answer,
-            )
-            answer_count_before += count
+
+        answer_counts = [random.randint(50, 100) for answer in choices]
+        answer_count_before = sum(answer_counts)
+        make_rating_answer_counters(question, single_result.general_contribution, answer_counts)

         management.call_command("anonymize", stdout=StringIO())

51 changes: 12 additions & 39 deletions evap/evaluation/tests/test_models.py
@@ -328,22 +328,10 @@ def test_hidden_textanswers_get_deleted_on_publish(self):
             TextAnswer,
             question=question,
             contribution=evaluation.general_contribution,
-            answer="hidden",
-            state=TextAnswer.State.HIDDEN,
-        )
-        baker.make(
-            TextAnswer,
-            question=question,
-            contribution=evaluation.general_contribution,
-            answer="published",
-            state=TextAnswer.State.PUBLISHED,
-        )
-        baker.make(
-            TextAnswer,
-            question=question,
-            contribution=evaluation.general_contribution,
-            answer="private",
-            state=TextAnswer.State.PRIVATE,
+            answer=iter(["hidden", "published", "private"]),
+            state=iter([TextAnswer.State.HIDDEN, TextAnswer.State.PUBLISHED, TextAnswer.State.PRIVATE]),
+            _quantity=3,
+            _bulk_create=True,
         )

         self.assertEqual(evaluation.textanswer_set.count(), 3)
@@ -630,35 +618,20 @@ def test_email_domain_replacement(self):
     def test_get_sorted_due_evaluations(self):
         student = baker.make(UserProfile, email="student@example.com")
         course = baker.make(Course)
-        evaluation1 = baker.make(
-            Evaluation,
-            course=course,
-            name_en="C",
-            name_de="C",
-            vote_end_date=date.today(),
-            state=Evaluation.State.IN_EVALUATION,
-            participants=[student],
-        )
-        evaluation2 = baker.make(
-            Evaluation,
-            course=course,
-            name_en="B",
-            name_de="B",
-            vote_end_date=date.today(),
-            state=Evaluation.State.IN_EVALUATION,
-            participants=[student],
-        )
-        evaluation3 = baker.make(
+
+        evaluations = baker.make(
             Evaluation,
             course=course,
-            name_en="A",
-            name_de="A",
-            vote_end_date=date.today() + timedelta(days=1),
+            name_en=iter(["C", "B", "A"]),
+            name_de=iter(["C", "B", "A"]),
+            vote_end_date=iter([date.today(), date.today(), date.today() + timedelta(days=1)]),
             state=Evaluation.State.IN_EVALUATION,
             participants=[student],
+            _quantity=3,
         )

         sorted_evaluations = student.get_sorted_due_evaluations()
-        self.assertEqual(sorted_evaluations, [(evaluation2, 0), (evaluation1, 0), (evaluation3, 1)])
+        self.assertEqual(sorted_evaluations, [(evaluations[1], 0), (evaluations[0], 0), (evaluations[2], 1)])


class ParticipationArchivingTests(TestCase):
18 changes: 14 additions & 4 deletions evap/evaluation/tests/tools.py
@@ -1,7 +1,7 @@
 import functools
 import os
 from datetime import timedelta
-from typing import List, Union
+from typing import List, Optional, Sequence, Union

 from django.conf import settings
 from django.contrib.auth.models import Group
@@ -16,6 +16,7 @@
     Course,
     Degree,
     Evaluation,
+    Question,
     Questionnaire,
     RatingAnswerCounter,
     TextAnswer,
@@ -178,7 +179,12 @@ def make_editor(user, evaluation):
     )


-def make_rating_answer_counters(question, contribution, answer_counts=None):
+def make_rating_answer_counters(
+    question: Question,
+    contribution: Contribution,
+    answer_counts: Optional[Sequence[int]] = None,
+    store_in_db: bool = True,
+):
     """
     Create RatingAnswerCounters for a question for a contribution.
     Examples:
@@ -194,12 +200,16 @@ def make_rating_answer_counters(question, contribution, answer_counts=None):

     assert len(answer_counts) == expected_counts

-    return baker.make(
+    counters = baker.prepare(
         RatingAnswerCounter,
         question=question,
         contribution=contribution,
-        _bulk_create=True,
         _quantity=len(answer_counts),
         answer=iter(CHOICES[question.type].values),
         count=iter(answer_counts),
     )
+
+    if store_in_db:
+        RatingAnswerCounter.objects.bulk_create(counters)
+
+    return counters
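
The new store_in_db flag enables batching across helper calls: with store_in_db=False the helper only returns the unsaved instances from baker.prepare(), and the caller flushes several batches with a single query. A condensed usage sketch (question_a, question_b and contribution are assumed to exist; see the tests below for the real call sites):

    from evap.evaluation.models import RatingAnswerCounter
    from evap.evaluation.tests.tools import make_rating_answer_counters

    # Collect unsaved counters from several helper calls ...
    counters = [
        *make_rating_answer_counters(question_a, contribution, [0, 1, 0, 0, 0], False),
        *make_rating_answer_counters(question_b, contribution, [1, 0, 0, 0, 0], False),
    ]
    # ... and write them all with one INSERT.
    RatingAnswerCounter.objects.bulk_create(counters)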
52 changes: 28 additions & 24 deletions evap/results/tests/test_exporters.py
@@ -44,7 +44,7 @@ def test_questionnaire_ordering(self):
         degree = baker.make(Degree)
         evaluation = baker.make(
             Evaluation,
-            course=baker.make(Course, degrees=[degree]),
+            course__degrees=[degree],
             state=Evaluation.State.PUBLISHED,
             _participant_count=2,
             _voter_count=2,
@@ -98,7 +98,7 @@ def test_heading_question_filtering(self):
         degree = baker.make(Degree)
         evaluation = baker.make(
             Evaluation,
-            course=baker.make(Course, degrees=[degree]),
+            course__degrees=[degree],
             state=Evaluation.State.PUBLISHED,
             _participant_count=2,
             _voter_count=2,
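
The course__degrees change in the two hunks above uses baker's double-underscore syntax: keyword arguments prefixed with the relation name are forwarded to the related object that baker creates on the fly, which saves the explicit inner baker.make(Course, ...) call. In sketch form (model names as in the diff):

    # Equivalent outcomes, one call instead of two:
    evaluation = baker.make(Evaluation, course__degrees=[degree])
    evaluation = baker.make(Evaluation, course=baker.make(Course, degrees=[degree]))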
@@ -432,18 +432,16 @@ def test_correct_grades_and_bottom_numbers(self):
     def test_course_grade(self):
         degree = baker.make(Degree)
         course = baker.make(Course, degrees=[degree])
-        evaluations = [
-            baker.make(
-                Evaluation,
-                course=course,
-                name_en=f"eval{i}",
-                name_de=f"eval{i}",
-                state=Evaluation.State.PUBLISHED,
-                _voter_count=5,
-                _participant_count=10,
-            )
-            for i in range(3)
-        ]
+        evaluations = baker.make(
+            Evaluation,
+            course=course,
+            name_en=iter(["eval0", "eval1", "eval2"]),
+            name_de=iter(["eval0", "eval1", "eval2"]),
+            state=Evaluation.State.PUBLISHED,
+            _voter_count=5,
+            _participant_count=10,
+            _quantity=3,
+        )

         grades_per_eval = [[1, 1, 0, 0, 0], [0, 1, 1, 0, 0], [1, 0, 1, 0, 0]]
         expected_average = 2.0
@@ -546,16 +544,22 @@ def test_contributor_result_export(self):

     def test_text_answer_export(self):
         evaluation = baker.make(Evaluation, state=Evaluation.State.PUBLISHED, can_publish_text_results=True)
-        questions = [baker.make(Question, questionnaire__type=t, type=Question.TEXT) for t in Questionnaire.Type.values]
+        questions = baker.make(
+            Question,
+            questionnaire__type=iter(Questionnaire.Type.values),
+            type=Question.TEXT,
+            _quantity=len(Questionnaire.Type.values),
+            _bulk_create=True,
+        )

-        for idx in [0, 1, 2, 2, 0]:
-            baker.make(
-                TextAnswer,
-                question=questions[idx],
-                contribution__evaluation=evaluation,
-                contribution__questionnaires=[questions[idx].questionnaire],
-                state=TextAnswer.State.PUBLISHED,
-            )
+        baker.make(
+            TextAnswer,
+            question=iter(questions[idx] for idx in [0, 1, 2, 2, 0]),
+            contribution__evaluation=evaluation,
+            contribution__questionnaires=iter(questions[idx].questionnaire for idx in [0, 1, 2, 2, 0]),
+            state=TextAnswer.State.PUBLISHED,
+            _quantity=5,
+        )

         cache_results(evaluation)
         evaluation_result = get_results(evaluation)
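
A subtlety in the hunk above: when several fields receive iter(...) values, baker draws one element from each iterator per created instance, so the two generators over [0, 1, 2, 2, 0] stay in lockstep and every TextAnswer gets a question and questionnaire that belong together. The mechanism, reduced to plain Python (no ORM, purely illustrative):

    # One element per field per instance, consumed in parallel:
    names = iter(["a", "b", "c"])
    sizes = iter([1, 2, 3])
    instances = [{"name": next(names), "size": next(sizes)} for _ in range(3)]
    # -> [{'name': 'a', 'size': 1}, {'name': 'b', 'size': 2}, {'name': 'c', 'size': 3}]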
62 changes: 42 additions & 20 deletions evap/results/tests/test_tools.py
@@ -6,7 +6,16 @@
 from django.test.testcases import TestCase
 from model_bakery import baker

-from evap.evaluation.models import Contribution, Course, Evaluation, Question, Questionnaire, TextAnswer, UserProfile
+from evap.evaluation.models import (
+    Contribution,
+    Course,
+    Evaluation,
+    Question,
+    Questionnaire,
+    RatingAnswerCounter,
+    TextAnswer,
+    UserProfile,
+)
 from evap.evaluation.tests.tools import make_rating_answer_counters
 from evap.results.tools import (
     RatingResult,
@@ -197,14 +206,21 @@ def setUpTestData(cls):
     def test_average_grade(self):
         question_grade2 = baker.make(Question, questionnaire=self.questionnaire, type=Question.GRADE)

-        make_rating_answer_counters(self.question_grade, self.contribution1, [0, 1, 0, 0, 0])
-        make_rating_answer_counters(self.question_grade, self.contribution2, [0, 0, 0, 2, 0])
-        make_rating_answer_counters(question_grade2, self.contribution1, [1, 0, 0, 0, 0])
-        make_rating_answer_counters(self.question_likert, self.contribution1, [0, 0, 4, 0, 0])
-        make_rating_answer_counters(self.question_likert, self.general_contribution, [0, 0, 0, 0, 5])
-        make_rating_answer_counters(self.question_likert_2, self.general_contribution, [0, 0, 3, 0, 0])
-        make_rating_answer_counters(self.question_bipolar, self.general_contribution, [0, 0, 0, 0, 0, 0, 2])
-        make_rating_answer_counters(self.question_bipolar_2, self.general_contribution, [0, 0, 4, 0, 0, 0, 0])
+        counters = [
+            *make_rating_answer_counters(self.question_grade, self.contribution1, [0, 1, 0, 0, 0], False),
+            *make_rating_answer_counters(self.question_grade, self.contribution2, [0, 0, 0, 2, 0], False),
+            *make_rating_answer_counters(question_grade2, self.contribution1, [1, 0, 0, 0, 0], False),
+            *make_rating_answer_counters(self.question_likert, self.contribution1, [0, 0, 4, 0, 0], False),
+            *make_rating_answer_counters(self.question_likert, self.general_contribution, [0, 0, 0, 0, 5], False),
+            *make_rating_answer_counters(self.question_likert_2, self.general_contribution, [0, 0, 3, 0, 0], False),
+            *make_rating_answer_counters(
+                self.question_bipolar, self.general_contribution, [0, 0, 0, 0, 0, 0, 2], False
+            ),
+            *make_rating_answer_counters(
+                self.question_bipolar_2, self.general_contribution, [0, 0, 4, 0, 0, 0, 0], False
+            ),
+        ]
+        RatingAnswerCounter.objects.bulk_create(counters)

         cache_results(self.evaluation)

@@ -243,11 +259,14 @@ def test_average_grade(self):
         GENERAL_NON_GRADE_QUESTIONS_WEIGHT=5,
     )
     def test_distribution_without_general_grade_question(self):
-        make_rating_answer_counters(self.question_grade, self.contribution1, [1, 0, 1, 0, 0])
-        make_rating_answer_counters(self.question_grade, self.contribution2, [0, 1, 0, 1, 0])
-        make_rating_answer_counters(self.question_likert, self.contribution1, [0, 0, 3, 0, 3])
-        make_rating_answer_counters(self.question_likert, self.general_contribution, [0, 0, 0, 0, 5])
-        make_rating_answer_counters(self.question_likert_2, self.general_contribution, [0, 0, 3, 0, 0])
+        counters = [
+            *make_rating_answer_counters(self.question_grade, self.contribution1, [1, 0, 1, 0, 0], False),
+            *make_rating_answer_counters(self.question_grade, self.contribution2, [0, 1, 0, 1, 0], False),
+            *make_rating_answer_counters(self.question_likert, self.contribution1, [0, 0, 3, 0, 3], False),
+            *make_rating_answer_counters(self.question_likert, self.general_contribution, [0, 0, 0, 0, 5], False),
+            *make_rating_answer_counters(self.question_likert_2, self.general_contribution, [0, 0, 3, 0, 0], False),
+        ]
+        RatingAnswerCounter.objects.bulk_create(counters)

         cache_results(self.evaluation)

@@ -274,12 +293,15 @@ def test_distribution_without_general_grade_question(self):
         GENERAL_NON_GRADE_QUESTIONS_WEIGHT=5,
     )
     def test_distribution_with_general_grade_question(self):
-        make_rating_answer_counters(self.question_grade, self.contribution1, [1, 0, 1, 0, 0])
-        make_rating_answer_counters(self.question_grade, self.contribution2, [0, 1, 0, 1, 0])
-        make_rating_answer_counters(self.question_likert, self.contribution1, [0, 0, 3, 0, 3])
-        make_rating_answer_counters(self.question_likert, self.general_contribution, [0, 0, 0, 0, 5])
-        make_rating_answer_counters(self.question_likert_2, self.general_contribution, [0, 0, 3, 0, 0])
-        make_rating_answer_counters(self.question_grade, self.general_contribution, [0, 10, 0, 0, 0])
+        counters = [
+            *make_rating_answer_counters(self.question_grade, self.contribution1, [1, 0, 1, 0, 0], False),
+            *make_rating_answer_counters(self.question_grade, self.contribution2, [0, 1, 0, 1, 0], False),
+            *make_rating_answer_counters(self.question_likert, self.contribution1, [0, 0, 3, 0, 3], False),
+            *make_rating_answer_counters(self.question_likert, self.general_contribution, [0, 0, 0, 0, 5], False),
+            *make_rating_answer_counters(self.question_likert_2, self.general_contribution, [0, 0, 3, 0, 0], False),
+            *make_rating_answer_counters(self.question_grade, self.general_contribution, [0, 10, 0, 0, 0], False),
+        ]
+        RatingAnswerCounter.objects.bulk_create(counters)

         cache_results(self.evaluation)

