Skip to content

Commit

Permalink
Full codebase autoformat using pre-commit
Browse files Browse the repository at this point in the history
  • Loading branch information
thenav56 committed Jun 14, 2024
1 parent 6b24521 commit 95cab4b
Show file tree
Hide file tree
Showing 596 changed files with 27,839 additions and 30,180 deletions.
2 changes: 1 addition & 1 deletion apps/analysis/apps.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,4 +2,4 @@


class AnalysisConfig(AppConfig):
    """Django application configuration for the `analysis` app."""

    # App label used by Django's app registry.
    name = "analysis"
144 changes: 77 additions & 67 deletions apps/analysis/dataloaders.py
Original file line number Diff line number Diff line change
@@ -1,37 +1,39 @@
from promise import Promise
from collections import defaultdict

from django.utils.functional import cached_property
from django.db import models
from django.utils.functional import cached_property
from promise import Promise

from utils.graphene.dataloaders import DataLoaderWithContext, WithContextMixin

from .models import (
Analysis,
AnalysisPillar,
AnalysisReport,
AnalysisReportContainer,
AnalysisReportContainerData,
AnalysisReportSnapshot,
AnalysisReportUpload,
AnalyticalStatement,
AnalyticalStatementEntry,
DiscardedEntry,
TopicModelCluster,
AnalysisReportUpload,
AnalysisReportContainerData,
AnalysisReportContainer,
AnalysisReportSnapshot,
)


class AnalysisPublicationDatesLoader(DataLoaderWithContext):
def batch_load_fn(self, keys):
qs = AnalyticalStatementEntry.objects.filter(
analytical_statement__analysis_pillar__analysis__in=keys,
).order_by().values('analytical_statement__analysis_pillar__analysis').annotate(
published_on_min=models.Min('entry__lead__published_on'),
published_on_max=models.Max('entry__lead__published_on'),
).values_list(
'published_on_min',
'published_on_max',
'analytical_statement__analysis_pillar__analysis'
qs = (
AnalyticalStatementEntry.objects.filter(
analytical_statement__analysis_pillar__analysis__in=keys,
)
.order_by()
.values("analytical_statement__analysis_pillar__analysis")
.annotate(
published_on_min=models.Min("entry__lead__published_on"),
published_on_max=models.Max("entry__lead__published_on"),
)
.values_list("published_on_min", "published_on_max", "analytical_statement__analysis_pillar__analysis")
)
_map = {}
for start_date, end_date, _id in qs:
Expand All @@ -45,17 +47,13 @@ def batch_load_fn(self, keys):

class AnalysisAnalyzedEntriesLoader(DataLoaderWithContext):
    """Batch-load the analyzed-entries count for each Analysis id in `keys`."""

    def batch_load_fn(self, keys):
        # Wrap each id in an unsaved Analysis stub; the model classmethod
        # returns a mapping of analysis id -> analyzed entry count.
        entry_counts = Analysis.get_analyzed_entries([Analysis(id=pk) for pk in keys])
        # Preserve the order of `keys`; missing ids default to 0.
        return Promise.resolve([entry_counts.get(pk, 0) for pk in keys])


class AnalysisAnalyzedLeadsLoader(DataLoaderWithContext):
    """Batch-load the analyzed-sources (leads) count for each Analysis id in `keys`."""

    def batch_load_fn(self, keys):
        # Unsaved Analysis stubs are enough: the classmethod only needs the pks.
        source_counts = Analysis.get_analyzed_sources([Analysis(id=pk) for pk in keys])
        # One result per requested key, in request order; absent ids -> 0.
        return Promise.resolve([source_counts.get(pk, 0) for pk in keys])


Expand Down Expand Up @@ -88,57 +86,71 @@ def batch_load_fn(self, keys):

class AnalysisPillarsAnalyzedEntriesLoader(DataLoaderWithContext):
    """Batch-load, per AnalysisPillar id, the number of analyzed entries.

    "Analyzed" is the sum of two per-pillar subquery counts:
    - dragged_entries: distinct entries attached via AnalyticalStatement,
      restricted to entries whose lead was published on or before the
      analysis end date;
    - discarded_entries: distinct DiscardedEntry rows under the same
      published-on cutoff.
    """

    def batch_load_fn(self, keys):
        # Subquery counting distinct statement entries for the outer pillar.
        dragged_sq = models.Subquery(
            AnalyticalStatement.objects.filter(analysis_pillar=models.OuterRef("pk"))
            .order_by()
            .values("analysis_pillar")
            .annotate(
                count=models.Count(
                    "entries",
                    distinct=True,
                    filter=models.Q(entries__lead__published_on__lte=models.OuterRef("analysis__end_date")),
                )
            )
            .values("count")[:1],
            output_field=models.IntegerField(),
        )
        # Subquery counting distinct discarded entries for the outer pillar.
        discarded_sq = models.Subquery(
            DiscardedEntry.objects.filter(analysis_pillar=models.OuterRef("pk"))
            .order_by()
            .values("analysis_pillar__analysis")
            .annotate(
                count=models.Count(
                    "entry",
                    distinct=True,
                    filter=models.Q(entry__lead__published_on__lte=models.OuterRef("analysis__end_date")),
                )
            )
            .values("count")[:1],
            output_field=models.IntegerField(),
        )
        qs = (
            AnalysisPillar.objects.filter(id__in=keys)
            .annotate(
                # Coalesce: a pillar with no matching rows yields NULL from the
                # subquery; treat that as 0 so the sum below stays non-NULL.
                dragged_entries=models.functions.Coalesce(dragged_sq, 0),
                discarded_entries=models.functions.Coalesce(discarded_sq, 0),
                analyzed_entries=models.F("dragged_entries") + models.F("discarded_entries"),
            )
            .values_list("id", "analyzed_entries")
        )
        counts_by_id = dict(qs)
        # Results in the same order as `keys`; pillars not found -> 0.
        return Promise.resolve([counts_by_id.get(pk, 0) for pk in keys])


class AnalysisStatementAnalyzedEntriesLoader(DataLoaderWithContext):
    """Batch-load the distinct entry count for each AnalyticalStatement id."""

    def batch_load_fn(self, keys):
        rows = (
            AnalyticalStatement.objects.filter(id__in=keys)
            .annotate(count=models.Count("entries", distinct=True))
            .values("id", "count")
        )
        counts_by_id = {row["id"]: row["count"] for row in rows}
        # Keep request order; statements with no rows default to 0.
        return Promise.resolve([counts_by_id.get(pk, 0) for pk in keys])


class AnalysisTopicModelClusterEntryLoader(DataLoaderWithContext):
def batch_load_fn(self, keys):
qs = TopicModelCluster.entries.through.objects.filter(
topicmodelcluster__in=keys,
).select_related('entry').order_by('topicmodelcluster', 'entry_id')
qs = (
TopicModelCluster.entries.through.objects.filter(
topicmodelcluster__in=keys,
)
.select_related("entry")
.order_by("topicmodelcluster", "entry_id")
)
_map = defaultdict(list)
for cluster_entry in qs:
_map[cluster_entry.topicmodelcluster_id].append(cluster_entry.entry)
Expand All @@ -151,10 +163,7 @@ def batch_load_fn(self, keys):
qs = AnalysisReportUpload.objects.filter(
id__in=keys,
)
_map = {
item.pk: item
for item in qs
}
_map = {item.pk: item for item in qs}
return Promise.resolve([_map.get(key, []) for key in keys])


Expand All @@ -173,7 +182,7 @@ class OrganizationByAnalysisReportLoader(DataLoaderWithContext):
def batch_load_fn(self, keys):
qs = AnalysisReport.organizations.through.objects.filter(
analysisreport__in=keys,
).select_related('organization')
).select_related("organization")
_map = defaultdict(list)
for item in qs:
_map[item.analysisreport_id].append(item.organization)
Expand Down Expand Up @@ -204,13 +213,14 @@ def batch_load_fn(self, keys):

class LatestReportSnapshotByAnalysisReportLoader(DataLoaderWithContext):
    """Batch-load the most recently published snapshot for each report id."""

    def batch_load_fn(self, keys):
        # DISTINCT ON report_id combined with (report_id, -published_on)
        # ordering keeps exactly one row per report: the latest snapshot.
        snapshots = (
            AnalysisReportSnapshot.objects.filter(
                report__in=keys,
            )
            .order_by("report_id", "-published_on")
            .distinct("report_id")
        )
        latest_by_report = {snap.report_id: snap for snap in snapshots}
        # Reports with no snapshot resolve to None.
        return Promise.resolve([latest_by_report.get(pk) for pk in keys])


Expand Down
Loading

0 comments on commit 95cab4b

Please sign in to comment.