Update Download data CSV (#4138)
dimasciput authored Aug 8, 2024
1 parent 09c2f9c commit 3db2a84
Showing 7 changed files with 123 additions and 52 deletions.
86 changes: 46 additions & 40 deletions bims/api_views/location_site_overview.py
@@ -1,5 +1,6 @@
import hashlib
import json
import time
from collections import OrderedDict

from django.contrib.sites.models import Site
@@ -61,20 +62,25 @@ def biodiversity_data(self):
category=TaxonomicGroupCategory.SPECIES_MODULE.name
).order_by('display_order')

collection_results = collection_results.select_related(
'taxonomy', 'taxonomy__endemism', 'taxonomy__iucn_status', 'site_visit'
)
location_site_ids = set()
taxonomy_ids = set()
for group in groups:
group_data = dict()
group_data = {}
try:
group_data[self.GROUP_ICON] = get_thumbnail(
group.logo, 'x50', crop='center'
).name
except ValueError:
pass
group_data[self.MODULE] = group.id

group_data[self.MODULE] = group.id
biodiversity_data[group.name] = group_data
group_records = collection_results.filter(
module_group=group
)

group_records = collection_results.filter(module_group=group)
group_records_count = group_records.count()

if group_records.count() > 0 and not self.is_sass_exist:
try:
@@ -91,36 +97,36 @@ def biodiversity_data(self):
except: # noqa
self.is_sass_exist = False

group_data[self.GROUP_OCCURRENCES] = group_records.count()
group_data[self.GROUP_OCCURRENCES] = group_records_count
location_site_ids.update(group_records.values_list('site', flat=True))
taxonomy_ids.update(group_records.values_list('taxonomy', flat=True))

group_data[self.GROUP_SITES] = LocationSite.objects.filter(
id__in=group_records.values('site')
id__in=location_site_ids
).count()

group_data[self.GROUP_NUM_OF_TAXA] = Taxonomy.objects.filter(
id__in=group_records.values('taxonomy')
id__in=taxonomy_ids
).count()
group_data[self.GROUP_ENDEMISM] = list(group_records.annotate(
name=Case(When(taxonomy__endemism__isnull=False,
then=F('taxonomy__endemism__name')),
default=Value('Unknown'))
).values(
'name'
).annotate(
count=Count('name')
).values(
'name', 'count'
).order_by('name'))

endemism_counts = group_records.annotate(
name=Case(
When(
taxonomy__endemism__isnull=False,
then=F('taxonomy__endemism__name')),
default=Value('Unknown')
)
).values('name').annotate(count=Count('name')).order_by('name')

group_data[self.GROUP_ENDEMISM] = list(endemism_counts)

group_origins = group_records.annotate(
name=Case(When(taxonomy__origin='',
then=Value('Unknown')),
default=F('taxonomy__origin'))
).values(
'name'
).annotate(
count=Count('name')
).values(
'name', 'count'
).order_by('name')
name=Case(
When(taxonomy__origin='', then=Value('Unknown')),
default=F('taxonomy__origin')
)
).values('name').annotate(count=Count('name')).order_by('name')

if group_origins:
category = dict(Taxonomy.CATEGORY_CHOICES)
for group_origin in group_origins:
@@ -131,16 +137,14 @@ def biodiversity_data(self):
all_cons_status = group_records.filter(
taxonomy__iucn_status__national=False
).annotate(
name=Case(When(taxonomy__iucn_status__isnull=False,
then=F('taxonomy__iucn_status__category')),
default=Value('Not evaluated'))
).values(
'name'
).annotate(
count=Count('name')
).values(
'name', 'count'
).order_by('name')
name=Case(
When(
taxonomy__iucn_status__isnull=False,
then=F('taxonomy__iucn_status__category')),
default=Value('Not evaluated')
)
).values('name').annotate(count=Count('name')).order_by('name')

if all_cons_status:
category = dict(IUCNStatus.CATEGORY_CHOICES)
for cons_status in all_cons_status:
@@ -150,7 +154,6 @@

return biodiversity_data


class MultiLocationSitesOverview(APIView, LocationSiteOverviewData):

def get(self, request):
@@ -216,6 +219,7 @@ def get_object(self, pk):
raise Http404

def get(self, request):
start_time = time.time()
self.search_filters = dict(request.GET)
if not request.user.is_anonymous:
self.search_filters['requester'] = request.user.id
@@ -239,4 +243,6 @@ def get(self, request):
serializer = LocationSiteDetailSerializer(
location_site)
response_data.update(serializer.data)
end_time = time.time()
response_data['duration'] = end_time - start_time
return Response(response_data)
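
The location_site_overview.py changes follow a familiar Django optimisation pattern: select_related() loads the taxonomy, endemism, IUCN status and site-visit rows together with the collection records, site and taxonomy IDs are accumulated in Python sets while looping over the module groups, and the per-group record count is evaluated once and reused. The single-site view also gains simple time.time() instrumentation that reports the request duration in the response. A minimal sketch of the set-accumulation idea, using stand-in names rather than the real BIMS view code:

from django.db.models import QuerySet


def biodiversity_summary(collection_records: QuerySet, groups) -> dict:
    # Pull related rows in the same query instead of one query per record.
    records = collection_records.select_related('taxonomy', 'site_visit')

    site_ids, taxonomy_ids = set(), set()
    summary = {}
    for group in groups:
        group_records = records.filter(module_group=group)
        occurrences = group_records.count()  # evaluate once, reuse below
        # values_list() keeps these as cheap ID-only queries.
        site_ids.update(group_records.values_list('site', flat=True))
        taxonomy_ids.update(group_records.values_list('taxonomy', flat=True))
        summary[group.name] = {
            'occurrences': occurrences,
            # The real view counts LocationSite/Taxonomy rows filtered by
            # these accumulated ID sets; len() stands in for that here.
            'sites': len(site_ids),
            'taxa': len(taxonomy_ids),
        }
    return summary
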
7 changes: 3 additions & 4 deletions bims/api_views/search.py
@@ -476,7 +476,7 @@ def process_search(self):
'biologicalcollectionrecord__isnull': False
})
elif self.search_query:
bio = collection_records_by_site.filter(
bio = collection_records_by_site.select_related('taxonomy').filter(
Q(taxonomy__canonical_name__icontains=self.search_query) |
Q(taxonomy__accepted_taxonomy__canonical_name__icontains=
self.search_query) |
@@ -505,7 +505,7 @@ def process_search(self):
)
if not bio.exists():
# Search by vernacular names
bio = collection_records_by_site.filter(
bio = collection_records_by_site.select_related('taxonomy').filter(
taxonomy__vernacular_names__name__icontains=
self.search_query
)
@@ -626,13 +626,12 @@ def process_search(self):

if self.filtered_taxa_records is not None:
filters['taxonomy__in'] = self.filtered_taxa_records
bio = bio.select_related('taxonomy')

if filters:
filters['taxonomy__isnull'] = False
bio_filtered = True

bio = bio.filter(**filters)
bio = bio.select_related('taxonomy').filter(**filters)

requester_id = self.parameters.get('requester', None)

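In search.py, select_related('taxonomy') is attached to the collection queryset before the taxonomy-based filters. The JOIN required by the filter happens either way; what select_related adds is that each record's Taxonomy object is fetched and cached in the same query, so later attribute access does not cost one extra query per record. An illustrative comparison (the model and field names appear in the diff, the search term is made up):

from bims.models import BiologicalCollectionRecord

# Lazy access: every record.taxonomy attribute hit issues another query (N+1).
records = BiologicalCollectionRecord.objects.filter(
    taxonomy__canonical_name__icontains='barbus')
names = [record.taxonomy.canonical_name for record in records]

# Eager access: the taxonomy row is joined and cached with each record.
records = BiologicalCollectionRecord.objects.select_related('taxonomy').filter(
    taxonomy__canonical_name__icontains='barbus')
names = [record.taxonomy.canonical_name for record in records]  # no extra queries
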
32 changes: 31 additions & 1 deletion bims/download/collection_record.py
@@ -41,6 +41,9 @@ def write_to_csv(headers: list,
if header == 'sub_species':
formatted_headers.append('SubSpecies')
continue
if header.lower().strip() == 'cites_listing':
formatted_headers.append('CITES listing')
continue
header = header.replace('_or_', '/')
if not header.isupper():
header = header.replace('_', ' ').capitalize()
@@ -78,9 +81,33 @@ def download_collection_records(
from bims.api_views.search import CollectionSearch
from bims.models import BiologicalCollectionRecord
from bims.tasks.email_csv import send_csv_via_email
from preferences import preferences

project_name = preferences.SiteSetting.project_name

exclude_fields = []
headers = []

if project_name.lower() == 'sanparks':
exclude_fields = [
'user_river_name',
'river_name',
'user_wetland_name',
'wetland_name',
'user_geomorphological_zone',
'hydroperiod',
'wetland_indicator_status',
'broad_biotope',
'specific_biotope',
'substratum',
'analyst',
'analyst_institute',
'sampling_effort_measure',
'sampling_effort_value',
'abundance_value',
'abundance_measure'
]

def get_download_request(request_id):
try:
return DownloadRequest.objects.get(
@@ -93,7 +120,10 @@ def write_batch_to_csv(header, rows, path_file, start_index):
bio_serializer = (
BioCollectionOneRowSerializer(
rows, many=True,
context={'header': header})
context={
'header': header,
'exclude_fields': exclude_fields
})
)
bio_data = bio_serializer.data
if len(header) == 0:
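Two things change in collection_record.py: the CSV header formatter treats cites_listing as a special case so the column comes out as 'CITES listing' instead of a plain capitalised form, and download_collection_records builds an exclude_fields list when the site's project name is 'sanparks', which is later handed to the row serializer through its context. The header logic, consolidated into a standalone helper for illustration (not the exact function from the PR):

def format_headers(headers):
    """Turn snake_case field names into human-readable CSV column headers."""
    formatted = []
    for header in headers:
        if header == 'sub_species':
            formatted.append('SubSpecies')
            continue
        if header.lower().strip() == 'cites_listing':
            formatted.append('CITES listing')  # keep the acronym upper-case
            continue
        header = header.replace('_or_', '/')
        if not header.isupper():
            header = header.replace('_', ' ').capitalize()
        formatted.append(header)
    return formatted


# format_headers(['site_code', 'sub_species', 'cites_listing'])
# -> ['Site code', 'SubSpecies', 'CITES listing']
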
29 changes: 28 additions & 1 deletion bims/serializers/bio_collection_serializer.py
@@ -2,6 +2,7 @@
import logging
import uuid

from bims.models import CITESListingInfo
from bims.models.chem import Chem
from preferences import preferences
from rest_framework import serializers
@@ -154,6 +155,7 @@ class BioCollectionOneRowSerializer(
ecosystem_type = serializers.SerializerMethodField()
hydroperiod = serializers.SerializerMethodField()
wetland_indicator_status = serializers.SerializerMethodField()
cites_listing = serializers.SerializerMethodField()

def taxon_name_by_rank(
self,
@@ -219,9 +221,14 @@ def spatial_data(self, obj, key):
def __init__(self, *args, **kwargs):
super(BioCollectionOneRowSerializer, self).__init__(*args, **kwargs)
self.context['chem_records_cached'] = {}
exclude_fields = self.context.get('exclude_fields', [])
if 'header' not in self.context:
self.context['header'] = []

for field in exclude_fields:
if field in self.fields:
self.fields.pop(field)

def chem_data(self, obj, chem):
return chem

@@ -301,6 +308,23 @@ def get_conservation_status_national(self, obj):
def get_site_code(self, obj):
return obj.site.site_code

def get_cites_listing(self, obj: BiologicalCollectionRecord):
cites_listing_info = CITESListingInfo.objects.filter(
taxonomy_id=obj.taxonomy.id
)
if cites_listing_info.exists():
return ','.join(list(cites_listing_info.values_list(
'appendix', flat=True
)))
if obj.taxonomy.additional_data:
if 'CITES Listing' in obj.taxonomy.additional_data:
return obj.taxonomy.additional_data['CITES Listing']
if 'Cites listing' in obj.taxonomy.additional_data:
return obj.taxonomy.additional_data['Cites listing']
if 'CITES listing' in obj.taxonomy.additional_data:
return obj.taxonomy.additional_data['CITES listing']
return ''

def get_user_site_code(self, obj):
return obj.site.legacy_site_code

@@ -683,7 +707,8 @@ class Meta:
'rights_holder',
'recorded_by',
'decision_support_tool',
'record_type'
'record_type',
'cites_listing'
]

def to_representation(self, instance: BiologicalCollectionRecord):
@@ -935,6 +960,8 @@ def to_representation(self, instance: BiologicalCollectionRecord):
if taxon_extra_attributes.exists():
for taxon_extra_attribute in taxon_extra_attributes:
taxon_attribute_name = taxon_extra_attribute.name
if taxon_attribute_name.lower().strip() == 'cites listing':
continue
key_title = taxon_attribute_name.lower().replace(' ', '_')
cache_key = '{id}-{extra_id}'.format(
id=instance.taxonomy.id,
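The serializer side of the field exclusion is the standard DRF pattern: __init__ reads exclude_fields from the serializer context and pops those names from self.fields, so callers such as the CSV download can trim project-specific columns without defining a second serializer class. The new cites_listing column is a SerializerMethodField that first joins any CITESListingInfo appendices and then falls back to the taxonomy's additional_data JSON. A minimal, self-contained illustration of the context-driven exclusion (hypothetical serializer and fields, not the BIMS one):

from rest_framework import serializers


class RecordRowSerializer(serializers.Serializer):
    site_code = serializers.CharField()
    river_name = serializers.CharField()
    abundance_value = serializers.FloatField()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Drop any field the caller asked to exclude via the context.
        for field in self.context.get('exclude_fields', []):
            self.fields.pop(field, None)


row = {'site_code': 'RIV-01', 'river_name': 'Berg', 'abundance_value': 3.0}
serializer = RecordRowSerializer(row, context={'exclude_fields': ['river_name']})
print(serializer.data)  # {'site_code': 'RIV-01', 'abundance_value': 3.0}
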
13 changes: 10 additions & 3 deletions bims/serializers/checklist_serializer.py
@@ -277,12 +277,19 @@ def get_national_conservation_status(self, obj):

def get_cites_listing(self, obj: Taxonomy):
cites_listing_info = CITESListingInfo.objects.filter(
taxonomy=obj
).order_by('appendix')
if cites_listing_info:
taxonomy_id=obj.id
)
if cites_listing_info.exists():
return ','.join(list(cites_listing_info.values_list(
'appendix', flat=True
)))
if obj.additional_data:
if 'CITES Listing' in obj.additional_data:
return obj.additional_data['CITES Listing']
if 'Cites listing' in obj.additional_data:
return obj.additional_data['Cites listing']
if 'CITES listing' in obj.additional_data:
return obj.additional_data['CITES listing']
return ''

class Meta:
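The same CITES lookup now appears in three places (the collection-row serializer, the checklist serializer and the taxa-list CSV view): first any CITESListingInfo appendices for the taxonomy, joined with commas, then the taxonomy's additional_data JSON under each of the historical key spellings. The additional_data fallback, condensed into a small illustrative helper (not code from the PR):

CITES_KEYS = ('CITES Listing', 'Cites listing', 'CITES listing')


def cites_listing_from_additional_data(additional_data):
    """Return the CITES listing stored under any historical key spelling, else ''."""
    if not additional_data:
        return ''
    for key in CITES_KEYS:
        if key in additional_data:
            return additional_data[key]
    return ''
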
2 changes: 1 addition & 1 deletion bims/tasks/download_taxa_list.py
@@ -43,7 +43,7 @@ def update_headers(_headers):
if header == 'Sub species':
header = 'SubSpecies'
if header.lower().strip() == 'cites_listing':
header = 'CITES Listing'
header = 'CITES listing'
_updated_headers.append(header)
return _updated_headers

6 changes: 4 additions & 2 deletions bims/views/download_csv_taxa_list.py
@@ -64,8 +64,10 @@ def get_cites_listing(self, obj: Taxonomy):
if obj.additional_data:
if 'CITES Listing' in obj.additional_data:
return obj.additional_data['CITES Listing']
if 'Cites Listing' in obj.additional_data:
return obj.additional_data['Cites Listing']
if 'Cites listing' in obj.additional_data:
return obj.additional_data['Cites listing']
if 'CITES listing' in obj.additional_data:
return obj.additional_data['CITES listing']
return ''

def get_taxon_rank(self, obj):
