NITF Pansharpening #529

Draft · wants to merge 4 commits into main
9 changes: 5 additions & 4 deletions rdwatch/core/tasks/__init__.py
@@ -331,7 +331,7 @@ def get_siteobservations_images(
logger.info(f'Skipping Timestamp: {timestamp}')
continue
if found.exists() and not force:
found_timestamps[observation.timestamp] = True
found_timestamps[observation.timestamp.replace(microsecond=0)] = True
continue
results = fetch_boundbox_image(
bbox,
@@ -344,12 +344,12 @@ def get_siteobservations_images(
logger.info(f'COULD NOT FIND ANY IMAGE FOR TIMESTAMP: {timestamp}')
continue
bytes = results['bytes']
percent_black = get_percent_black_pixels(bytes)
cloudcover = results['cloudcover']
found_timestamp = results['timestamp']
found_timestamp = results['timestamp'].replace(microsecond=0)
if bytes is None:
logger.info(f'COULD NOT FIND ANY IMAGE FOR TIMESTAMP: {timestamp}')
continue
percent_black = get_percent_black_pixels(bytes)
cloudcover = results['cloudcover']
if dayRange != -1 and percent_black < no_data_limit:
found_timestamps[found_timestamp] = True
elif dayRange == -1:
@@ -523,6 +523,7 @@ def get_siteobservations_images(
image_dimensions=[imageObj.width, imageObj.height],
)
else:
logger.info('Skipping timestamp because image already found')
count += 1
return downloaded_count

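The change in rdwatch/core/tasks/__init__.py truncates both the stored observation timestamp and the timestamp returned by the image search to whole seconds before using them as found_timestamps keys, and moves the bytes-None check ahead of any pixel statistics. A minimal sketch of why the truncation matters, using made-up timestamps:

```python
from datetime import datetime

# Hypothetical timestamps: a stored observation vs. the value returned by the image search
observation_ts = datetime(2021, 3, 4, 10, 15, 2)      # whole seconds
result_ts = datetime(2021, 3, 4, 10, 15, 2, 437512)   # sub-second precision

found_timestamps = {}
found_timestamps[result_ts] = True
assert observation_ts not in found_timestamps          # microsecond mismatch: the key misses

found_timestamps[result_ts.replace(microsecond=0)] = True
assert observation_ts in found_timestamps              # truncated keys line up
```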
133 changes: 112 additions & 21 deletions rdwatch/core/utils/worldview_nitf/raster_tile.py
@@ -9,12 +9,39 @@
import boto3
import rasterio # type: ignore
from rio_tiler.io.rasterio import Reader
from rio_tiler.models import ImageData
from rio_tiler.utils import pansharpening_brovey

from rdwatch.core.utils.worldview_nitf.satellite_captures import WorldViewNITFCapture

logger = logging.getLogger(__name__)


def find_rgb_channels(colorinterp):
rgb_indexes = {}

# Define the channels to look for
channels = ['red', 'green', 'blue', 'gray']

for index, channel in enumerate(colorinterp):
if channel in channels:
rgb_indexes[channel] = index

# Return indexes as a tuple; band indexes are 1-based
red = rgb_indexes.get('red', -1) + 1
green = rgb_indexes.get('green', -1) + 1
blue = rgb_indexes.get('blue', -1) + 1
gray = rgb_indexes.get('gray', -1) + 1
if red > 0 and green > 0 and blue > 0:
return (red, green, blue)
elif gray > 0:
return gray
else:
raise ValueError(
f'colorInterp: {colorinterp} does not include RGB or gray channels'
)


@contextmanager
def _download_nitf_image(uri: str) -> Generator[str, None, None]:
s3 = boto3.client('s3')
@@ -47,8 +47,10 @@ def get_worldview_nitf_tile(
VSI_CACHE='TRUE',
VSI_CACHE_SIZE=5000000,
):
with Reader(input=capture.uri) as img:
rgb = img.tile(x, y, z, tilesize=512)
with Reader(input=capture.uri) as vis_img:
information = vis_img.info()
rgb_channels = find_rgb_channels(information.colorinterp)
rgb = vis_img.tile(x, y, z, tilesize=512, indexes=rgb_channels)
return rgb.render(img_format='WEBP')


@@ -61,25 +61,90 @@ def get_worldview_nitf_bbox(
with rasterio.Env(
GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR',
GDAL_HTTP_MERGE_CONSECUTIVE_RANGES='YES',
GDAL_CACHEMAX=200,
CPL_VSIL_CURL_CACHE_SIZE=20000000,
GDAL_BAND_BLOCK_CACHE='HASHSET',
GDAL_HTTP_MULTIPLEX='YES',
GDAL_HTTP_VERSION=2,
VSI_CACHE='TRUE',
VSI_CACHE_SIZE=5000000,
CPL_VSIL_CURL_CHUNK_SIZE=524288,
):
with _download_nitf_image(capture.uri) as nitf_filepath:
with Reader(input=nitf_filepath) as img:
rgb = img.part(bbox)

if scale == 'default':
rgb.rescale(in_range=((0, 255),))
elif scale == 'bits':
if capture.bits_per_pixel != 8:
max_bits = 2**capture.bits_per_pixel - 1
rgb.rescale(in_range=((0, max_bits),))
elif isinstance(scale, list) and len(scale) == 2: # scale is an integeter range
rgb.rescale(in_range=((scale[0], scale[1]),))

return rgb.render(img_format=format)
logger.info(f'Downloading WorldView NITF bbox: {bbox} scale: {scale}')
if capture.panuri is not None:
with Reader(input=capture.panuri) as pan_img:
logger.info(f'Downloading PanURI Chip: {capture.panuri}')
try:
pan_chip = pan_img.part(bbox=bbox, dst_crs='epsg:4326')
except ValueError as e:
logger.info(f'Value Error: {e} - {capture.panuri}')
return None
with Reader(input=capture.uri) as vis_img:
logger.info(f'Downloading URI Chip: {capture.uri}')
information = vis_img.info()
rgb_channels = find_rgb_channels(information.colorinterp)
if all(
channel is not None for channel in rgb_channels
): # Ensure all RGB channels exist
try:
vis_chip = vis_img.part(
bbox=bbox,
indexes=rgb_channels,
dst_crs='epsg:4326',
width=pan_chip.width,
height=pan_chip.height,
)
except ValueError as e:
logger.info(f'Value Error: {e} - {capture.uri}')
return None
final_chip = ImageData(
pansharpening_brovey(
vis_chip.data, pan_chip.data, 0.2, 'uint16'
)
)
statistics = list(final_chip.statistics().values())
if scale == 'default':
in_range = tuple(
(item.min, item.max) for item in statistics
)
elif scale == 'bits':
in_range = tuple(
(item['percentile_2'], item['percentile_98'])
for item in statistics
)
elif (
isinstance(scale, list) and len(scale) == 2
): # scale is an integer range
in_range = tuple(
(scale[0], scale[1]) for item in statistics
)
final_chip.rescale(in_range=in_range)
return final_chip.render(img_format=format)
else:
with Reader(input=capture.uri) as vis_img:
information = vis_img.info()
rgb_channels = find_rgb_channels(information.colorinterp)
if all(
channel is not None for channel in rgb_channels
): # Ensure all RGB channels exist
try:
final_chip = vis_img.part(
bbox=bbox,
dst_crs='epsg:4326',
indexes=rgb_channels,
)
except ValueError as e:
logger.info(f'Value Error: {e} - {capture.uri}')
return None
statistics = list(final_chip.statistics().values())
if scale == 'default':
in_range = tuple((item.min, item.max) for item in statistics)
elif scale == 'bits':
in_range = tuple(
(item['percentile_2'], item['percentile_98'])
for item in statistics
)
elif (
isinstance(scale, list) and len(scale) == 2
): # scale is an integer range
in_range = tuple((scale[0], scale[1]) for item in statistics)
final_chip.rescale(in_range=in_range)
return final_chip.render(img_format=format)
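For context, a rough standalone sketch of the pansharpening path added in raster_tile.py, assuming a co-registered visual/panchromatic NITF pair; the file paths and bbox below are placeholders, and the PR itself reads them from capture.uri / capture.panuri and rescales the result before rendering:

```python
from rio_tiler.io.rasterio import Reader
from rio_tiler.models import ImageData
from rio_tiler.utils import pansharpening_brovey

from rdwatch.core.utils.worldview_nitf.raster_tile import find_rgb_channels

# Placeholder inputs; real values come from WorldViewNITFCapture
vis_path, pan_path = 'vis.ntf', 'pan.ntf'
bbox = (-104.99, 39.73, -104.98, 39.74)  # hypothetical lon/lat bounds

with Reader(input=pan_path) as pan_img:
    # The high-resolution panchromatic chip defines the output size
    pan_chip = pan_img.part(bbox=bbox, dst_crs='epsg:4326')

with Reader(input=vis_path) as vis_img:
    # Pick the 1-based red/green/blue band indexes from the file's color interpretation
    rgb_channels = find_rgb_channels(vis_img.info().colorinterp)
    # Read the visual chip at the panchromatic resolution so the arrays align
    vis_chip = vis_img.part(
        bbox=bbox,
        indexes=rgb_channels,
        dst_crs='epsg:4326',
        width=pan_chip.width,
        height=pan_chip.height,
    )

# Brovey fusion: modulate each RGB band by the panchromatic intensity ratio
sharpened = ImageData(pansharpening_brovey(vis_chip.data, pan_chip.data, 0.2, 'uint16'))
png_bytes = sharpened.render(img_format='PNG')
```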
37 changes: 34 additions & 3 deletions rdwatch/core/utils/worldview_nitf/satellite_captures.py
@@ -1,10 +1,13 @@
import logging
from dataclasses import dataclass
from datetime import datetime, timedelta
from typing import cast

from rdwatch.core.utils.capture import URICapture
from rdwatch.core.utils.worldview_nitf.stac_search import worldview_search

logger = logging.getLogger(__name__)


@dataclass()
class WorldViewNITFCapture(URICapture):
@@ -14,6 +17,8 @@ class WorldViewNITFCapture(URICapture):
collection: str
bits_per_pixel: int
epsg: int
panuri: str | None
instruments: list[str]


def get_features(
@@ -34,13 +39,22 @@ def get_captures(
timebuffer = timedelta(hours=1)

features = [f for f in get_features(timestamp, bbox, timebuffer=timebuffer)]
captures = []
vis_captures: list[WorldViewNITFCapture] = []
pan_captures: list[WorldViewNITFCapture] = []
for feature in features:
if 'data' in feature['assets']:
cloudcover = 0
instruments = []
if 'properties' in feature:
if 'eo:cloud_cover' in feature['properties']:
cloudcover = feature['properties']['eo:cloud_cover']
if (
'nitf:compression' in feature['properties']
and feature['properties']['nitf:compression'] != 'NC'
):
continue
if 'instruments' in feature['properties']:
instruments = feature['properties']['instruments']
capture = WorldViewNITFCapture(
timestamp=datetime.fromisoformat(
feature['properties']['datetime'].rstrip('Z')
@@ -50,8 +64,25 @@ def get_captures(
bits_per_pixel=feature['properties']['nitf:bits_per_pixel'],
cloudcover=cloudcover,
collection=feature['collection'],
instruments=instruments,
panuri=None,
epsg=feature['properties']['proj:epsg'],
)
captures.append(capture)
if 'panchromatic' in instruments:
pan_captures.append(capture)
elif 'vis-multi' in instruments:
vis_captures.append(capture)
else:
logger.info(f'Instrument type {instruments} is not supported')
# Attach the matching panchromatic URI to each visual capture, when one exists
for pan_capture in pan_captures:
pan_timestamp = pan_capture.timestamp
for vis_capture in vis_captures:
vis_timestamp = vis_capture.timestamp
# Match on timestamp within a 60-second tolerance
if abs((vis_timestamp - pan_timestamp).total_seconds()) < 60:
# If a match is found, update the vis-multi capture's panuri field
vis_capture.panuri = pan_capture.uri
break

return captures
return vis_captures
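For orientation, a hypothetical and heavily trimmed STAC item showing the properties get_captures now consults when classifying features; every value below is made up:

```python
# Hypothetical STAC feature; only fields referenced in get_captures are shown
feature = {
    'collection': 'worldview-nitf-example',
    'assets': {'data': {'href': 's3://example-bucket/example.ntf'}},  # placeholder asset
    'properties': {
        'datetime': '2021-03-04T10:15:02Z',
        'eo:cloud_cover': 12,
        'nitf:compression': 'NC',        # anything other than 'NC' is skipped
        'nitf:bits_per_pixel': 11,
        'proj:epsg': 4326,
        # 'panchromatic' items become panuri donors; 'vis-multi' items are returned
        'instruments': ['vis-multi'],
    },
}
```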
6 changes: 3 additions & 3 deletions rdwatch/scoring/tasks/__init__.py
@@ -246,12 +246,12 @@ def get_siteobservations_images(
logger.info(f'COULD NOT FIND ANY IMAGE FOR TIMESTAMP: {timestamp}')
continue
bytes = results['bytes']
percent_black = get_percent_black_pixels(bytes)
cloudcover = results['cloudcover']
found_timestamp = results['timestamp']
if bytes is None:
logger.info(f'COULD NOT FIND ANY IMAGE FOR TIMESTAMP: {timestamp}')
continue
percent_black = get_percent_black_pixels(bytes)
cloudcover = results['cloudcover']
found_timestamp = results['timestamp']
if dayRange != -1 and percent_black < no_data_limit:
found_timestamps[found_timestamp] = True
elif dayRange == -1:
36 changes: 18 additions & 18 deletions rdwatch/scoring/tasks/animation_export.py
@@ -601,24 +601,24 @@ def create_site_animation_export(
celery_id=task_id,
arguments=settings,
)
# try:
site_export.celery_id = task_id
site_export.save()
file_path, name = create_animation(self, site_evaluation_id, settings)
if file_path is False and name is False:
logger.warning('No Images were found')
site_export.delete()
return
site_export.name = name
with open(file_path, 'rb') as file:
site_export.export_file.save(name, File(file))
site_export.save()
if os.path.exists(file_path):
os.remove(file_path)
# except Exception as e:
# logger.warning(f'Error when processing Animation: {e}')
# if site_export:
# site_export.delete()
try:
site_export.celery_id = task_id
site_export.save()
file_path, name = create_animation(self, site_evaluation_id, settings)
if file_path is False and name is False:
logger.warning('No Images were found')
site_export.delete()
return
site_export.name = name
with open(file_path, 'rb') as file:
site_export.export_file.save(name, File(file))
site_export.save()
if os.path.exists(file_path):
os.remove(file_path)
except Exception as e:
logger.warning(f'Error when processing Animation: {e}')
if site_export:
site_export.delete()


@app.task(bind=True)