diff --git a/satpy/etc/readers/mirs.yaml b/satpy/etc/readers/mirs.yaml
new file mode 100644
index 0000000000..b968c849c2
--- /dev/null
+++ b/satpy/etc/readers/mirs.yaml
@@ -0,0 +1,28 @@
+reader:
+ description: NetCDF Reader for the Microwave Integrated Retrieval System Level 2 swath products
+ name: mirs
+ short_name: MiRS Level 2 NetCDF4
+ long_name: MiRS Level 2 Swath Product Reader (NetCDF4)
+ reader: !!python/name:satpy.readers.yaml_reader.FileYAMLReader
+ sensors: [amsu, amsu-mhs, atms, ssmis, gmi]
+ data_files:
+ - url: "https://zenodo.org/record/4472664/files/limbcoef_atmsland_noaa20.txt"
+ known_hash: "08a3b7c1594a963610dd864b7ecd12f0ab486412d35185c2371d924dd92c5779"
+ - url: "https://zenodo.org/record/4472664/files/limbcoef_atmsland_snpp.txt"
+ known_hash: "4b01543699792306711ef1699244e96186487e8a869e4ae42bf1f0e4d00fd063"
+ - url: "https://zenodo.org/record/4472664/files/limbcoef_atmssea_noaa20.txt"
+ known_hash: "6853d0536b11c31dc130ab12c61fa322a76d3823a4b8ff9a18a0ecedbf269a88"
+ - url: "https://zenodo.org/record/4472664/files/limbcoef_atmssea_snpp.txt"
+ known_hash: "d0f806051b80320e046bdae6a9b68616152bbf8c2dbf3667b9834459259c0d72"
+
+file_types:
+ mirs_atms:
+ file_reader: !!python/name:satpy.readers.mirs.MiRSL2ncHandler
+ file_patterns:
+ - 'NPR-MIRS-IMG_v{version}_{platform_shortname}_s{start_time:%Y%m%d%H%M%S}{extra_num1}_e{end_time:%Y%m%d%H%M%S}{extra_num2}_c{creation_time:%Y%m%d%H%M%S}{extra_num3}.nc'
+ metop_amsu:
+ file_reader: !!python/name:satpy.readers.mirs.MiRSL2ncHandler
+ file_patterns:
+ - 'IMG_SX.{platform_shortname}.D{start_time:%y%j.S%H%M}.E{end_time:%H%M}.B{num}.WE.HR.ORB.nc'
+
+datasets: {}
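+# no datasets are listed here on purpose: the file handler discovers the
+# available variables dynamically (see MiRSL2ncHandler.available_datasets)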
diff --git a/satpy/readers/mirs.py b/satpy/readers/mirs.py
new file mode 100644
index 0000000000..10934e1281
--- /dev/null
+++ b/satpy/readers/mirs.py
@@ -0,0 +1,474 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Satpy developers
+#
+# This file is part of satpy.
+#
+# satpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, either version 3 of the License, or (at your option) any later
+# version.
+#
+# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# satpy. If not, see <http://www.gnu.org/licenses/>.
+"""Interface to MiRS product."""
+
+import os
+import logging
+import datetime
+import numpy as np
+import xarray as xr
+import dask.array as da
+from collections import Counter
+from satpy import CHUNK_SIZE
+from satpy.readers.file_handlers import BaseFileHandler
+from satpy.aux_download import retrieve
+
+LOG = logging.getLogger(__name__)
+
+try:
+ # try getting setuptools/distribute's version of resource retrieval first
+ from pkg_resources import resource_string as get_resource_string
+except ImportError:
+ from pkgutil import get_data as get_resource_string
+
+
+# 'Polo' variable in MiRS files use these values for H/V polarization
+POLO_V = 2
+POLO_H = 3
+
+amsu = "amsu-mhs"
+PLATFORMS = {"n18": "NOAA-18",
+ "n19": "NOAA-19",
+ "np": "NOAA-19",
+ "m2": "MetOp-A",
+ "m1": "MetOp-B",
+ "m3": "MetOp-C",
+ "ma2": "MetOp-A",
+ "ma1": "MetOp-B",
+ "ma3": "MetOp-C",
+ "npp": "NPP",
+ "f17": "DMSP-F17",
+ "f18": "DMSP-F18",
+ "gpm": "GPM",
+ "n20": "NOAA-20",
+ }
+SENSOR = {"n18": amsu,
+ "n19": amsu,
+ "n20": 'atms',
+ "np": amsu,
+ "m1": amsu,
+ "m2": amsu,
+ "m3": amsu,
+ "ma1": amsu,
+ "ma2": amsu,
+ "ma3": amsu,
+ "npp": "atms",
+ "jpss": "atms",
+ "f17": "ssmis",
+ "f18": "ssmis",
+ "gpm": "GPI",
+ }
+
+
+def read_atms_coeff_to_string(fn):
+ """Read the coefficients into a string."""
+ if os.path.isfile(fn):
+        with open(fn, "r") as coeff_file:
+            coeff_str = coeff_file.readlines()
+ else:
+ parts = fn.split(":")
+ mod_part, file_part = parts if len(parts) == 2 else ("", parts[0])
+ mod_part = mod_part or __package__ # self.__module__
+ coeff_str = get_resource_string(mod_part, file_part).decode().split("\n")
+
+ return coeff_str
+
+
+def read_atms_limb_correction_coefficients(fn):
+ """Read the limb correction files."""
+ coeff_str = read_atms_coeff_to_string(fn)
+    # the last line of the file gives the actual counts; there should be
+    # 22 channels and 96 FOVs in the coefficient file
+    n_chn = (coeff_str[-1].replace("  ", " ").split(" ")[1])
+    n_fov = (coeff_str[-1].replace("  ", " ").split(" ")[2])
+ n_chn = int(n_chn)
+ n_fov = int(n_fov)
+    if n_chn < 22:
+        LOG.warning('Coefficient file has fewer than 22 channels: %s', n_chn)
+    if n_fov < 96:
+        LOG.warning('Coefficient file has fewer than 96 FOVs: %s', n_fov)
+ # make it a generator
+ coeff_str = (line.strip() for line in coeff_str)
+
+ all_coeffs = np.zeros((n_chn, n_fov, n_chn), dtype=np.float32)
+ all_amean = np.zeros((n_chn, n_fov, n_chn), dtype=np.float32)
+ all_dmean = np.zeros(n_chn, dtype=np.float32)
+ all_nchx = np.zeros(n_chn, dtype=np.int32)
+ all_nchanx = np.zeros((n_chn, n_chn), dtype=np.int32)
+ all_nchanx[:] = 9999
+ # There should be 22 sections
+ for chan_idx in range(n_chn):
+ # blank line at the start of each section
+ _ = next(coeff_str)
+ # section header
+ _nx, nchx, dmean = [x.strip() for x in next(coeff_str).split(" ") if x]
+ all_nchx[chan_idx] = nchx = int(nchx)
+ all_dmean[chan_idx] = float(dmean)
+
+ # coeff locations (indexes to put the future coefficients in)
+ locations = [int(x.strip()) for x in next(coeff_str).split(" ") if x]
+        assert len(locations) == nchx
+ for x in range(nchx):
+ all_nchanx[chan_idx, x] = locations[x] - 1
+
+ # Read 'nchx' coefficients for each of 96 FOV
+ for fov_idx in range(n_fov):
+ # chan_num, fov_num, *coefficients, error
+ coeff_line_parts = [x.strip() for x in next(coeff_str).split(" ") if x][2:]
+ coeffs = [float(x) for x in coeff_line_parts[:nchx]]
+ ameans = [float(x) for x in coeff_line_parts[nchx:-1]]
+ # not used but nice to know the purpose of the last column.
+ # _error_val = float(coeff_line_parts[-1])
+ for x in range(nchx):
+ all_coeffs[chan_idx, fov_idx, all_nchanx[chan_idx, x]] = coeffs[x]
+ all_amean[all_nchanx[chan_idx, x], fov_idx, chan_idx] = ameans[x]
+
+ return all_dmean, all_coeffs, all_amean, all_nchx, all_nchanx
+
+
+def apply_atms_limb_correction(datasets, channel_idx, dmean,
+ coeffs, amean, nchx, nchanx):
+ """Calculate the correction for each channel."""
+ ds = datasets[channel_idx]
+
+ fov_line_correct = []
+ for fov_idx in range(ds.shape[1]):
+ coeff_sum = np.zeros(ds.shape[0], dtype=ds.dtype)
+ for k in range(nchx[channel_idx]):
+ chn_repeat = nchanx[channel_idx, k]
+ coef = coeffs[channel_idx, fov_idx, chn_repeat] * (
+ datasets[chn_repeat, :, fov_idx] -
+ amean[chn_repeat, fov_idx, channel_idx])
+ coeff_sum = np.add(coef, coeff_sum)
+ fov_line_correct.append(np.add(coeff_sum, dmean[channel_idx]))
+ return np.stack(fov_line_correct, axis=1)
+
+
+def get_coeff_by_sfc(coeff_fn, bt_data, idx):
+ """Read coefficients for specific filename (land or sea)."""
+ sfc_coeff = read_atms_limb_correction_coefficients(coeff_fn)
+ # transpose bt_data for correction
+ bt_data = bt_data.transpose("Channel", "y", "x")
+ c_size = bt_data[idx, :, :].chunks
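+    # apply the correction blockwise over the dask array; every block gets
+    # the full coefficient tables and the result keeps the chunks of a
+    # single-channel slice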
+ correction = da.map_blocks(apply_atms_limb_correction,
+ bt_data, idx,
+ *sfc_coeff, chunks=c_size)
+ return correction
+
+
+def limb_correct_atms_bt(bt_data, surf_type_mask, coeff_fns, ds_info):
+ """Gather data needed for limb correction."""
+ idx = ds_info['channel_index']
+ LOG.info("Starting ATMS Limb Correction...")
+
+ sea_bt = get_coeff_by_sfc(coeff_fns['sea'], bt_data, idx)
+ land_bt = get_coeff_by_sfc(coeff_fns['land'], bt_data, idx)
+
+ LOG.info("Finishing limb correction")
+ is_sea = (surf_type_mask == 0)
+ new_data = np.where(is_sea, sea_bt, land_bt)
+
+ bt_corrected = xr.DataArray(new_data, dims=("y", "x"),
+ attrs=ds_info)
+
+ return bt_corrected
+
+
+class MiRSL2ncHandler(BaseFileHandler):
+ """MiRS handler for NetCDF4 files using xarray."""
+
+ def __init__(self, filename, filename_info, filetype_info):
+ """Init method."""
+ super(MiRSL2ncHandler, self).__init__(filename,
+ filename_info,
+ filetype_info)
+
+ self.nc = xr.open_dataset(self.filename,
+ decode_cf=True,
+ mask_and_scale=False,
+ decode_coords=True,
+ chunks={'Field_of_view': CHUNK_SIZE,
+ 'Scanline': CHUNK_SIZE})
+ # y,x is used in satpy, bands rather than channel using in xrimage
+ self.nc = self.nc.rename_dims({"Scanline": "y",
+ "Field_of_view": "x"})
+ self.nc = self.nc.rename({"Latitude": "latitude",
+ "Longitude": "longitude"})
+
+ self.platform_name = self._get_platform_name
+ self.sensor = self._get_sensor
+
+ @property
+ def platform_shortname(self):
+ """Get platform shortname."""
+ return self.filename_info['platform_shortname']
+
+ @property
+ def _get_platform_name(self):
+ """Get platform name."""
+ try:
+ res = PLATFORMS[self.filename_info['platform_shortname'].lower()]
+ except KeyError:
+ res = "mirs"
+ return res.lower()
+
+ @property
+ def _get_sensor(self):
+ """Get sensor."""
+ try:
+ res = SENSOR[self.filename_info["platform_shortname"].lower()]
+ except KeyError:
+ res = self.sensor_names
+ return res
+
+ @property
+ def sensor_names(self):
+ """Return standard sensor names for the file's data."""
+ return list(set(SENSOR.values()))
+
+ @property
+ def start_time(self):
+ """Get start time."""
+ # old file format
+ if self.filename_info.get("date", False):
+ s_time = datetime.datetime.combine(
+ self.force_date("date"),
+ self.force_time("start_time")
+ )
+ self.filename_info["start_time"] = s_time
+ return self.filename_info["start_time"]
+
+ @property
+ def end_time(self):
+ """Get end time."""
+ # old file format
+ if self.filename_info.get("date", False):
+ end_time = datetime.datetime.combine(
+ self.force_date("date"),
+ self.force_time("end_time")
+ )
+ self.filename_info["end_time"] = end_time
+ return self.filename_info["end_time"]
+
+ def force_date(self, key):
+ """Force datetime.date for combine."""
+ if isinstance(self.filename_info[key], datetime.datetime):
+ return self.filename_info[key].date()
+ else:
+ return self.filename_info[key]
+
+ def force_time(self, key):
+ """Force datetime.time for combine."""
+ if isinstance(self.filename_info.get(key), datetime.datetime):
+ return self.filename_info.get(key).time()
+ else:
+ return self.filename_info.get(key)
+
+ @property
+ def _get_coeff_filenames(self):
+ """Retrieve necessary files for coefficients if needed."""
+ coeff_fn = {'sea': None, 'land': None}
+ if self.platform_name == "noaa-20":
+ coeff_fn['land'] = retrieve("readers/limbcoef_atmsland_noaa20.txt")
+ coeff_fn['sea'] = retrieve("readers/limbcoef_atmssea_noaa20.txt")
+ if self.platform_name == 'npp':
+ coeff_fn['land'] = retrieve("readers/limbcoef_atmsland_snpp.txt")
+ coeff_fn['sea'] = retrieve("readers/limbcoef_atmssea_snpp.txt")
+
+ return coeff_fn
+
+ def get_metadata(self, ds_info):
+ """Get metadata."""
+ metadata = {}
+ metadata.update(ds_info)
+ metadata.update({
+ 'sensor': self.sensor,
+ 'platform_name': self.platform_name,
+ 'start_time': self.start_time,
+ 'end_time': self.end_time,
+ })
+ return metadata
+
+ @staticmethod
+ def _nan_for_dtype(data_arr_dtype):
+ # don't force the conversion from 32-bit float to 64-bit float
+ # if we don't have to
+ if data_arr_dtype.type == np.float32:
+ return np.float32(np.nan)
+ elif np.issubdtype(data_arr_dtype, np.timedelta64):
+ return np.timedelta64('NaT')
+ elif np.issubdtype(data_arr_dtype, np.datetime64):
+ return np.datetime64('NaT')
+ return np.nan
+
+ @staticmethod
+ def _scale_data(data_arr, attrs):
+ # handle scaling
+ # take special care for integer/category fields
+ scale_factor = attrs.pop('scale_factor', 1.)
+ add_offset = attrs.pop('add_offset', 0.)
+ scaling_needed = not (scale_factor == 1 and add_offset == 0)
+ if scaling_needed:
+ data_arr = data_arr * scale_factor + add_offset
+ return data_arr, attrs
+
+ def _fill_data(self, data_arr, attrs):
+ try:
+ global_attr_fill = self.nc.missing_value
+ except AttributeError:
+ global_attr_fill = None
+ fill_value = attrs.pop('_FillValue', global_attr_fill)
+
+ fill_out = self._nan_for_dtype(data_arr.dtype)
+ if fill_value is not None:
+ data_arr = data_arr.where(data_arr != fill_value, fill_out)
+ return data_arr, attrs
+
+ def get_dataset(self, ds_id, ds_info):
+ """Get datasets."""
+        if 'dependencies' in ds_info:
+ idx = ds_info['channel_index']
+ data = self['BT']
+ data = data.rename(new_name_or_name_dict=ds_info["name"])
+
+ if self.sensor.lower() != "atms":
+ LOG.info("Limb Correction will not be applied to non-ATMS BTs")
+ data = data[:, :, idx]
+ else:
+ sfc_type_mask = self['Sfc_type']
+ data = limb_correct_atms_bt(data, sfc_type_mask,
+ self._get_coeff_filenames,
+ ds_info)
+ self.nc = self.nc.merge(data)
+ else:
+ data = self[ds_id['name']]
+
+ data.attrs = self.get_metadata(ds_info)
+ return data
+
+ def _available_if_this_file_type(self, configured_datasets):
+ for is_avail, ds_info in (configured_datasets or []):
+ if is_avail is not None:
+ # some other file handler said it has this dataset
+ # we don't know any more information than the previous
+ # file handler so let's yield early
+ yield is_avail, ds_info
+ continue
+ yield self.file_type_matches(ds_info['file_type']), ds_info
+
+ def _count_channel_repeat_number(self):
+ """Count channel/polarization pair repetition."""
+ freq = self.nc.coords.get('Freq', self.nc.get('Freq'))
+ polo = self.nc['Polo']
+
+ chn_total = Counter()
+ normals = []
+ for idx, (f, p) in enumerate(zip(freq, polo)):
+ normal_f = str(int(f))
+ normal_p = 'v' if p == POLO_V else 'h'
+ chn_total[normal_f + normal_p] += 1
+ normals.append((idx, f, p, normal_f, normal_p))
+
+ return chn_total, normals
+
+ def _available_btemp_datasets(self):
+ """Create metadata for channel BTs."""
+ chn_total, normals = self._count_channel_repeat_number()
+ # keep track of current channel count for string description
+ chn_cnt = Counter()
+ for idx, _f, _p, normal_f, normal_p in normals:
+ chn_cnt[normal_f + normal_p] += 1
+ p_count = str(chn_cnt[normal_f + normal_p]
+ if chn_total[normal_f + normal_p] > 1 else '')
+
+ new_name = "btemp_{}{}{}".format(normal_f, normal_p, p_count)
+
+ desc_bt = "Channel {} Brightness Temperature at {}GHz {}{}"
+ desc_bt = desc_bt.format(idx, normal_f, normal_p, p_count)
+ ds_info = {
+ 'file_type': self.filetype_info['file_type'],
+ 'name': new_name,
+ 'description': desc_bt,
+ 'units': 'K',
+ 'channel_index': idx,
+ 'frequency': "{}GHz".format(normal_f),
+ 'polarization': normal_p,
+ 'dependencies': ('BT', 'Sfc_type'),
+ 'coordinates': ["longitude", "latitude"]
+ }
+ yield True, ds_info
+
+ def _get_ds_info_for_data_arr(self, var_name):
+ ds_info = {
+ 'file_type': self.filetype_info['file_type'],
+ 'name': var_name,
+ 'coordinates': ["longitude", "latitude"]
+ }
+
+ if var_name in ["longitude", "latitude"]:
+ ds_info['standard_name'] = var_name
+ return ds_info
+
+ def _is_2d_yx_data_array(self, data_arr):
+ has_y_dim = data_arr.dims[0] == "y"
+ has_x_dim = data_arr.dims[1] == "x"
+ return has_y_dim and has_x_dim
+
+ def _available_new_datasets(self):
+ """Metadata for available variables other than BT."""
+ possible_vars = list(self.nc.items()) + list(self.nc.coords.items())
+ for var_name, data_arr in possible_vars:
+ if data_arr.ndim != 2:
+ # we don't currently handle non-2D variables
+ continue
+ if not self._is_2d_yx_data_array(data_arr):
+ # we need 'traditional' y/x dimensions currently
+ continue
+
+ ds_info = self._get_ds_info_for_data_arr(var_name)
+ yield True, ds_info
+
+ def available_datasets(self, configured_datasets=None):
+ """Dynamically discover what variables can be loaded from this file.
+
+        See :meth:`satpy.readers.file_handlers.BaseFileHandler.available_datasets`
+ for more information.
+
+ """
+ yield from self._available_if_this_file_type(configured_datasets)
+ yield from self._available_new_datasets()
+ yield from self._available_btemp_datasets()
+
+ def __getitem__(self, item):
+ """Wrap around `self.nc[item]`.
+
+        Datasets may be stored with ``scale_factor``/``add_offset`` and a
+        fill value. This wrapper masks fill values (as NaN/NaT) and applies
+        any scaling before the data is returned.
+
+ """
+ data = self.nc[item]
+ attrs = data.attrs.copy()
+        # mask fill values before scaling: the file is opened with
+        # mask_and_scale=False, so _FillValue refers to the raw packed values
+        data, attrs = self._fill_data(data, attrs)
+        data, attrs = self._scale_data(data, attrs)
+
+ # 'Freq' dimension causes issues in other processing
+ if 'Freq' in data.coords:
+ data = data.drop_vars('Freq')
+
+ return data
diff --git a/satpy/tests/reader_tests/test_mirs.py b/satpy/tests/reader_tests/test_mirs.py
new file mode 100644
index 0000000000..bca09d3f90
--- /dev/null
+++ b/satpy/tests/reader_tests/test_mirs.py
@@ -0,0 +1,282 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Satpy developers
+#
+# This file is part of Satpy.
+#
+# Satpy is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, either version 3 of the License, or (at your option) any later
+# version.
+#
+# Satpy is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# Satpy. If not, see <http://www.gnu.org/licenses/>.
+"""Module for testing the satpy.readers.tropomi_l2 module."""
+
+import os
+from unittest import mock
+import pytest
+from datetime import datetime
+import numpy as np
+import xarray as xr
+
+AWIPS_FILE = "IMG_SX.M2.D17037.S1601.E1607.B0000001.WE.HR.ORB.nc"
+NPP_MIRS_L2_SWATH = "NPR-MIRS-IMG_v11r6_npp_s201702061601000_e201702061607000_c202012201658410.nc"
+OTHER_MIRS_L2_SWATH = "NPR-MIRS-IMG_v11r4_gpm_s201702061601000_e201702061607000_c202010080001310.nc"
+
+EXAMPLE_FILES = [AWIPS_FILE, NPP_MIRS_L2_SWATH, OTHER_MIRS_L2_SWATH]
+
+N_CHANNEL = 3
+N_FOV = 96
+N_SCANLINE = 100
+DEFAULT_FILE_DTYPE = np.float64
+DEFAULT_2D_SHAPE = (N_SCANLINE, N_FOV)
+DEFAULT_DATE = datetime(2019, 6, 19, 13, 0)
+DEFAULT_LAT = np.linspace(23.09356, 36.42844, N_SCANLINE * N_FOV,
+ dtype=DEFAULT_FILE_DTYPE)
+DEFAULT_LON = np.linspace(127.6879, 144.5284, N_SCANLINE * N_FOV,
+ dtype=DEFAULT_FILE_DTYPE)
+FREQ = xr.DataArray([88, 88, 22], dims='Channel',
+ attrs={'description': "Central Frequencies (GHz)"})
+POLO = xr.DataArray([2, 2, 3], dims='Channel',
+ attrs={'description': "Polarizations"})
+
+DS_IDS = ['RR', 'longitude', 'latitude']
+TEST_VARS = ['btemp_88v1', 'btemp_88v2',
+ 'btemp_22h', 'RR', 'Sfc_type']
+PLATFORM = {"M2": "metop-a", "NPP": "npp", "GPM": "gpm"}
+SENSOR = {"m2": "amsu-mhs", "npp": "atms", "gpm": "GPI"}
+
+START_TIME = datetime(2017, 2, 6, 16, 1, 0)
+END_TIME = datetime(2017, 2, 6, 16, 7, 0)
+
+
+def fake_coeff_from_fn(fn):
+ """Create Fake Coefficients."""
+ ameans = np.random.uniform(261, 267, N_CHANNEL)
+ all_nchx = np.linspace(2, 3, N_CHANNEL, dtype=np.int32)
+
+ coeff_str = []
+    for idx in range(1, N_CHANNEL + 1):
+        nx = idx - 1
+        coeff_str.append('\n')
+        next_line = ' {} {} {}\n'.format(idx, all_nchx[nx], ameans[nx])
+        coeff_str.append(next_line)
+        # "locations" line: 1-based indexes of the channels used in the
+        # correction for this channel; the reader expects exactly nchx of them
+        locations = range(1, all_nchx[nx] + 1)
+        coeff_str.append(' {}\n'.format(' '.join(str(x) for x in locations)))
+        for fov in range(1, N_FOV + 1):
+ random_coeff = np.random.rand(all_nchx[nx])
+ str_coeff = ' '.join([str(x) for x in random_coeff])
+ random_means = np.random.uniform(261, 267, all_nchx[nx])
+ str_means = ' '.join([str(x) for x in random_means])
+ error_val = np.random.uniform(0, 4)
+ coeffs_line = ' {:>2} {:>2} {} {} {}\n'.format(idx, fov,
+ str_coeff,
+ str_means,
+ error_val)
+ coeff_str.append(coeffs_line)
+
+ return coeff_str
+
+
+def _get_datasets_with_attributes():
+ """Represent files with two resolution of variables in them (ex. OCEAN)."""
+    bt = xr.DataArray(np.linspace(1830, 3930, N_SCANLINE * N_FOV * N_CHANNEL).
+                      reshape(N_SCANLINE, N_FOV, N_CHANNEL),
+                      attrs={'long_name': "Channel Temperature (K)",
+                             'units': "Kelvin",
+                             'coordinates': "Longitude Latitude Freq",
+                             'scale_factor': 0.01,
+                             '_FillValue': -999,
+                             'valid_range': [0, 50000]},
+                      dims=('Scanline', 'Field_of_view', 'Channel'))
+ rr = xr.DataArray(np.random.randint(100, 500, size=(N_SCANLINE, N_FOV)),
+ attrs={'long_name': "Rain Rate (mm/hr)",
+ 'units': "mm/hr",
+ 'coordinates': "Longitude Latitude",
+ 'scale_factor': 0.1,
+ '_FillValue': -999,
+ 'valid_range': [0, 1000]},
+ dims=('Scanline', 'Field_of_view'))
+ sfc_type = xr.DataArray(np.random.randint(0, 4, size=(N_SCANLINE, N_FOV)),
+ attrs={'description': "type of surface:0-ocean," +
+ "1-sea ice,2-land,3-snow",
+ 'units': "1",
+ 'coordinates': "Longitude Latitude",
+ '_FillValue': -999,
+ 'valid_range': [0, 3]
+ },
+ dims=('Scanline', 'Field_of_view'))
+ latitude = xr.DataArray(DEFAULT_LAT.reshape(DEFAULT_2D_SHAPE),
+ attrs={'long_name':
+ "Latitude of the view (-90,90)"},
+ dims=('Scanline', 'Field_of_view'))
+ longitude = xr.DataArray(DEFAULT_LON.reshape(DEFAULT_2D_SHAPE),
+ attrs={'long_name':
+ "Longitude of the view (-180,180)"},
+ dims=('Scanline', 'Field_of_view'))
+
+ ds_vars = {
+ 'Freq': FREQ,
+ 'Polo': POLO,
+ 'BT': bt,
+ 'RR': rr,
+ 'Sfc_type': sfc_type,
+ 'Latitude': latitude,
+ 'Longitude': longitude
+ }
+
+ attrs = {'missing_value': -999.}
+ ds = xr.Dataset(ds_vars, attrs=attrs)
+
+ return ds
+
+
+def _get_datasets_with_less_attributes():
+ """Represent files with two resolution of variables in them (ex. OCEAN)."""
+ bt = xr.DataArray(np.linspace(1830, 3930, N_SCANLINE * N_FOV * N_CHANNEL).
+ reshape(N_SCANLINE, N_FOV, N_CHANNEL),
+ attrs={'long_name': "Channel Temperature (K)",
+ 'scale_factor': 0.01},
+ dims=('Scanline', 'Field_of_view', 'Channel'))
+ rr = xr.DataArray(np.random.randint(100, 500, size=(N_SCANLINE, N_FOV)),
+ attrs={'long_name': "Rain Rate (mm/hr)",
+ 'scale_factor': 0.1},
+ dims=('Scanline', 'Field_of_view'))
+
+ sfc_type = xr.DataArray(np.random.randint(0, 4, size=(N_SCANLINE, N_FOV)),
+ attrs={'description': "type of surface:0-ocean," +
+ "1-sea ice,2-land,3-snow"},
+ dims=('Scanline', 'Field_of_view'))
+ latitude = xr.DataArray(DEFAULT_LAT.reshape(DEFAULT_2D_SHAPE),
+ attrs={'long_name':
+ "Latitude of the view (-90,90)"},
+ dims=('Scanline', 'Field_of_view'))
+ longitude = xr.DataArray(DEFAULT_LON.reshape(DEFAULT_2D_SHAPE),
+ attrs={"long_name":
+ "Longitude of the view (-180,180)"},
+ dims=('Scanline', 'Field_of_view'))
+
+ ds_vars = {
+ 'Freq': FREQ,
+ 'Polo': POLO,
+ 'BT': bt,
+ 'RR': rr,
+ 'Sfc_type': sfc_type,
+ 'Longitude': longitude,
+ 'Latitude': latitude
+ }
+
+ attrs = {'missing_value': -999.}
+ ds = xr.Dataset(ds_vars, attrs=attrs)
+
+ return ds
+
+
+def fake_open_dataset(filename, **kwargs):
+ """Create a Dataset similar to reading an actual file with xarray.open_dataset."""
+ if filename == AWIPS_FILE:
+ return _get_datasets_with_less_attributes()
+ return _get_datasets_with_attributes()
+
+
+class TestMirsL2_NcReader:
+ """Test mirs Reader."""
+
+ yaml_file = "mirs.yaml"
+
+ def setup_method(self):
+ """Read fake data."""
+ from satpy._config import config_search_paths
+ self.reader_configs = config_search_paths(os.path.join('readers', self.yaml_file))
+
+ @pytest.mark.parametrize(
+ ("filenames", "expected_loadables"),
+ [
+ ([AWIPS_FILE], 1),
+ ([NPP_MIRS_L2_SWATH], 1),
+ ([OTHER_MIRS_L2_SWATH], 1),
+ ]
+ )
+ def test_reader_creation(self, filenames, expected_loadables):
+ """Test basic initialization."""
+ from satpy.readers import load_reader
+ with mock.patch('satpy.readers.mirs.xr.open_dataset') as od:
+ od.side_effect = fake_open_dataset
+ r = load_reader(self.reader_configs)
+ loadables = r.select_files_from_pathnames(filenames)
+ assert len(loadables) == expected_loadables
+ r.create_filehandlers(loadables)
+ # make sure we have some files
+ assert r.file_handlers
+
+ @pytest.mark.parametrize(
+ ("filenames", "expected_datasets"),
+ [
+ ([AWIPS_FILE], DS_IDS),
+ ([NPP_MIRS_L2_SWATH], DS_IDS),
+ ([OTHER_MIRS_L2_SWATH], DS_IDS),
+ ]
+ )
+ def test_available_datasets(self, filenames, expected_datasets):
+ """Test that variables are dynamically discovered."""
+ from satpy.readers import load_reader
+ with mock.patch('satpy.readers.mirs.xr.open_dataset') as od:
+ od.side_effect = fake_open_dataset
+ r = load_reader(self.reader_configs)
+ loadables = r.select_files_from_pathnames(filenames)
+ r.create_filehandlers(loadables)
+ avails = list(r.available_dataset_names)
+ for var_name in expected_datasets:
+ assert var_name in avails
+
+ @staticmethod
+ def _check_area(data_arr):
+ from pyresample.geometry import SwathDefinition
+ area = data_arr.attrs['area']
+ assert isinstance(area, SwathDefinition)
+
+ @staticmethod
+ def _check_fill(data_arr):
+ assert '_FillValue' not in data_arr.attrs
+ if np.issubdtype(data_arr.dtype, np.floating):
+            # the fake data starts as float64, it should stay that way
+ assert data_arr.dtype.type == np.float64
+
+ @staticmethod
+ def _check_attrs(data_arr, platform_name):
+ attrs = data_arr.attrs
+ assert 'scale_factor' not in attrs
+ assert 'platform_name' in attrs
+ assert attrs['platform_name'] == platform_name
+ assert attrs['start_time'] == START_TIME
+ assert attrs['end_time'] == END_TIME
+
+ @pytest.mark.parametrize(
+ ("filenames", "loadable_ids", "platform_name"),
+ [
+ ([AWIPS_FILE], TEST_VARS, "metop-a"),
+ ([NPP_MIRS_L2_SWATH], TEST_VARS, "npp"),
+ ([OTHER_MIRS_L2_SWATH], TEST_VARS, "gpm"),
+ ]
+ )
+ def test_basic_load(self, filenames, loadable_ids, platform_name):
+ """Test that variables are loaded properly."""
+ from satpy.readers import load_reader
+ with mock.patch('satpy.readers.mirs.xr.open_dataset') as od:
+ od.side_effect = fake_open_dataset
+ r = load_reader(self.reader_configs)
+ loadables = r.select_files_from_pathnames(filenames)
+ r.create_filehandlers(loadables)
+ with mock.patch('satpy.readers.mirs.read_atms_coeff_to_string') as \
+ fd, mock.patch('satpy.readers.mirs.retrieve'):
+ fd.side_effect = fake_coeff_from_fn
+ loaded_data_arrs = r.load(loadable_ids)
+ assert loaded_data_arrs
+ for data_id, data_arr in loaded_data_arrs.items():
+ if data_id['name'] not in ['latitude', 'longitude']:
+ self._check_area(data_arr)
+ self._check_fill(data_arr)
+ self._check_attrs(data_arr, platform_name)