forked from pydata/xarray

Commit

Merge remote-tracking branch 'upstream/master' into fix/dask-computes
* upstream/master:
  upgrade black version to 19.10b0 (pydata#3456)
  Remove outdated code related to compatibility with netcdftime (pydata#3450)
dcherian committed Oct 29, 2019
2 parents 53c0f4e + 278d2e6 commit 08f7f74
Showing 23 changed files with 132 additions and 231 deletions.
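Most of the mechanical churn in this diff comes from the black 19.10b0 upgrade (pydata#3456): black now writes single-name tuple unpacking with a parenthesized target, and adds a trailing comma when a lone function parameter is exploded onto its own line. A minimal sketch of the unpacking change (hypothetical values; both spellings are identical at runtime):

    values = {"a": 1}
    (item,) = values.values()  # black 19.10b0 style; same as `item, = values.values()`
    assert item == 1

    # Unpacking also raises ValueError unless there is exactly one element,
    # which is why xarray uses it to assert "exactly one dim/coord" below.
    try:
        (only,) = [1, 2]
    except ValueError:
        pass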
5 changes: 5 additions & 0 deletions doc/whats-new.rst
@@ -22,6 +22,11 @@ Breaking changes
~~~~~~~~~~~~~~~~

- Minimum cftime version is now 1.0.3. By `Deepak Cherian <https://github.com/dcherian>`_.
+- All leftover support for dates from non-standard calendars through netcdftime, the
+  module included in versions of netCDF4 prior to 1.4 that eventually became the
+  cftime package, has been removed in favor of relying solely on the standalone
+  cftime package (:pull:`3450`). By `Spencer Clark
+  <https://github.com/spencerkclark>`_.

New Features
~~~~~~~~~~~~
2 changes: 1 addition & 1 deletion xarray/backends/api.py
@@ -677,7 +677,7 @@ def open_dataarray(
"then select the variable you want."
)
else:
-    data_array, = dataset.data_vars.values()
+    (data_array,) = dataset.data_vars.values()

data_array._file_obj = dataset._file_obj

43 changes: 5 additions & 38 deletions xarray/coding/times.py
@@ -39,34 +39,6 @@
)


-def _import_cftime():
-    """
-    helper function handle the transition to netcdftime/cftime
-    as a stand-alone package
-    """
-    try:
-        import cftime
-    except ImportError:
-        # in netCDF4 the num2date/date2num function are top-level api
-        try:
-            import netCDF4 as cftime
-        except ImportError:
-            raise ImportError("Failed to import cftime")
-    return cftime
-
-
-def _require_standalone_cftime():
-    """Raises an ImportError if the standalone cftime is not found"""
-    try:
-        import cftime  # noqa: F401
-    except ImportError:
-        raise ImportError(
-            "Decoding times with non-standard calendars "
-            "or outside the pandas.Timestamp-valid range "
-            "requires the standalone cftime package."
-        )
-
-
def _netcdf_to_numpy_timeunit(units):
units = units.lower()
if not units.endswith("s"):
@@ -119,16 +91,11 @@ def _decode_cf_datetime_dtype(data, units, calendar, use_cftime):


def _decode_datetime_with_cftime(num_dates, units, calendar):
-    cftime = _import_cftime()
+    import cftime

-    if cftime.__name__ == "cftime":
-        return np.asarray(
-            cftime.num2date(num_dates, units, calendar, only_use_cftime_datetimes=True)
-        )
-    else:
-        # Must be using num2date from an old version of netCDF4 which
-        # does not have the only_use_cftime_datetimes option.
-        return np.asarray(cftime.num2date(num_dates, units, calendar))
+    return np.asarray(
+        cftime.num2date(num_dates, units, calendar, only_use_cftime_datetimes=True)
+    )


def _decode_datetime_with_pandas(flat_num_dates, units, calendar):
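With the netcdftime fallback gone, _decode_datetime_with_cftime can import cftime unconditionally and always pass only_use_cftime_datetimes=True, so decoding returns cftime objects even for dates pandas could represent. A small usage sketch, assuming cftime >= 1.0.3 is installed:

    import numpy as np
    import cftime

    # Decode two offsets against a non-standard (no-leap) calendar.
    dates = cftime.num2date(
        [0, 1],
        units="days since 2000-02-28",
        calendar="noleap",
        only_use_cftime_datetimes=True,
    )
    # A "noleap" calendar has no Feb 29, so the second date is 2000-03-01
    # even though 2000 is a Gregorian leap year.
    print(np.asarray(dates))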
@@ -354,7 +321,7 @@ def _encode_datetime_with_cftime(dates, units, calendar):
This method is more flexible than xarray's parsing using datetime64[ns]
arrays but also slower because it loops over each element.
"""
-    cftime = _import_cftime()
+    import cftime

if np.issubdtype(dates.dtype, np.datetime64):
# numpy's broken datetime conversion only works for us precision
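The docstring above states the tradeoff: cftime handles any calendar but encodes element by element, so it is slower than vectorized datetime64 arithmetic. A sketch of the underlying call, assuming a no-leap calendar:

    import cftime

    dates = [cftime.DatetimeNoLeap(2000, 1, d) for d in (1, 2, 3)]
    nums = cftime.date2num(dates, units="days since 2000-01-01", calendar="noleap")
    # nums is roughly array([0., 1., 2.]); the exact dtype varies by cftime version.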
2 changes: 1 addition & 1 deletion xarray/core/alignment.py
@@ -252,7 +252,7 @@ def align(

if not indexes and len(objects) == 1:
# fast path for the trivial case
-obj, = objects
+(obj,) = objects
return (obj.copy(deep=copy),)

all_indexes = defaultdict(list)
2 changes: 1 addition & 1 deletion xarray/core/combine.py
@@ -954,7 +954,7 @@ def _auto_concat(
"supply the ``concat_dim`` argument "
"explicitly"
)
-dim, = concat_dims
+(dim,) = concat_dims
return concat(
datasets,
dim=dim,
8 changes: 4 additions & 4 deletions xarray/core/computation.py
@@ -145,7 +145,7 @@ def result_name(objects: list) -> Any:
names = {getattr(obj, "name", _DEFAULT_NAME) for obj in objects}
names.discard(_DEFAULT_NAME)
if len(names) == 1:
-name, = names
+(name,) = names
else:
name = None
return name
@@ -187,7 +187,7 @@ def build_output_coords(

if len(coords_list) == 1 and not exclude_dims:
# we can skip the expensive merge
-unpacked_coords, = coords_list
+(unpacked_coords,) = coords_list
merged_vars = dict(unpacked_coords.variables)
else:
# TODO: save these merged indexes, instead of re-computing them later
@@ -237,7 +237,7 @@ def apply_dataarray_vfunc(
for variable, coords in zip(result_var, result_coords)
)
else:
-coords, = result_coords
+(coords,) = result_coords
out = DataArray(result_var, coords, name=name, fastpath=True)

return out
@@ -384,7 +384,7 @@ def apply_dataset_vfunc(
if signature.num_outputs > 1:
out = tuple(_fast_dataset(*args) for args in zip(result_vars, list_of_coords))
else:
-coord_vars, = list_of_coords
+(coord_vars,) = list_of_coords
out = _fast_dataset(result_vars, coord_vars)

if keep_attrs and isinstance(first_obj, Dataset):
4 changes: 2 additions & 2 deletions xarray/core/concat.py
@@ -149,10 +149,10 @@ def _calc_concat_dim_coord(dim):
dim = dim_name
elif not isinstance(dim, DataArray):
coord = as_variable(dim).to_index_variable()
-dim, = coord.dims
+(dim,) = coord.dims
else:
coord = dim
-dim, = coord.dims
+(dim,) = coord.dims
return dim, coord


2 changes: 1 addition & 1 deletion xarray/core/dataarray.py
@@ -616,7 +616,7 @@ def _level_coords(self) -> Dict[Hashable, Hashable]:
if var.ndim == 1 and isinstance(var, IndexVariable):
level_names = var.level_names
if level_names is not None:
-dim, = var.dims
+(dim,) = var.dims
level_coords.update({lname: dim for lname in level_names})
return level_coords

2 changes: 1 addition & 1 deletion xarray/core/dataset.py
@@ -4066,7 +4066,7 @@ def reduce(
if len(reduce_dims) == 1:
# unpack dimensions for the benefit of functions
# like np.argmin which can't handle tuple arguments
-reduce_dims, = reduce_dims
+(reduce_dims,) = reduce_dims
elif len(reduce_dims) == var.ndim:
# prefer to aggregate over axis=None rather than
# axis=(0, 1) if they will be equivalent, because
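The comment above is easy to verify: many NumPy reductions accept a tuple of axes, but arg-style functions such as np.argmin only take a single integer axis, which is why reduce unpacks a lone dimension before dispatching. A quick illustration:

    import numpy as np

    x = np.arange(6).reshape(2, 3)
    np.sum(x, axis=(0, 1))  # fine: sum accepts a tuple of axes
    np.argmin(x, axis=0)    # fine: a single integer axis
    try:
        np.argmin(x, axis=(0,))  # TypeError: argmin cannot handle tuple axes
    except TypeError:
        pass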
6 changes: 3 additions & 3 deletions xarray/core/groupby.py
@@ -321,7 +321,7 @@ def __init__(
raise ValueError("`group` must have a name")

group, obj, stacked_dim, inserted_dims = _ensure_1d(group, obj)
-group_dim, = group.dims
+(group_dim,) = group.dims

expected_size = obj.sizes[group_dim]
if group.size != expected_size:
@@ -470,7 +470,7 @@ def _infer_concat_args(self, applied_example):
else:
coord = self._unique_coord
positions = None
-dim, = coord.dims
+(dim,) = coord.dims
if isinstance(coord, _DummyGroup):
coord = None
return coord, dim, positions
@@ -644,7 +644,7 @@ def _concat_shortcut(self, applied, dim, positions=None):
def _restore_dim_order(self, stacked):
def lookup_order(dimension):
if dimension == self._group.name:
-dimension, = self._group.dims
+(dimension,) = self._group.dims
if dimension in self._obj.dims:
axis = self._obj.get_axis_num(dimension)
else:
4 changes: 2 additions & 2 deletions xarray/core/indexing.py
@@ -212,7 +212,7 @@ def get_dim_indexers(data_obj, indexers):
level_indexers = defaultdict(dict)
dim_indexers = {}
for key, label in indexers.items():
-dim, = data_obj[key].dims
+(dim,) = data_obj[key].dims
if key != dim:
# assume here multi-index level indexer
level_indexers[dim][key] = label
@@ -1368,7 +1368,7 @@ def __getitem__(
if isinstance(key, tuple) and len(key) == 1:
# unpack key so it can index a pandas.Index object (pandas.Index
# objects don't like tuples)
-key, = key
+(key,) = key

if getattr(key, "ndim", 0) > 1: # Return np-array if multidimensional
return NumpyIndexingAdapter(self.array.values)[indexer]
4 changes: 2 additions & 2 deletions xarray/core/merge.py
@@ -286,7 +286,7 @@ def append_all(variables, indexes):


def collect_from_coordinates(
-    list_of_coords: "List[Coordinates]"
+    list_of_coords: "List[Coordinates]",
) -> Dict[Hashable, List[MergeElement]]:
"""Collect variables and indexes to be merged from Coordinate objects."""
grouped: Dict[Hashable, List[Tuple[Variable, pd.Index]]] = {}
@@ -329,7 +329,7 @@ def merge_coordinates_without_align(


def determine_coords(
-    list_of_mappings: Iterable["DatasetLike"]
+    list_of_mappings: Iterable["DatasetLike"],
) -> Tuple[Set[Hashable], Set[Hashable]]:
"""Given a list of dicts with xarray object values, identify coordinates.
4 changes: 2 additions & 2 deletions xarray/core/variable.py
@@ -1528,7 +1528,7 @@ def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False):
along the given dimension.
"""
if not isinstance(dim, str):
-dim, = dim.dims
+(dim,) = dim.dims

# can't do this lazily: we need to loop through variables at least
# twice
@@ -2000,7 +2000,7 @@ def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False):
arrays, if possible.
"""
if not isinstance(dim, str):
-dim, = dim.dims
+(dim,) = dim.dims

variables = list(variables)
first_var = variables[0]
8 changes: 4 additions & 4 deletions xarray/plot/plot.py
@@ -83,8 +83,8 @@ def _infer_line_data(darray, x, y, hue):
)

else:
-xdim, = darray[xname].dims
-huedim, = darray[huename].dims
+(xdim,) = darray[xname].dims
+(huedim,) = darray[huename].dims
yplt = darray.transpose(xdim, huedim)

else:
@@ -102,8 +102,8 @@ def _infer_line_data(darray, x, y, hue):
)

else:
-ydim, = darray[yname].dims
-huedim, = darray[huename].dims
+(ydim,) = darray[yname].dims
+(huedim,) = darray[huename].dims
xplt = darray.transpose(ydim, huedim)

huelabel = label_from_attrs(darray[huename])
4 changes: 0 additions & 4 deletions xarray/tests/__init__.py
@@ -78,10 +78,6 @@ def LooseVersion(vstring):
requires_scipy_or_netCDF4 = pytest.mark.skipif(
not has_scipy_or_netCDF4, reason="requires scipy or netCDF4"
)
-has_cftime_or_netCDF4 = has_cftime or has_netCDF4
-requires_cftime_or_netCDF4 = pytest.mark.skipif(
-    not has_cftime_or_netCDF4, reason="requires cftime or netCDF4"
-)
try:
import_seaborn()
has_seaborn = True
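The deleted requires_cftime_or_netCDF4 marker followed the same pattern as the surviving requires_scipy_or_netCDF4 above; tests below now use the stricter requires_cftime instead. Roughly how such a marker is built (a sketch modeled on this module's conventions, not the exact xarray helper):

    import pytest

    try:
        import cftime  # noqa: F401

        has_cftime = True
    except ImportError:
        has_cftime = False

    requires_cftime = pytest.mark.skipif(not has_cftime, reason="requires cftime")

Applying @requires_cftime to a test then replaces the repeated inline pytest.mark.skipif(not has_cftime, ...) decorators seen in the test diffs below.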
30 changes: 8 additions & 22 deletions xarray/tests/test_accessor_dt.py
@@ -7,10 +7,8 @@
from . import (
assert_array_equal,
assert_equal,
-has_cftime,
-has_cftime_or_netCDF4,
-has_dask,
raises_regex,
+requires_cftime,
requires_dask,
)

Expand Down Expand Up @@ -199,7 +197,7 @@ def times_3d(times):
)


-@pytest.mark.skipif(not has_cftime, reason="cftime not installed")
+@requires_cftime
@pytest.mark.parametrize(
"field", ["year", "month", "day", "hour", "dayofyear", "dayofweek"]
)
@@ -217,7 +215,7 @@ def test_field_access(data, field):
assert_equal(result, expected)


-@pytest.mark.skipif(not has_cftime, reason="cftime not installed")
+@requires_cftime
def test_cftime_strftime_access(data):
""" compare cftime formatting against datetime formatting """
date_format = "%Y%m%d%H"
@@ -232,8 +230,8 @@ def test_cftime_strftime_access(data):
assert_equal(result, expected)


-@pytest.mark.skipif(not has_dask, reason="dask not installed")
-@pytest.mark.skipif(not has_cftime, reason="cftime not installed")
+@requires_cftime
+@requires_dask
@pytest.mark.parametrize(
"field", ["year", "month", "day", "hour", "dayofyear", "dayofweek"]
)
@@ -254,8 +252,8 @@ def test_dask_field_access_1d(data, field):
assert_equal(result.compute(), expected)


-@pytest.mark.skipif(not has_dask, reason="dask not installed")
-@pytest.mark.skipif(not has_cftime, reason="cftime not installed")
+@requires_cftime
+@requires_dask
@pytest.mark.parametrize(
"field", ["year", "month", "day", "hour", "dayofyear", "dayofweek"]
)
@@ -286,7 +284,7 @@ def cftime_date_type(calendar):
return _all_cftime_date_types()[calendar]


-@pytest.mark.skipif(not has_cftime, reason="cftime not installed")
+@requires_cftime
def test_seasons(cftime_date_type):
dates = np.array([cftime_date_type(2000, month, 15) for month in range(1, 13)])
dates = xr.DataArray(dates)
@@ -307,15 +305,3 @@ def test_seasons(cftime_date_type):
seasons = xr.DataArray(seasons)

assert_array_equal(seasons.values, dates.dt.season.values)


-@pytest.mark.skipif(not has_cftime_or_netCDF4, reason="cftime or netCDF4 not installed")
-def test_dt_accessor_error_netCDF4(cftime_date_type):
-    da = xr.DataArray(
-        [cftime_date_type(1, 1, 1), cftime_date_type(2, 1, 1)], dims=["time"]
-    )
-    if not has_cftime:
-        with pytest.raises(TypeError):
-            da.dt.month
-    else:
-        da.dt.month
2 changes: 1 addition & 1 deletion xarray/tests/test_cftime_offsets.py
@@ -1187,5 +1187,5 @@ def test_dayofyear_after_cftime_range(freq):
def test_cftime_range_standard_calendar_refers_to_gregorian():
from cftime import DatetimeGregorian

-result, = cftime_range("2000", periods=1)
+(result,) = cftime_range("2000", periods=1)
assert isinstance(result, DatetimeGregorian)
10 changes: 3 additions & 7 deletions xarray/tests/test_cftimeindex.py
@@ -15,7 +15,7 @@
)
from xarray.tests import assert_array_equal, assert_identical

-from . import has_cftime, has_cftime_or_netCDF4, raises_regex, requires_cftime
+from . import raises_regex, requires_cftime
from .test_coding_times import (
_ALL_CALENDARS,
_NON_STANDARD_CALENDARS,
@@ -653,7 +653,7 @@ def test_indexing_in_dataframe_iloc(df, index):
assert result.equals(expected)


-@pytest.mark.skipif(not has_cftime_or_netCDF4, reason="cftime not installed")
+@requires_cftime
def test_concat_cftimeindex(date_type):
da1 = xr.DataArray(
[1.0, 2.0], coords=[[date_type(1, 1, 1), date_type(1, 2, 1)]], dims=["time"]
@@ -663,11 +663,7 @@
)
da = xr.concat([da1, da2], dim="time")

-if has_cftime:
-    assert isinstance(da.indexes["time"], CFTimeIndex)
-else:
-    assert isinstance(da.indexes["time"], pd.Index)
-    assert not isinstance(da.indexes["time"], CFTimeIndex)
+assert isinstance(da.indexes["time"], CFTimeIndex)


@requires_cftime
