diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 023519c268..b70c3ad5d1 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -78,6 +78,7 @@ jobs: install-jax: [0] install-torch: [0] part: + - "--doctest-modules --ignore=pytensor/misc/check_duplicate_key.py pytensor --ignore=pytensor/link" - "tests --ignore=tests/tensor --ignore=tests/scan --ignore=tests/sparse" - "tests/scan" - "tests/sparse" @@ -96,6 +97,10 @@ jobs: part: "tests/tensor/test_math.py" - fast-compile: 1 float32: 1 + - part: "--doctest-modules --ignore=pytensor/misc/check_duplicate_key.py pytensor --ignore=pytensor/link" + float32: 1 + - part: "--doctest-modules --ignore=pytensor/misc/check_duplicate_key.py pytensor --ignore=pytensor/link" + fast-compile: 1 include: - install-numba: 1 python-version: "3.10" @@ -149,7 +154,7 @@ jobs: shell: micromamba-shell {0} run: | - micromamba install --yes -q "python~=${PYTHON_VERSION}=*_cpython" mkl numpy scipy pip mkl-service graphviz cython pytest coverage pytest-cov pytest-benchmark pytest-mock sympy + micromamba install --yes -q "python~=${PYTHON_VERSION}=*_cpython" mkl numpy scipy pip mkl-service graphviz cython pytest coverage pytest-cov pytest-benchmark pytest-mock pytest-sphinx if [[ $INSTALL_NUMBA == "1" ]]; then micromamba install --yes -q -c conda-forge "python~=${PYTHON_VERSION}=*_cpython" "numba>=0.57"; fi if [[ $INSTALL_JAX == "1" ]]; then micromamba install --yes -q -c conda-forge "python~=${PYTHON_VERSION}=*_cpython" jax jaxlib numpyro && pip install tensorflow-probability; fi if [[ $INSTALL_TORCH == "1" ]]; then micromamba install --yes -q -c conda-forge "python~=${PYTHON_VERSION}=*_cpython" pytorch pytorch-cuda=12.1 -c pytorch -c nvidia; fi diff --git a/environment.yml b/environment.yml index 54d6913fba..5cc37e038c 100644 --- a/environment.yml +++ b/environment.yml @@ -34,6 +34,8 @@ dependencies: - pytest-xdist - pytest-benchmark - pytest-mock + - pip: + - pytest-sphinx # For building docs - sphinx>=5.1.0,<6 - sphinx_rtd_theme diff --git a/pyproject.toml b/pyproject.toml index 828efe0497..90baf2d697 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -75,6 +75,7 @@ tests = [ "coverage>=5.1", "pytest-benchmark", "pytest-mock", + "pytest-sphinx", ] rtd = ["sphinx>=5.1.0,<6", "pygments", "pydot", "pydot2", "pydot-ng"] jax = ["jax", "jaxlib"] diff --git a/pytensor/gradient.py b/pytensor/gradient.py index 0ca6856eb1..5ca38f1c58 100644 --- a/pytensor/gradient.py +++ b/pytensor/gradient.py @@ -2243,7 +2243,7 @@ def grad_clip(x, lower_bound, upper_bound): >>> z2 = pytensor.gradient.grad(x**2, x) >>> f = pytensor.function([x], outputs = [z, z2]) >>> print(f(2.0)) - [array(1.0), array(4.0)] + [array(1.), array(4.)] Notes ----- diff --git a/pytensor/graph/basic.py b/pytensor/graph/basic.py index f71c591473..0e3e07bba7 100644 --- a/pytensor/graph/basic.py +++ b/pytensor/graph/basic.py @@ -1034,7 +1034,10 @@ def orphans_between( Examples -------- - >>> orphans_between([x], [(x+y).out]) + >>> from pytensor.graph.basic import orphans_between + >>> from pytensor.tensor import scalars + >>> x, y = scalars("xy") + >>> list(orphans_between([x], [(x+y)])) [y] """ diff --git a/pytensor/misc/pkl_utils.py b/pytensor/misc/pkl_utils.py index ae3549f1cb..a866b1053c 100644 --- a/pytensor/misc/pkl_utils.py +++ b/pytensor/misc/pkl_utils.py @@ -239,7 +239,7 @@ def dump( >>> foo_2 = pytensor.shared(1, name='foo') >>> with open('model.zip', 'wb') as f: ... 
dump((foo_1, foo_2, np.array(2)), f)
-    >>> np.load('model.zip').keys()
+    >>> list(np.load('model.zip').keys())
     ['foo', 'foo_2', 'array_0', 'pkl']
     >>> np.load('model.zip')['foo']
     array(0)
diff --git a/pytensor/scalar/basic.py b/pytensor/scalar/basic.py
index 2a3db168ba..8f0d5d5621 100644
--- a/pytensor/scalar/basic.py
+++ b/pytensor/scalar/basic.py
@@ -208,6 +208,7 @@ class autocast_float_as:
 
     Examples
     --------
+    >>> from pytensor.tensor import fvector
     >>> with autocast_float_as('float32'):
     ...     assert (fvector() + 1.1).dtype == 'float32' # temporary downcasting
     >>> assert (fvector() + 1.1).dtype == 'float64' # back to default behaviour
diff --git a/pytensor/sparse/basic.py b/pytensor/sparse/basic.py
index 957b96037e..67fe9bc559 100644
--- a/pytensor/sparse/basic.py
+++ b/pytensor/sparse/basic.py
@@ -4326,13 +4326,8 @@ def block_diag(*matrices: TensorVariable, format: Literal["csc", "csr"] = "csc")
     result_sparse = block_diag(A, B, format='csr', name='X')
     print(result_sparse)
-    >>> SparseVariable{csr,int32}
-
-    print(result_sparse.toarray().eval())
-    >>> array([[1, 2, 0, 0],
-    >>>        [3, 4, 0, 0],
-    >>>        [0, 0, 5, 6],
-    >>>        [0, 0, 7, 8]])
+
     """
     if len(matrices) == 1:
         return matrices
diff --git a/pytensor/tensor/basic.py b/pytensor/tensor/basic.py
index 135433a0ab..aa8fb8844f 100644
--- a/pytensor/tensor/basic.py
+++ b/pytensor/tensor/basic.py
@@ -1112,23 +1112,24 @@ def tril(m, k=0):
 
     Examples
     --------
-    >>> at.tril(np.arange(1,13).reshape(4,3), -1).eval()
+    >>> import pytensor.tensor as pt
+    >>> pt.tril(pt.arange(1,13).reshape((4,3)), -1).eval()
     array([[ 0,  0,  0],
            [ 4,  0,  0],
            [ 7,  8,  0],
            [10, 11, 12]])
 
-    >>> at.tril(np.arange(3*4*5).reshape(3, 4, 5)).eval()
+    >>> pt.tril(pt.arange(3*4*5).reshape((3, 4, 5))).eval()
     array([[[ 0,  0,  0,  0,  0],
             [ 5,  6,  0,  0,  0],
             [10, 11, 12,  0,  0],
             [15, 16, 17, 18,  0]],
-
+    <BLANKLINE>
            [[20,  0,  0,  0,  0],
             [25, 26,  0,  0,  0],
             [30, 31, 32,  0,  0],
             [35, 36, 37, 38,  0]],
-
+    <BLANKLINE>
            [[40,  0,  0,  0,  0],
             [45, 46,  0,  0,  0],
             [50, 51, 52,  0,  0],
@@ -1154,23 +1155,24 @@ def triu(m, k=0):
 
     Examples
     --------
-    >>> at.triu(np.arange(1,13).reshape(4,3), -1).eval()
+    >>> import pytensor.tensor as pt
+    >>> pt.triu(pt.arange(1, 13).reshape((4, 3)), -1).eval()
     array([[ 1,  2,  3],
            [ 4,  5,  6],
            [ 0,  8,  9],
            [ 0,  0, 12]])
 
-    >>> at.triu(np.arange(3*4*5).reshape(3, 4, 5)).eval()
+    >>> pt.triu(pt.arange(3*4*5).reshape((3, 4, 5))).eval()
     array([[[ 0,  1,  2,  3,  4],
             [ 0,  6,  7,  8,  9],
             [ 0,  0, 12, 13, 14],
             [ 0,  0,  0, 18, 19]],
-
+    <BLANKLINE>
            [[20, 21, 22, 23, 24],
             [ 0, 26, 27, 28, 29],
             [ 0,  0, 32, 33, 34],
             [ 0,  0,  0, 38, 39]],
-
+    <BLANKLINE>
            [[40, 41, 42, 43, 44],
             [ 0, 46, 47, 48, 49],
             [ 0,  0, 52, 53, 54],
@@ -2029,28 +2031,14 @@ def matrix_transpose(x: "TensorLike") -> TensorVariable:
 
     Examples
     --------
-    >>> import pytensor as pt
-    >>> import numpy as np
-    >>> x = np.arange(24).reshape((2, 3, 4))
-    [[[ 0  1  2  3]
-      [ 4  5  6  7]
-      [ 8  9 10 11]]
-
-     [[12 13 14 15]
-      [16 17 18 19]
-      [20 21 22 23]]]
+    >>> import pytensor.tensor as pt
+    >>> x = pt.arange(24).reshape((2, 3, 4))
+    >>> x.type.shape
+    (2, 3, 4)
+    >>> pt.matrix_transpose(x).type.shape
+    (2, 4, 3)
 
-    >>> pt.matrix_transpose(x).eval()
-    [[[ 0  4  8]
-      [ 1  5  9]
-      [ 2  6 10]
-      [ 3  7 11]]
-
-     [[12 16 20]
-      [13 17 21]
-      [14 18 22]
-      [15 19 23]]]
 
 
     Notes
@@ -2077,15 +2065,21 @@ class Split(COp):
 
     Examples
     --------
-    >>> x = vector()
-    >>> splits = lvector()
+    >>> from pytensor import function
+    >>> import pytensor.tensor as pt
+    >>> x = pt.vector(dtype="int")
+    >>> splits = pt.vector(dtype="int")
+
     You have to declare right away how many split_points there will be.
- >>> ra, rb, rc = split(x, splits, n_splits = 3, axis = 0) + >>> ra, rb, rc = pt.split(x, splits, n_splits = 3, axis = 0) >>> f = function([x, splits], [ra, rb, rc]) >>> a, b, c = f([0,1,2,3,4,5], [3, 2, 1]) - a == [0,1,2] - b == [3, 4] - c == [5] + >>> a + array([0, 1, 2]) + >>> b + array([3, 4]) + >>> c + array([5]) TODO: Don't make a copy in C impl """ @@ -2334,13 +2328,22 @@ class Join(COp): Examples -------- - >>> x, y, z = tensor.matrix(), tensor.matrix(), tensor.matrix() - >>> u = tensor.vector() + >>> import pytensor.tensor as pt + >>> x, y, z = pt.matrix(), pt.matrix(), pt.matrix() + >>> u = pt.vector() + + >>> r = pt.join(0, x, y, z) + >>> c = pt.join(1, x, y, z) + + The axis has to be an index into the shape + >>> pt.join(2, x, y, z) + Traceback (most recent call last): + ValueError: Axis value 2 is out of range for the given input dimensions - >>> r = join(0, x, y, z) - >>> c = join(1, x, y, z) - >>> join(2, x, y, z) # WRONG: the axis has to be an index into the shape - >>> join(0, x, u) # WRONG: joined tensors must have the same rank + Joined tensors must have the same rank + >>> pt.join(0, x, u) + Traceback (most recent call last): + TypeError: Only tensors with the same number of dimensions can be joined. Input ndims were: [2, 1]. """ @@ -3239,28 +3242,29 @@ class _nd_grid: Examples -------- - >>> a = at.mgrid[0:5, 0:3] + >>> import pytensor.tensor as pt + >>> a = pt.mgrid[0:5, 0:3] >>> a[0].eval() array([[0, 0, 0], [1, 1, 1], [2, 2, 2], [3, 3, 3], - [4, 4, 4]], dtype=int8) + [4, 4, 4]]) >>> a[1].eval() array([[0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2], - [0, 1, 2]], dtype=int8) - >>> b = at.ogrid[0:5, 0:3] + [0, 1, 2]]) + >>> b = pt.ogrid[0:5, 0:3] >>> b[0].eval() array([[0], [1], [2], [3], - [4]], dtype=int8) + [4]]) >>> b[1].eval() - array([[0, 1, 2, 3]], dtype=int8) + array([[0, 1, 2]]) """ @@ -3924,8 +3928,8 @@ def stacklists(arg): >>> X = stacklists([[a, b], [c, d]]) >>> f = function([a, b, c, d], X) >>> f(1, 2, 3, 4) - array([[ 1., 2.], - [ 3., 4.]], dtype=float32) + array([[1., 2.], + [3., 4.]]) We can also stack arbitrarily shaped tensors. 
Here we stack matrices into a 2 by 2 grid: diff --git a/pytensor/tensor/extra_ops.py b/pytensor/tensor/extra_ops.py index 06a82744b2..7f599772da 100644 --- a/pytensor/tensor/extra_ops.py +++ b/pytensor/tensor/extra_ops.py @@ -254,7 +254,7 @@ def searchsorted(x, v, side="left", sorter=None): -------- >>> from pytensor import tensor as pt >>> from pytensor.tensor import extra_ops - >>> x = ptb.dvector() + >>> x = pt.dvector("x") >>> idx = x.searchsorted(3) >>> idx.eval({x: [1,2,3,4,5]}) array(2) @@ -1165,12 +1165,12 @@ class Unique(Op): >>> x = pytensor.tensor.vector() >>> f = pytensor.function([x], Unique(True, True, False)(x)) >>> f([1, 2., 3, 4, 3, 2, 1.]) - [array([ 1., 2., 3., 4.]), array([0, 1, 2, 3]), array([0, 1, 2, 3, 2, 1, 0])] + [array([1., 2., 3., 4.]), array([0, 1, 2, 3]), array([0, 1, 2, 3, 2, 1, 0])] >>> y = pytensor.tensor.matrix() >>> g = pytensor.function([y], Unique(True, True, False)(y)) >>> g([[1, 1, 1.0], (2, 3, 3.0)]) - [array([ 1., 2., 3.]), array([0, 3, 4]), array([0, 0, 0, 1, 2, 2])] + [array([1., 2., 3.]), array([0, 3, 4]), array([0, 0, 0, 1, 2, 2])] """ diff --git a/pytensor/tensor/math.py b/pytensor/tensor/math.py index d515d51c3a..bc2425478f 100644 --- a/pytensor/tensor/math.py +++ b/pytensor/tensor/math.py @@ -826,31 +826,31 @@ def isclose(a, b, rtol=1.0e-5, atol=1.0e-8, equal_nan=False): >>> a = _asarray([1e10, 1e-7], dtype="float64") >>> b = _asarray([1.00001e10, 1e-8], dtype="float64") >>> pytensor.tensor.isclose(a, b).eval() - array([1, 0], dtype=int8) + array([ True, False]) >>> a = _asarray([1e10, 1e-8], dtype="float64") >>> b = _asarray([1.00001e10, 1e-9], dtype="float64") >>> pytensor.tensor.isclose(a, b).eval() - array([1, 1], dtype=int8) + array([ True, True]) >>> a = _asarray([1e10, 1e-8], dtype="float64") >>> b = _asarray([1.0001e10, 1e-9], dtype="float64") >>> pytensor.tensor.isclose(a, b).eval() - array([0, 1], dtype=int8) + array([False, True]) >>> a = _asarray([1.0, np.nan], dtype="float64") >>> b = _asarray([1.0, np.nan], dtype="float64") >>> pytensor.tensor.isclose(a, b).eval() - array([1, 0], dtype==int8) + array([ True, False]) >>> a = _asarray([1.0, np.nan], dtype="float64") >>> b = _asarray([1.0, np.nan], dtype="float64") >>> pytensor.tensor.isclose(a, b, equal_nan=True).eval() - array([1, 1], dtype==int8) + array([ True, True]) >>> a = _asarray([1.0, np.inf], dtype="float64") >>> b = _asarray([1.0, -np.inf], dtype="float64") >>> pytensor.tensor.isclose(a, b).eval() - array([1, 0], dtype==int8) + array([ True, False]) >>> a = _asarray([1.0, np.inf], dtype="float64") >>> b = _asarray([1.0, np.inf], dtype="float64") >>> pytensor.tensor.isclose(a, b).eval() - array([1, 1], dtype==int8) + array([ True, True]) """ # close will be an int8 array of 1 where within tolerance @@ -2212,7 +2212,7 @@ def tensordot( ... cloop[i,j,k] += a[i,l,m] * b[j,k,m,l] >>> np.allclose(c, cloop) - true + True This specific implementation avoids a loop by transposing a and b such that the summed axes of ``a`` are last and the summed axes of ``b`` are first. The @@ -2224,11 +2224,11 @@ def tensordot( >>> c = np.tensordot(a, b, 0) >>> print(a.shape) - (2,3,4) + (2, 3, 4) >>> print(b.shape) - (5,6,4,3) + (5, 6, 4, 3) >>> print(c.shape) - (2,3,4,5,6,4,3) + (2, 3, 4, 5, 6, 4, 3) See the documentation of numpy.tensordot for more examples. 
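A note on the `isclose` hunks above: every expected output switches from an `int8` array to a boolean array, and the new expected strings keep NumPy's repr alignment (the leading space in `array([ True, False])` is NumPy padding `True` to the width of `False`, which the doctests must reproduce exactly). A minimal doctest-style sanity check of that behaviour, reusing the first example's inputs and only the public API already used in this diff:

>>> import numpy as np
>>> import pytensor.tensor as pt
>>> close = pt.isclose(np.array([1e10, 1e-7]), np.array([1.00001e10, 1e-8])).eval()
>>> close.dtype
dtype('bool')
>>> close
array([ True, False])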
diff --git a/pytensor/tensor/random/utils.py b/pytensor/tensor/random/utils.py
index b77580e515..2bbc8a79f1 100644
--- a/pytensor/tensor/random/utils.py
+++ b/pytensor/tensor/random/utils.py
@@ -78,14 +78,18 @@ def broadcast_params(params, ndims_params):
     >>> mean = np.array([1, 2, 3])
     >>> cov = np.stack([np.eye(3), np.eye(3)])
     >>> params = [mean, cov]
-    >>> res = broadcast_params(params, ndims_params)
-    [array([[1, 2, 3]]),
+    >>> mean_bcast, cov_bcast = broadcast_params(params, ndims_params)
+    >>> mean_bcast
+    array([[1, 2, 3],
+           [1, 2, 3]])
+    >>> cov_bcast
     array([[[1., 0., 0.],
-            [0., 1., 0.],
-            [0., 0., 1.]],
-           [[1., 0., 0.],
-            [0., 1., 0.],
-            [0., 0., 1.]]])]
+            [0., 1., 0.],
+            [0., 0., 1.]],
+    <BLANKLINE>
+           [[1., 0., 0.],
+            [0., 1., 0.],
+            [0., 0., 1.]]])
 
     Parameters
     ==========
diff --git a/pytensor/tensor/rewriting/math.py b/pytensor/tensor/rewriting/math.py
index 630ba87900..00a281a36c 100644
--- a/pytensor/tensor/rewriting/math.py
+++ b/pytensor/tensor/rewriting/math.py
@@ -789,9 +789,9 @@ class AlgebraicCanonizer(NodeRewriter):
     --------
     >>> import pytensor.tensor as pt
     >>> from pytensor.tensor.rewriting.math import AlgebraicCanonizer
-    >>> add_canonizer = AlgebraicCanonizer(add, sub, neg, \\
+    >>> add_canonizer = AlgebraicCanonizer(add, sub, neg, \
    ...                                     lambda n, d: sum(n) - sum(d))
-    >>> mul_canonizer = AlgebraicCanonizer(mul, true_div, inv, \\
+    >>> mul_canonizer = AlgebraicCanonizer(mul, true_div, reciprocal, \
    ...                                     lambda n, d: prod(n) / prod(d))
 
     Examples of rewrites `mul_canonizer` can perform:
diff --git a/pytensor/tensor/shape.py b/pytensor/tensor/shape.py
index 66cab27dc6..8b7584c6cd 100644
--- a/pytensor/tensor/shape.py
+++ b/pytensor/tensor/shape.py
@@ -926,13 +926,13 @@ def shape_padaxis(t, axis):
     --------
     >>> tensor = pytensor.tensor.type.tensor3()
     >>> pytensor.tensor.shape_padaxis(tensor, axis=0)
-    DimShuffle{x,0,1,2}.0
+    ExpandDims{axis=0}.0
     >>> pytensor.tensor.shape_padaxis(tensor, axis=1)
-    DimShuffle{0,x,1,2}.0
+    ExpandDims{axis=1}.0
     >>> pytensor.tensor.shape_padaxis(tensor, axis=3)
-    DimShuffle{0,1,2,x}.0
+    ExpandDims{axis=3}.0
     >>> pytensor.tensor.shape_padaxis(tensor, axis=-1)
-    DimShuffle{0,1,2,x}.0
+    ExpandDims{axis=3}.0
 
     See Also
     --------
diff --git a/pytensor/tensor/slinalg.py b/pytensor/tensor/slinalg.py
index 4f72c0263a..05fba32462 100644
--- a/pytensor/tensor/slinalg.py
+++ b/pytensor/tensor/slinalg.py
@@ -963,10 +963,10 @@ def block_diag(*matrices: TensorVariable):
     result = block_diagonal(A, B, name='X')
     print(result.eval())
-    >>> Out: array([[1, 2, 0, 0],
-    >>>        [3, 4, 0, 0],
-    >>>        [0, 0, 5, 6],
-    >>>        [0, 0, 7, 8]])
+    Out: array([[1, 2, 0, 0],
+               [3, 4, 0, 0],
+               [0, 0, 5, 6],
+               [0, 0, 7, 8]])
     """
     _block_diagonal_matrix = Blockwise(BlockDiagonal(n_inputs=len(matrices)))
     return _block_diagonal_matrix(*matrices)
diff --git a/pytensor/tensor/subtensor.py b/pytensor/tensor/subtensor.py
index f0f5555499..428e07e3af 100644
--- a/pytensor/tensor/subtensor.py
+++ b/pytensor/tensor/subtensor.py
@@ -755,13 +755,18 @@ def get_constant_idx(
     Examples
     --------
     Example usage where `v` and `a` are appropriately typed PyTensor variables :
+    >>> from pytensor.scalar import int64
+    >>> from pytensor.tensor import matrix
+    >>> v = int64("v")
+    >>> a = matrix("a")
     >>> b = a[v, 1:3]
     >>> b.owner.op.idx_list
     (ScalarType(int64), slice(ScalarType(int64), ScalarType(int64), None))
     >>> get_constant_idx(b.owner.op.idx_list, b.owner.inputs, allow_partial=True)
     [v, slice(1, 3, None)]
     >>> get_constant_idx(b.owner.op.idx_list, b.owner.inputs)
-    NotScalarConstantError: v
+    Traceback (most recent call last):
+ pytensor.tensor.exceptions.NotScalarConstantError """ real_idx = get_idx_list(inputs, idx_list) @@ -1412,8 +1417,8 @@ def set_subtensor(x, y, inplace=False, tolerate_inplace_aliasing=False): Examples -------- To replicate the numpy expression "r[10:] = 5", type - - >>> r = ivector() + >>> from pytensor.tensor import vector + >>> r = vector("r") >>> new_r = set_subtensor(r[10:], 5) """ diff --git a/pytensor/tensor/utils.py b/pytensor/tensor/utils.py index 41218981a0..b8ae1e780b 100644 --- a/pytensor/tensor/utils.py +++ b/pytensor/tensor/utils.py @@ -54,8 +54,9 @@ def shape_of_variables(fgraph, input_shapes): Examples -------- - >>> import pytensor - >>> x = pytensor.tensor.matrix('x') + >>> import pytensor.tensor as pt + >>> from pytensor.graph.fg import FunctionGraph + >>> x = pt.matrix('x') >>> y = x[512:]; y.name = 'y' >>> fgraph = FunctionGraph([x], [y], clone=False) >>> d = shape_of_variables(fgraph, {x: (1024, 1024)}) diff --git a/pytensor/tensor/variable.py b/pytensor/tensor/variable.py index ca66689d2f..bd3014205e 100644 --- a/pytensor/tensor/variable.py +++ b/pytensor/tensor/variable.py @@ -840,7 +840,8 @@ def set(self, y, **kwargs): >>> >>> x = pt.ones((3,)) >>> out = x[1].set(2) - >>> out.eval() # array([1., 2., 1.]) + >>> out.eval() + array([1., 2., 1.]) """ return pt.subtensor.set_subtensor(self, y, **kwargs) @@ -861,7 +862,8 @@ def inc(self, y, **kwargs): >>> >>> x = pt.ones((3,)) >>> out = x[1].inc(2) - >>> out.eval() # array([1., 3., 1.]) + >>> out.eval() + array([1., 3., 1.]) """ return pt.inc_subtensor(self, y, **kwargs)
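To reproduce the new doctest job locally, the matrix `part` string added at the top of this diff can be handed to pytest directly. This is a sketch, assuming the workflow's test step passes the `part` value through as pytest arguments (the existing `tests/...` parts suggest it does, but that step is outside these hunks):

    pip install pytest-sphinx
    pytest --doctest-modules --ignore=pytensor/misc/check_duplicate_key.py pytensor --ignore=pytensor/link

The `--ignore` flags mirror the matrix entry: the run collects doctests from every module under `pytensor/` except `misc/check_duplicate_key.py` and the `link/` subpackage, and the `exclude` rules above keep it out of the `float32` and `fast-compile` configurations.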