Enable ruff to format code in docstrings
Armavica authored and twiecki committed Sep 17, 2024
1 parent 86c8a00 commit ffb72e4
Showing 19 changed files with 213 additions and 144 deletions.
3 changes: 3 additions & 0 deletions pyproject.toml
@@ -124,6 +124,9 @@ testpaths = "tests/"
line-length = 88
exclude = ["doc/", "pytensor/_version.py"]

[tool.ruff.format]
docstring-code-format = true

[tool.ruff.lint]
select = ["C", "E", "F", "I", "UP", "W", "RUF", "PERF", "PTH", "ISC"]
ignore = ["C408", "C901", "E501", "E741", "RUF012", "PERF203", "ISC001"]
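For context, and not part of this commit's diff: once docstring-code-format = true is set, ruff format also reformats code found in doctests and reStructuredText code blocks inside docstrings, which is what produces the quote and spacing changes in the files below. A minimal sketch of the effect on a hypothetical function (not from the PyTensor codebase):

def double(x):
    """Return twice the input.

    .. code-block:: python

        import pytensor.tensor as pt

        y = pt.vector('y')  # before formatting: single quotes, no spaces around *
        z = y*2
    """
    return x * 2

# After running ruff format, only the docstring code block changes:
#
#     y = pt.vector("y")
#     z = y * 2
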
17 changes: 11 additions & 6 deletions pytensor/compile/builders.py
@@ -190,7 +190,8 @@ class OpFromGraph(Op, HasInnerGraph):
from pytensor import function, tensor as pt
from pytensor.compile.builders import OpFromGraph
x, y, z = pt.scalars('xyz')
x, y, z = pt.scalars("xyz")
e = x + y * z
op = OpFromGraph([x, y, z], [e])
# op behaves like a normal pytensor op
@@ -206,7 +207,7 @@ class OpFromGraph(Op, HasInnerGraph):
from pytensor import config, function, tensor as pt
from pytensor.compile.builders import OpFromGraph
x, y, z = pt.scalars('xyz')
x, y, z = pt.scalars("xyz")
s = pytensor.shared(np.random.random((2, 2)).astype(config.floatX))
e = x + y * z + s
op = OpFromGraph([x, y, z], [e])
@@ -221,12 +222,16 @@ class OpFromGraph(Op, HasInnerGraph):
from pytensor import function, tensor as pt, grad
from pytensor.compile.builders import OpFromGraph
x, y, z = pt.scalars('xyz')
x, y, z = pt.scalars("xyz")
e = x + y * z
def rescale_dy(inps, outputs, out_grads):
x, y, z = inps
g, = out_grads
return z*2
(g,) = out_grads
return z * 2
op = OpFromGraph(
[x, y, z],
[e],
@@ -236,7 +241,7 @@ def rescale_dy(inps, outputs, out_grads):
dx, dy, dz = grad(e2, [x, y, z])
fn = function([x, y, z], [dx, dy, dz])
# the gradient wrt y is now doubled
fn(2., 3., 4.) # [1., 8., 3.]
fn(2.0, 3.0, 4.0) # [1., 8., 3.]
"""

31 changes: 16 additions & 15 deletions pytensor/gradient.py
@@ -692,25 +692,24 @@ def subgraph_grad(wrt, end, start=None, cost=None, details=False):
.. code-block:: python
x, t = pytensor.tensor.fvector('x'), pytensor.tensor.fvector('t')
w1 = pytensor.shared(np.random.standard_normal((3,4)))
w2 = pytensor.shared(np.random.standard_normal((4,2)))
a1 = pytensor.tensor.tanh(pytensor.tensor.dot(x,w1))
a2 = pytensor.tensor.tanh(pytensor.tensor.dot(a1,w2))
x, t = pytensor.tensor.fvector("x"), pytensor.tensor.fvector("t")
w1 = pytensor.shared(np.random.standard_normal((3, 4)))
w2 = pytensor.shared(np.random.standard_normal((4, 2)))
a1 = pytensor.tensor.tanh(pytensor.tensor.dot(x, w1))
a2 = pytensor.tensor.tanh(pytensor.tensor.dot(a1, w2))
cost2 = pytensor.tensor.sqr(a2 - t).sum()
cost2 += pytensor.tensor.sqr(w2.sum())
cost1 = pytensor.tensor.sqr(w1.sum())
params = [[w2],[w1]]
costs = [cost2,cost1]
params = [[w2], [w1]]
costs = [cost2, cost1]
grad_ends = [[a1], [x]]
next_grad = None
param_grads = []
for i in range(2):
param_grad, next_grad = pytensor.subgraph_grad(
wrt=params[i], end=grad_ends[i],
start=next_grad, cost=costs[i]
wrt=params[i], end=grad_ends[i], start=next_grad, cost=costs[i]
)
next_grad = dict(zip(grad_ends[i], next_grad))
param_grads.extend(param_grad)
@@ -1704,9 +1703,11 @@ def verify_grad(
Examples
--------
>>> verify_grad(pytensor.tensor.tanh,
... (np.asarray([[2, 3, 4], [-1, 3.3, 9.9]]),),
... rng=np.random.default_rng(23098))
>>> verify_grad(
... pytensor.tensor.tanh,
... (np.asarray([[2, 3, 4], [-1, 3.3, 9.9]]),),
... rng=np.random.default_rng(23098),
... )
Parameters
----------
@@ -2342,9 +2343,9 @@ def grad_clip(x, lower_bound, upper_bound):
Examples
--------
>>> x = pytensor.tensor.type.scalar()
>>> z = pytensor.gradient.grad(grad_clip(x, -1, 1)**2, x)
>>> z = pytensor.gradient.grad(grad_clip(x, -1, 1) ** 2, x)
>>> z2 = pytensor.gradient.grad(x**2, x)
>>> f = pytensor.function([x], outputs = [z, z2])
>>> f = pytensor.function([x], outputs=[z, z2])
>>> print(f(2.0))
[array(1.), array(4.)]
@@ -2383,7 +2384,7 @@ def grad_scale(x, multiplier):
>>> fprime = pytensor.function([x], fp)
>>> print(fprime(2)) # doctest: +ELLIPSIS
-0.416...
>>> f_inverse=grad_scale(fx, -1.)
>>> f_inverse = grad_scale(fx, -1.0)
>>> fpp = pytensor.grad(f_inverse, wrt=x)
>>> fpprime = pytensor.function([x], fpp)
>>> print(fpprime(2)) # doctest: +ELLIPSIS
32 changes: 19 additions & 13 deletions pytensor/graph/basic.py
@@ -399,18 +399,24 @@ class Variable(Node, Generic[_TypeType, OptionalApplyType]):
import pytensor
import pytensor.tensor as pt
a = pt.constant(1.5) # declare a symbolic constant
b = pt.fscalar() # declare a symbolic floating-point scalar
a = pt.constant(1.5) # declare a symbolic constant
b = pt.fscalar() # declare a symbolic floating-point scalar
c = a + b # create a simple expression
c = a + b # create a simple expression
f = pytensor.function([b], [c]) # this works because a has a value associated with it already
f = pytensor.function(
[b], [c]
) # this works because a has a value associated with it already
assert 4.0 == f(2.5) # bind 2.5 to an internal copy of b and evaluate an internal c
assert 4.0 == f(2.5) # bind 2.5 to an internal copy of b and evaluate an internal c
pytensor.function([a], [c]) # compilation error because b (required by c) is undefined
pytensor.function(
[a], [c]
) # compilation error because b (required by c) is undefined
pytensor.function([a,b], [c]) # compilation error because a is constant, it can't be an input
pytensor.function(
[a, b], [c]
) # compilation error because a is constant, it can't be an input
The python variables ``a, b, c`` all refer to instances of type
@@ -587,10 +593,10 @@ def eval(
>>> import numpy as np
>>> import pytensor.tensor as pt
>>> x = pt.dscalar('x')
>>> y = pt.dscalar('y')
>>> x = pt.dscalar("x")
>>> y = pt.dscalar("y")
>>> z = x + y
>>> np.allclose(z.eval({x : 16.3, y : 12.1}), 28.4)
>>> np.allclose(z.eval({x: 16.3, y: 12.1}), 28.4)
True
We passed :meth:`eval` a dictionary mapping symbolic PyTensor
@@ -963,9 +969,9 @@ def explicit_graph_inputs(
import pytensor.tensor as pt
from pytensor.graph.basic import explicit_graph_inputs
x = pt.vector('x')
x = pt.vector("x")
y = pt.constant(2)
z = pt.mul(x*y)
z = pt.mul(x * y)
inputs = list(explicit_graph_inputs(z))
f = pytensor.function(inputs, z)
@@ -1041,7 +1047,7 @@ def orphans_between(
>>> from pytensor.graph.basic import orphans_between
>>> from pytensor.tensor import scalars
>>> x, y = scalars("xy")
>>> list(orphans_between([x], [(x+y)]))
>>> list(orphans_between([x], [(x + y)]))
[y]
"""
6 changes: 3 additions & 3 deletions pytensor/link/c/interface.py
@@ -30,7 +30,7 @@ def c_headers(self, **kwargs) -> list[str]:
.. code-block:: python
def c_headers(self, **kwargs):
return ['<iostream>', '<math.h>', '/full/path/to/header.h']
return ["<iostream>", "<math.h>", "/full/path/to/header.h"]
"""
@@ -54,7 +54,7 @@ def c_header_dirs(self, **kwargs) -> list[str]:
.. code-block:: python
def c_header_dirs(self, **kwargs):
return ['/usr/local/include', '/opt/weirdpath/src/include']
return ["/usr/local/include", "/opt/weirdpath/src/include"]
"""
return []
@@ -134,7 +134,7 @@ def c_compile_args(self, **kwargs) -> list[str]:
.. code-block:: python
def c_compile_args(self, **kwargs):
return ['-ffast-math']
return ["-ffast-math"]
"""
return []
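Taken together, the three hooks shown above are how a C-backed Op declares the headers it includes, the directories to search for them, and any extra compiler flags. A minimal sketch of how they typically sit on one class, using a hypothetical Op not taken from this diff (in PyTensor these methods would live on a COp subclass):

class MyCOp:
    # In practice this would subclass PyTensor's C Op base class; only the
    # three hooks documented above are sketched here.

    def c_headers(self, **kwargs):
        # Headers included by the generated C source.
        return ["<math.h>"]

    def c_header_dirs(self, **kwargs):
        # Directories searched for the headers above.
        return ["/usr/local/include"]

    def c_compile_args(self, **kwargs):
        # Extra flags forwarded to the C compiler.
        return ["-ffast-math"]
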
83 changes: 52 additions & 31 deletions pytensor/link/c/params_type.py
@@ -29,7 +29,9 @@
.. code-block:: python
params_type = ParamsType(attr1=TensorType('int32', shape=(None, None)), attr2=ScalarType('float64'))
params_type = ParamsType(
attr1=TensorType("int32", shape=(None, None)), attr2=ScalarType("float64")
)
If your op contains attributes ``attr1`` **and** ``attr2``, the default ``op.get_params()``
implementation will automatically try to look for it and generate an appropriate Params object.
@@ -77,38 +79,48 @@ def __init__(value_attr1, value_attr2):
from pytensor.link.c.params_type import ParamsType
from pytensor.link.c.type import EnumType, EnumList
wrapper = ParamsType(enum1=EnumList('CONSTANT_1', 'CONSTANT_2', 'CONSTANT_3'),
enum2=EnumType(PI=3.14, EPSILON=0.001))
wrapper = ParamsType(
enum1=EnumList("CONSTANT_1", "CONSTANT_2", "CONSTANT_3"),
enum2=EnumType(PI=3.14, EPSILON=0.001),
)
# Each enum constant is available as a wrapper attribute:
print(wrapper.CONSTANT_1, wrapper.CONSTANT_2, wrapper.CONSTANT_3,
wrapper.PI, wrapper.EPSILON)
print(
wrapper.CONSTANT_1,
wrapper.CONSTANT_2,
wrapper.CONSTANT_3,
wrapper.PI,
wrapper.EPSILON,
)
# For convenience, you can also look for a constant by name with
# ``ParamsType.get_enum()`` method.
pi = wrapper.get_enum('PI')
epsilon = wrapper.get_enum('EPSILON')
constant_2 = wrapper.get_enum('CONSTANT_2')
pi = wrapper.get_enum("PI")
epsilon = wrapper.get_enum("EPSILON")
constant_2 = wrapper.get_enum("CONSTANT_2")
print(pi, epsilon, constant_2)
This implies that a ParamsType cannot contain different enum types with common enum names::
# Following line will raise an error,
# as there is a "CONSTANT_1" defined both in enum1 and enum2.
wrapper = ParamsType(enum1=EnumList('CONSTANT_1', 'CONSTANT_2'),
enum2=EnumType(CONSTANT_1=0, CONSTANT_3=5))
wrapper = ParamsType(
enum1=EnumList("CONSTANT_1", "CONSTANT_2"),
enum2=EnumType(CONSTANT_1=0, CONSTANT_3=5),
)
If your enum types contain constant aliases, you can retrieve them from ParamsType
with ``ParamsType.enum_from_alias(alias)`` method (see :class:`pytensor.link.c.type.EnumType`
for more info about enumeration aliases).
.. code-block:: python
wrapper = ParamsType(enum1=EnumList('A', ('B', 'beta'), 'C'),
enum2=EnumList(('D', 'delta'), 'E', 'F'))
wrapper = ParamsType(
enum1=EnumList("A", ("B", "beta"), "C"), enum2=EnumList(("D", "delta"), "E", "F")
)
b1 = wrapper.B
b2 = wrapper.get_enum('B')
b3 = wrapper.enum_from_alias('beta')
b2 = wrapper.get_enum("B")
b3 = wrapper.enum_from_alias("beta")
assert b1 == b2 == b3
"""
@@ -236,10 +248,13 @@ class Params(dict):
from pytensor.link.c.params_type import ParamsType, Params
from pytensor.scalar import ScalarType
# You must create a ParamsType first:
params_type = ParamsType(attr1=ScalarType('int32'),
key2=ScalarType('float32'),
field3=ScalarType('int64'))
params_type = ParamsType(
attr1=ScalarType("int32"),
key2=ScalarType("float32"),
field3=ScalarType("int64"),
)
# Then you can create a Params object with
# the params type defined above and values for attributes.
params = Params(params_type, attr1=1, key2=2.0, field3=3)
@@ -491,11 +506,13 @@ def get_enum(self, key):
from pytensor.link.c.type import EnumType, EnumList
from pytensor.scalar import ScalarType
wrapper = ParamsType(scalar=ScalarType('int32'),
letters=EnumType(A=1, B=2, C=3),
digits=EnumList('ZERO', 'ONE', 'TWO'))
print(wrapper.get_enum('C')) # 3
print(wrapper.get_enum('TWO')) # 2
wrapper = ParamsType(
scalar=ScalarType("int32"),
letters=EnumType(A=1, B=2, C=3),
digits=EnumList("ZERO", "ONE", "TWO"),
)
print(wrapper.get_enum("C")) # 3
print(wrapper.get_enum("TWO")) # 2
# You can also directly do:
print(wrapper.C)
@@ -520,17 +537,19 @@ def enum_from_alias(self, alias):
from pytensor.link.c.type import EnumType, EnumList
from pytensor.scalar import ScalarType
wrapper = ParamsType(scalar=ScalarType('int32'),
letters=EnumType(A=(1, 'alpha'), B=(2, 'beta'), C=3),
digits=EnumList(('ZERO', 'nothing'), ('ONE', 'unit'), ('TWO', 'couple')))
print(wrapper.get_enum('C')) # 3
print(wrapper.get_enum('TWO')) # 2
print(wrapper.enum_from_alias('alpha')) # 1
print(wrapper.enum_from_alias('nothing')) # 0
wrapper = ParamsType(
scalar=ScalarType("int32"),
letters=EnumType(A=(1, "alpha"), B=(2, "beta"), C=3),
digits=EnumList(("ZERO", "nothing"), ("ONE", "unit"), ("TWO", "couple")),
)
print(wrapper.get_enum("C")) # 3
print(wrapper.get_enum("TWO")) # 2
print(wrapper.enum_from_alias("alpha")) # 1
print(wrapper.enum_from_alias("nothing")) # 0
# For the following, alias 'C' is not defined, so the method looks for
# a constant named 'C', and finds it.
print(wrapper.enum_from_alias('C')) # 3
print(wrapper.enum_from_alias("C")) # 3
.. note::
@@ -567,12 +586,14 @@ def get_params(self, *objects, **kwargs) -> Params:
from pytensor.tensor.type import dmatrix
from pytensor.scalar import ScalarType
class MyObject:
def __init__(self):
self.a = 10
self.b = numpy.asarray([[1, 2, 3], [4, 5, 6]])
params_type = ParamsType(a=ScalarType('int32'), b=dmatrix, c=ScalarType('bool'))
params_type = ParamsType(a=ScalarType("int32"), b=dmatrix, c=ScalarType("bool"))
o = MyObject()
value_for_c = False
