[Fix] Fix doctest #821

Merged
merged 11 commits into from
Mar 26, 2024
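
The diffs below adjust docstring examples, the snippets that Python's doctest module verifies by re-running each `>>>` statement and comparing the printed output with the expected text that follows it. A minimal way to check a single module by hand, assuming ppsci is importable in the current environment, is a sketch like:

import doctest
import ppsci.arch.amgnet as amgnet

# Re-run every ">>>" example found in the module's docstrings and
# report how many examples were attempted and how many failed.
results = doctest.testmod(amgnet, verbose=False)
print(results.attempted, results.failed)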
4 changes: 3 additions & 1 deletion ppsci/arch/amgnet.py
@@ -579,7 +579,9 @@ class AMGNet(nn.Layer):

Examples:
>>> import ppsci
>>> model = ppsci.arch.AMGNet(("input", ), ("pred", ), 5, 3, 64, 2)
>>> model = ppsci.arch.AMGNet(
... ("input", ), ("pred", ), 5, 3, 64, 2, "sum", 6, "norm",
... )
"""

def __init__(
2 changes: 1 addition & 1 deletion ppsci/arch/chip_deeponets.py
@@ -60,7 +60,7 @@ class ChipDeepONets(base.Arch):

Examples:
>>> import ppsci
>>> model = ppsci.arch.ChipDeepONet(
>>> model = ppsci.arch.ChipDeepONets(
... ('u',),
... ('bc',),
... ('bc_data',),
8 changes: 5 additions & 3 deletions ppsci/arch/epnn.py
@@ -65,12 +65,12 @@ class Epnn(base.Arch):

Examples:
>>> import ppsci
>>> ann_node_sizes_state = [1]
>>> ann_node_sizes_state = [1, 20]
>>> model = ppsci.arch.Epnn(
... ("x",),
... ("y",),
... node_sizes=ann_node_sizes_state,
... activations=("leaky_relu"),
... activations=("leaky_relu",),
... drop_p=0.0,
... )
"""
@@ -84,7 +84,9 @@ def __init__(
drop_p: float,
):
super().__init__()
self.active_func = [act_mod.get_activation(i) for i in activations]
self.active_func = [
act_mod.get_activation(act_name) for act_name in activations
]
self.node_sizes = node_sizes
self.drop_p = drop_p
self.layers = []
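
The activations fix in epnn.py above hinges on a plain-Python detail: parentheses alone do not create a tuple, the trailing comma does. A standalone illustration, independent of ppsci:

not_a_tuple = ("leaky_relu")     # just a parenthesized string
real_tuple = ("leaky_relu",)     # the trailing comma makes a one-element tuple

print(type(not_a_tuple).__name__)   # str
print(type(real_tuple).__name__)    # tuple

# Iterating over the string yields single characters ('l', 'e', 'a', ...),
# while iterating over the tuple yields the whole activation name once,
# which is what a loop over activation names expects.
for name in real_tuple:
    print(name)                     # leaky_relu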
2 changes: 1 addition & 1 deletion ppsci/arch/he_deeponets.py
@@ -52,7 +52,7 @@ class HEDeepONets(base.Arch):

Examples:
>>> import ppsci
>>> model = ppsci.arch.HEDeepONet(
>>> model = ppsci.arch.HEDeepONets(
... ('qm_h',),
... ('qm_c',),
... ("x",'t'),
2 changes: 1 addition & 1 deletion ppsci/constraint/boundary_constraint.py
@@ -69,7 +69,7 @@ class BoundaryConstraint(base.Constraint):
... },
... ppsci.loss.MSELoss("mean"),
... name="BC",
... )
... ) # doctest: +SKIP
"""

def __init__(
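
The constraint examples above and several below gain a `# doctest: +SKIP` directive. For reference, `+SKIP` keeps an example visible in the rendered docs but tells the doctest runner not to execute that statement, which suits examples that need datasets, mesh files, or a configured device. A minimal self-contained sketch, with a hypothetical `load_heavy_data` used purely for illustration:

import doctest

def load_heavy_data():
    """Pretend this needs files that are absent when the docs are tested.

    Examples:
        >>> load_heavy_data()  # doctest: +SKIP
        'loaded'
        >>> 1 + 1
        2
    """
    return "loaded"

# Only the `1 + 1` example is executed; the skipped statement never runs.
print(doctest.testmod())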
2 changes: 1 addition & 1 deletion ppsci/constraint/initial_constraint.py
@@ -74,7 +74,7 @@ class InitialConstraint(base.Constraint):
... },
... ppsci.loss.MSELoss("mean"),
... name="IC",
... )
... ) # doctest: +SKIP
"""

def __init__(
2 changes: 1 addition & 1 deletion ppsci/constraint/integral_constraint.py
@@ -71,7 +71,7 @@ class IntegralConstraint(base.Constraint):
... },
... ppsci.loss.MSELoss("mean"),
... name="IgC",
... )
... ) # doctest: +SKIP
"""

def __init__(
2 changes: 1 addition & 1 deletion ppsci/constraint/interior_constraint.py
@@ -71,7 +71,7 @@ class InteriorConstraint(base.Constraint):
... },
... ppsci.loss.MSELoss("mean"),
... name="EQ",
... )
... ) # doctest: +SKIP
"""

def __init__(
6 changes: 3 additions & 3 deletions ppsci/data/dataset/era5_dataset.py
@@ -175,10 +175,10 @@ class ERA5SampledDataset(io.Dataset):
... "label_keys": ("output",),
... ) # doctest: +SKIP
>>> # get the length of the dataset
>>> dataset_size = len(dataset)
>>> dataset_size = len(dataset) # doctest: +SKIP
>>> # get the first sample of the data
>>> first_sample = dataset[0]
>>> print("First sample:", first_sample)
>>> first_sample = dataset[0] # doctest: +SKIP
>>> print("First sample:", first_sample) # doctest: +SKIP
"""

def __init__(
6 changes: 3 additions & 3 deletions ppsci/data/dataset/mrms_dataset.py
@@ -187,10 +187,10 @@ class MRMSSampledDataset(io.Dataset):
... "num_total_timestamps": 29,
... ) # doctest: +SKIP
>>> # get the length of the dataset
>>> dataset_size = len(dataset)
>>> dataset_size = len(dataset) # doctest: +SKIP
>>> # get the first sample of the data
>>> first_sample = dataset[0]
>>> print("First sample:", first_sample)
>>> first_sample = dataset[0] # doctest: +SKIP
>>> print("First sample:", first_sample) # doctest: +SKIP
"""

def __init__(
4 changes: 2 additions & 2 deletions ppsci/data/dataset/vtu_dataset.py
@@ -39,15 +39,15 @@ class VtuDataset(io.Dataset):
transform(s).

Examples:
>>> from ppsci.dataset import VtuDataset
>>> from ppsci.data.dataset import VtuDataset

>>> dataset = VtuDataset(file_path='example.vtu') # doctest: +SKIP

>>> # get the length of the dataset
>>> dataset_size = len(dataset) # doctest: +SKIP
>>> # get the first sample of the data
>>> first_sample = dataset[0] # doctest: +SKIP
>>> print("First sample:", first_sample)
>>> print("First sample:", first_sample) # doctest: +SKIP
"""

# Whether support batch indexing for speeding up fetching process.
21 changes: 12 additions & 9 deletions ppsci/data/process/transform/preprocess.py
@@ -32,18 +32,20 @@ class Translate:
Examples:
>>> import ppsci
>>> import numpy as np
>>> translate = ppsci.data.transform.Translate({"x": 1.0, "y": -1.0})
>>> input_data = np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
>>> input_dict = {"x": input_data[:,:,0], "y": input_data[:,:,1]}

>>> input_dict = {"x": np.array([5.0, 10.0]), "y": np.array([20.0, 40.0])}
>>> label_dict = {"x": np.array([1.0, 2.0]), "y": np.array([3.0, 4.0])}
>>> weight_dict = {"x": np.array([10.0, 20.0]), "y": np.array([30.0, 40.0])}

>>> translate = ppsci.data.transform.Translate({"x": 1.0, "y": -1.0})
>>> translated_input_dict, translated_label_dict, translated_weight_dict = translate(input_dict, label_dict, weight_dict)

>>> print(translated_input_dict)
{"x": array([[2., 3.], [4., 5.]]), "y": array([[0., 1.], [2., 3.]])}
{'x': array([ 6., 11.]), 'y': array([19., 39.])}
>>> print(translated_label_dict)
{"x": array([2., 3.]), "y": array([3., 4.])}
{'x': array([1., 2.]), 'y': array([3., 4.])}
>>> print(translated_weight_dict)
{"x": array([10., 20.]), "y": array([30., 40.])}
{'x': array([10., 20.]), 'y': array([30., 40.])}
"""

def __init__(self, offset: Dict[str, float]):
@@ -293,23 +295,24 @@ class FunctionalTransform:
>>> # The function will perform some transformations on the data in data_dict, convert all labels in label_dict to uppercase,
>>> # and modify the weights in weight_dict by dividing each weight by 10.
>>> # Finally, it returns the transformed data, labels, and weights as a tuple.
>>> import ppsci
>>> def transform_func(data_dict, label_dict, weight_dict):
... for key in data_dict:
... data_dict[key] = data_dict[key] * 2
... for key in label_dict:
... label_dict[key] = label_dict[key].upper()
... label_dict[key] = label_dict[key] + 1.0
... for key in weight_dict:
... weight_dict[key] = weight_dict[key] / 10
... return data_dict, label_dict, weight_dict
>>> transform = ppsci.data.transform.FunctionalTransform(transform_func)
>>> # Define some sample data, labels, and weights
>>> data = {'feature1': np.array([1, 2, 3]), 'feature2': np.array([4, 5, 6])}
>>> label = {'class': 'class1', 'instance': 'instance1'}
>>> label = {'class': 0.0, 'instance': 0.1}
>>> weight = {'weight1': 0.5, 'weight2': 0.5}
>>> # Apply the transform function to the data, labels, and weights using the FunctionalTransform instance
>>> transformed_data = transform(data, label, weight)
>>> print(transformed_data)
({'feature1': [2, 4, 6], 'feature2': [8, 10, 12]}, {'class': 'CLASS1', 'instance': 'INSTANCE1'}, {'weight1': 0.5, 'weight2': 0.5})
({'feature1': array([2, 4, 6]), 'feature2': array([ 8, 10, 12])}, {'class': 1.0, 'instance': 1.1}, {'weight1': 0.05, 'weight2': 0.05})
"""

def __init__(
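
The rewritten expected outputs in preprocess.py (single-quoted keys, numpy `array(...)` reprs, exact spacing) matter because doctest compares printed text literally, so the docstring must show exactly what `print` emits. A quick check of the Translate expectation, assuming only numpy and mirroring the per-key offset shown in that example:

import numpy as np

input_dict = {"x": np.array([5.0, 10.0]), "y": np.array([20.0, 40.0])}
offset = {"x": 1.0, "y": -1.0}

# Add the per-key offset, as the Translate example describes.
translated = {k: v + offset[k] for k, v in input_dict.items()}
print(translated)
# {'x': array([ 6., 11.]), 'y': array([19., 39.])}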
4 changes: 3 additions & 1 deletion ppsci/equation/fpde/fractional_poisson.py
@@ -38,7 +38,9 @@ class FractionalPoisson(PDE):

Examples:
>>> import ppsci
>>> fpde = ppsci.equation.FractionalPoisson(ALPHA, geom["disk"], [8, 100])
>>> geom_disk = ppsci.geometry.Disk([0, 0], 1)
>>> ALPHA = 0.5
>>> fpde = ppsci.equation.FractionalPoisson(ALPHA, geom_disk, [8, 100])
"""
dtype = paddle.get_default_dtype()

26 changes: 13 additions & 13 deletions ppsci/experimental/math_module.py
@@ -427,34 +427,34 @@ def trapezoid_integrate(
>>> res = ppsci.experimental.trapezoid_integrate(y)
>>> print(res)
Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[2., 8.])
[2., 8.])
>>> res = ppsci.experimental.trapezoid_integrate(y, mode="cumsum")
>>> print(res)
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[[0.50000000, 2. ],
[3.50000000, 8. ]])
[[0.50000000, 2. ],
[3.50000000, 8. ]])
>>> res = ppsci.experimental.trapezoid_integrate(
y, x=paddle.to_tensor([[0, 1, 2], [3, 4, 5]], dtype="float32")
)
... y, x=paddle.to_tensor([[0, 1, 2], [3, 4, 5]], dtype="float32")
... )
>>> print(res)
Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[2., 8.])
[2., 8.])
>>> res = ppsci.experimental.trapezoid_integrate(
y, x=paddle.to_tensor([0, 1], dtype="float32"), axis=0
)
... y, x=paddle.to_tensor([0, 1], dtype="float32"), axis=0
... )
>>> print(res)
Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[1.50000000, 2.50000000, 3.50000000])
[1.50000000, 2.50000000, 3.50000000])
>>> res = ppsci.experimental.trapezoid_integrate(
y, x=paddle.to_tensor([0, 1, 2], dtype="float32"), axis=1
)
... y, x=paddle.to_tensor([0, 1, 2], dtype="float32"), axis=1
... )
>>> print(res)
Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[2., 8.])
[2., 8.])
>>> res = ppsci.experimental.trapezoid_integrate(y, dx=2)
>>> print(res)
Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[4. , 16.])
[4. , 16.])
"""
if mode == "sum":
return paddle.trapezoid(y, x, dx, axis)
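
The expected tensors in the trapezoid_integrate examples follow from the trapezoid rule. A plain-Python sanity check, assuming `y` is the `[[0, 1, 2], [3, 4, 5]]` tensor defined just above this hunk (inferred from the printed results):

y = [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]

def trapezoid(row, dx=1.0):
    # Sum of trapezoid areas between consecutive samples.
    return sum(0.5 * (a + b) * dx for a, b in zip(row, row[1:]))

print([trapezoid(row) for row in y])         # [2.0, 8.0]   -> matches [2., 8.]
print([trapezoid(row, dx=2) for row in y])   # [4.0, 16.0]  -> matches [4., 16.]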
34 changes: 17 additions & 17 deletions ppsci/geometry/mesh.py
@@ -75,11 +75,11 @@ def from_pymesh(cls, mesh: "pymesh.Mesh") -> "Mesh":

Examples:
>>> import ppsci
>>> import pymesh
>>> import numpy as np
>>> box = pymesh.generate_box_mesh(np.array([0, 0, 0]), np.array([1, 1, 1]))
>>> mesh = ppsci.geometry.Mesh.from_pymesh(box)
>>> print(mesh.vertices)
>>> import pymesh # doctest: +SKIP
>>> import numpy as np # doctest: +SKIP
>>> box = pymesh.generate_box_mesh(np.array([0, 0, 0]), np.array([1, 1, 1])) # doctest: +SKIP
>>> mesh = ppsci.geometry.Mesh.from_pymesh(box) # doctest: +SKIP
>>> print(mesh.vertices) # doctest: +SKIP
[[0. 0. 0.]
[1. 0. 0.]
[1. 1. 0.]
@@ -201,11 +201,11 @@ def translate(self, translation: np.ndarray, relative: bool = True) -> "Mesh":

Examples:
>>> import ppsci
>>> import pymesh
>>> import pymesh # doctest: +SKIP
>>> import numpy as np
>>> box = pymesh.generate_box_mesh(np.array([0, 0, 0]), np.array([1, 1, 1]))
>>> mesh = ppsci.geometry.Mesh(box)
>>> print(mesh.vertices)
>>> box = pymesh.generate_box_mesh(np.array([0, 0, 0]), np.array([1, 1, 1])) # doctest: +SKIP
>>> mesh = ppsci.geometry.Mesh(box) # doctest: +SKIP
>>> print(mesh.vertices) # doctest: +SKIP
[[0. 0. 0.]
[1. 0. 0.]
[1. 1. 0.]
...
[1. 0. 1.]
[1. 1. 1.]
[0. 1. 1.]]
>>> print(mesh.translate((-0.5, 0, 0.5), False).vertices) # the center is moved to the translation vector.
>>> print(mesh.translate((-0.5, 0, 0.5), False).vertices) # the center is moved to the translation vector. # doctest: +SKIP
[[-1. -0.5 0. ]
[ 0. -0.5 0. ]
[ 0. 0.5 0. ]
Expand All @@ -223,7 +223,7 @@ def translate(self, translation: np.ndarray, relative: bool = True) -> "Mesh":
[ 0. -0.5 1. ]
[ 0. 0.5 1. ]
[-1. 0.5 1. ]]
>>> print(mesh.translate((-0.5, 0, 0.5), True).vertices) # the translation vector is directly added to the geometry coordinates
>>> print(mesh.translate((-0.5, 0, 0.5), True).vertices) # the translation vector is directly added to the geometry coordinates # doctest: +SKIP
[[-0.5 0. 0.5]
[ 0.5 0. 0.5]
[ 0.5 1. 0.5]
@@ -274,11 +274,11 @@ def scale(

Examples:
>>> import ppsci
>>> import pymesh
>>> import pymesh # doctest: +SKIP
>>> import numpy as np
>>> box = pymesh.generate_box_mesh(np.array([0, 0, 0]), np.array([1, 1, 1]))
>>> mesh = ppsci.geometry.Mesh(box)
>>> print(mesh.vertices)
>>> box = pymesh.generate_box_mesh(np.array([0, 0, 0]), np.array([1, 1, 1])) # doctest: +SKIP
>>> mesh = ppsci.geometry.Mesh(box) # doctest: +SKIP
>>> print(mesh.vertices) # doctest: +SKIP
[[0. 0. 0.]
[1. 0. 0.]
[1. 1. 0.]
...
[1. 0. 1.]
[1. 1. 1.]
[0. 1. 1.]]
>>> mesh = mesh.scale(2, (0.25, 0.5, 0.75))
>>> print(mesh.vertices)
>>> mesh = mesh.scale(2, (0.25, 0.5, 0.75)) # doctest: +SKIP
>>> print(mesh.vertices) # doctest: +SKIP
[[-0.25 -0.5 -0.75]
[ 1.75 -0.5 -0.75]
[ 1.75 1.5 -0.75]
6 changes: 3 additions & 3 deletions ppsci/solver/solver.py
@@ -103,7 +103,7 @@ class Solver:
... },
... ppsci.loss.MSELoss("mean"),
... name="EQ",
... )
... ) # doctest: +SKIP
>>> solver = ppsci.solver.Solver(
... model,
... {"EQ": pde_constraint},
@@ -598,8 +598,8 @@ def predict(
>>> input_dict = {'x': paddle.rand([32, 1]),
... 'y': paddle.rand([32, 1])}
>>> pred = solver.predict(input_dict) # doctest: +SKIP
>>> for k, v in pred.items():
... print(k, v.shape)
>>> for k, v in pred.items(): # doctest: +SKIP
... print(k, v.shape) # doctest: +SKIP
u [32, 1]
v [32, 1]
"""
3 changes: 3 additions & 0 deletions ppsci/utils/misc.py
@@ -212,9 +212,12 @@ class Timer(ContextDecorator):

>>> timer = misc.Timer("cost_of_func", auto_print=False)
>>> timer.start()
>>> def func():
... w = sum(range(0, 10))
>>> func()
>>> timer.end()
>>> print(f"time cost of 'cost_of_func' is {timer.interval:.2f}")
time cost of 'cost_of_func' is 0.00
"""

interval: float # Time cost for code within Timer context
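
The Timer example above now defines and calls the function being timed before reading `timer.interval`. For comparison, here is the same start/stop pattern using only the standard library:

import time

start = time.perf_counter()
w = sum(range(0, 10))                  # the work being timed
interval = time.perf_counter() - start
print(f"time cost of 'cost_of_func' is {interval:.2f}")
# typically prints: time cost of 'cost_of_func' is 0.00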
4 changes: 2 additions & 2 deletions ppsci/utils/writer.py
@@ -67,7 +67,7 @@ def save_csv_file(
... use_header=True,
... delimiter=",",
... encoding="utf-8",
... )
... ) # doctest: +SKIP

>>> # == test.csv ==
>>> # A,B
@@ -150,7 +150,7 @@ def save_tecplot_file(
... num_y=3,
... alias_dict={"X": "x", "Y": "y"},
... num_timestamps=2,
... )
... ) # doctest: +SKIP
>>> # == test_t-0.dat ==
>>> # title = "./test_t-0.dat"
>>> # variables = "X", "Y"
1 change: 1 addition & 0 deletions ppsci/visualize/radar.py
@@ -38,6 +38,7 @@ class VisualizerRadar(base.Visualizer):

Examples:
>>> import ppsci
>>> import paddle
>>> frames_tensor = paddle.randn([1, 29, 512, 512, 2])
>>> visualizer = ppsci.visualize.VisualizerRadar(
... {"input": frames_tensor},