Skip to content

Commit

Permalink
Support test_imperative parameterlist and layerdict (#38800)
Browse files Browse the repository at this point in the history
* Rearranged Eager AutoCodeGen directory structure

* Removed USE_OP in Eager AutoCodeGen

* Enabled generation for Operators without Grad/Inputs/Outputs

* Resolved operators without input

* Fixed merge conflicts

* Enabled Eager AutoCodeGen for 10+ more operators

* Refactored Eager AutoCodeGen with more organized helper objects

* Enabled Eager AutoCodeGen for operators with multiple OpBases

* Adjusted Eager AutoCodeGen to Enable Passing Output Tensor as Input Argument

* Handled Dispensable Inputs/Outputs in Eager AutoCodeGen

* Adjusted function generation/call between Python-C API & Dygraph API

* Synchronized auto-generated Python-C API with Dygraph Forward Functions

* support more eager tensor api

* fix merge compile error

* fix compile error and fit develop code

* support pure CPU

* fix some logic error in eager_mode

* support _varbase_creator in eager mode

* Added safe_initialized interface to EagerTensor for use in processing dispensable inputs

* for eager mode

* refine

* support multiple constructor for eager tensor

* add place related code

* polish code

* Specify randint with dtype of int64

* Support pure cpu test

* eager logic

* refine test in pure cpu

* eager logic

* eager logic

* eager logic, test=develop

* skip core.eager when in inference, test=develop

* refine, test=develop

* refine, test=develop

* call RetainGrad after run forward kernel, test=develop

* refine, test=develop

* support dygraph util, meta, guard test

* eager test case

* support inference test

* refine test and fix initializer failed

* modify eagertensor patch method

* add eagertensor.clear_gradient, test=develop

* refine, test=develop

* refine, test=develop

* refine, test=develop

* support create varbase and fix retain grad error

* call monkey_patch_varbase in _test_eager_guard, test=develop

* fix windows error

* split clear_gradient to clear_gradient and zero_grads, test=develop

* refine, test=develop

* refine, test=develop

* support test_imperative_basic test in eager mode

* remove additional log in variable.h

* remove additional log in variable.h

* remove additional code create in merge

* eager

* fix some eager logic, test=develop

* refine, test=develop

* refine, test=develop

* refine, test=develop

* patch_tensor_method_func, test=develop

* refine, test=develop

* eager test case, test=develop

* refine, test=develop

* Support eager_guard() in container_layerdict&parameterlist

* eager, test=develop

* eager, test=develop

* eager optimizer, test=develop

* eager optimizer, test=develop

* eager test_imperative_optimizer_v2, test=develop

* eager, test=develop

* refine, test=develop

* refine, test=develop

* eager, test=develop

* add resize in share buffer to, test=develop

* eager, test=develop

* fix _share_buffer_to, test=develop

* refine, test=develop

* refine, test=develop

* support eager for dataloader,test=develop

Co-authored-by: jim19930609 <[email protected]>
Co-authored-by: JiabinYang <[email protected]>
Co-authored-by: Wang Huan <[email protected]>
Co-authored-by: wanghuancoder <[email protected]>
  • Loading branch information
5 people authored Jan 21, 2022
1 parent 29796ef commit f68ef9d
Show file tree
Hide file tree
Showing 2 changed files with 21 additions and 13 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -18,10 +18,11 @@
import numpy as np
import paddle
from collections import OrderedDict
from paddle.fluid.framework import _test_eager_guard


class TestLayerDict(unittest.TestCase):
def test_layer_dict(self):
def func_layer_dict(self):
layers = OrderedDict([
('conv1d', paddle.nn.Conv1D(3, 2, 3)),
('conv2d', paddle.nn.Conv2D(3, 2, 3)),
Expand Down Expand Up @@ -89,7 +90,12 @@ def check_layer_dict():
layers_dicts.update(list_format_layers)
check_layer_dict()

def test_layer_dict_error_inputs(self):
def test_layer_dict(self):
    """Run the LayerDict checks twice: once inside the eager-mode
    guard, then again in the default (legacy dygraph) mode so both
    execution paths are exercised.
    """
    eager_guard = _test_eager_guard()
    with eager_guard:
        self.func_layer_dict()
    # Second pass outside the guard covers the non-eager code path.
    self.func_layer_dict()

def func_layer_dict_error_inputs(self):
layers = [
('conv1d', paddle.nn.Conv1D(3, 2, 3), "conv1d"),
('conv2d', paddle.nn.Conv2D(3, 2, 3)),
Expand All @@ -100,6 +106,11 @@ def test_layer_dict_error_inputs(self):

self.assertRaises(AssertionError, layers_dicts.update, 1)

def test_layer_dict_error_inputs(self):
    """Verify LayerDict rejects malformed inputs in both eager mode
    and the default (legacy dygraph) mode.
    """
    eager_guard = _test_eager_guard()
    with eager_guard:
        self.func_layer_dict_error_inputs()
    # Repeat without the guard to cover the non-eager code path.
    self.func_layer_dict_error_inputs()


# Standard unittest entry point: discover and run the test cases in this
# module when the file is executed directly.
if __name__ == '__main__':
    unittest.main()
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
Expand All @@ -18,6 +18,8 @@
import paddle.fluid as fluid
import numpy as np
import paddle
from paddle import _C_ops
from paddle.fluid.framework import _test_eager_guard


class MyLayer(fluid.Layer):
Expand All @@ -41,15 +43,7 @@ def paddle_imperative_ParameterList(self, num_stacked_param):

def forward(self, x):
    """Chain-multiply the input through every stacked parameter.

    Args:
        x: Input tensor; repeatedly replaced by ``mul(x, p)`` for each
           parameter ``p`` in ``self.params``.

    Returns:
        The tensor produced after multiplying by all parameters in order.
    """
    # The old `_helper.append_op("mul", ...)` path was replaced by the
    # eager-mode `_C_ops.mul`; the leftover pre-refactor statements are
    # removed here. The enumerate index was never used, so iterate the
    # parameters directly.
    for p in self.params:
        x = _C_ops.mul(x, p)
    return x


Expand Down Expand Up @@ -80,8 +74,11 @@ def paramter_list(self, use_fluid_api):
loss.backward()

def test_paramter_list(self):
    """Exercise ParameterList forward/backward with both the fluid and
    paddle APIs, first under the eager-mode guard and then in the
    default (legacy dygraph) mode.
    """
    # The stray duplicate `self.paramter_list(True)` that preceded the
    # guard (leftover from the pre-eager version of this test) is
    # removed: each API variant should run exactly once per mode.
    with _test_eager_guard():
        self.paramter_list(False)
        self.paramter_list(True)
    self.paramter_list(False)
    self.paramter_list(True)


if __name__ == '__main__':
Expand Down

0 comments on commit f68ef9d

Please sign in to comment.