Merge branch 'unifyai:main' into fix-paddle-test
alt-shreya committed Mar 8, 2024
2 parents 08de109 + 36a36bf commit ac0ad98
Showing 32 changed files with 547 additions and 470 deletions.
45 changes: 44 additions & 1 deletion .github/workflows/pre-release.yml
@@ -15,8 +15,40 @@ on:
permissions:
actions: read
jobs:
display_test_results:
if: ${{ always() }}
runs-on: ubuntu-latest
needs:
- run_tests

steps:
- name: Download all test results
uses: actions/download-artifact@v3

- name: Combined Test Results
run: |
find . -name "test_results_*.txt" -exec cat {} + > combined_test_results.txt
echo "Test results summary:"
cat combined_test_results.txt
run_tests:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
branch: [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
111, 112, 113, 114, 115, 116, 117, 118, 119, 120,
121, 122, 123, 124, 125, 126, 127, 128 ]
steps:
- name: Checkout Ivy 🛎
uses: actions/checkout@v3
@@ -41,10 +73,21 @@ jobs:
mkdir .ivy
touch .ivy/key.pem
echo -n ${{ secrets.USER_API_KEY }} > .ivy/key.pem
- python scripts/setup_tests/setup_priority_tests.py
+ python scripts/setup_tests/setup_priority_tests.py ${{ matrix.branch }}
cd ..
- name: Run CPU Tests
run: |
cd ivy
python scripts/run_tests/run_tests.py ${{ secrets.REDIS_CONNECTION_URL }} ${{ secrets.REDIS_PASSWORD }} ${{ secrets.MONGODB_PASSWORD }} 'false' 'false' ${{ github.run_id }} 'true' ${{ github.event.inputs.tracer }} ${{ github.event.inputs.tracer_each }} ${{ steps.jobs.outputs.html_url }}
continue-on-error: true

- name: Upload test results
uses: actions/upload-artifact@v3
with:
name: test_results_${{ matrix.branch }}
path: ivy/test_results_${{ matrix.branch }}.txt

- name: Check on failures
if: steps.tests.outcome != 'success'
run: exit 1
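The new matrix fans the suite out across 128 parallel jobs, each invoking setup_priority_tests.py with its own shard index. The script's internals are not part of this diff; the sketch below is only a hypothetical illustration of index-based sharding, and the priority_tests.txt input file is an assumed name.

# Hypothetical sketch of the sharding idea behind the new ${{ matrix.branch }}
# argument; not the actual contents of setup_priority_tests.py.
import sys

NUM_SHARDS = 128  # matches the workflow's branch indices 1-128

def shard_tests(tests, shard_index):
    # shard_index is 1-based, as supplied by the workflow matrix
    return [t for i, t in enumerate(tests) if i % NUM_SHARDS == shard_index - 1]

if __name__ == "__main__":
    shard = int(sys.argv[1])
    with open("priority_tests.txt") as f:  # assumed input listing
        tests = f.read().splitlines()
    for test in shard_tests(tests, shard):
        print(test)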
3 changes: 1 addition & 2 deletions README.md
@@ -1150,8 +1150,7 @@ class IvyNet(ivy.Module):
self.output_channels = output_channels
self.num_classes = num_classes
self.data_format = data_format
- self.device = device
- super().__init__()
+ super().__init__(device=device)

def _build(self, *args, **kwargs):
self.extractor = ivy.Sequential(
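The fix above matters because ivy.Module.__init__ consumes the device argument when building the module's variables; assigning self.device manually bypassed that. A minimal sketch of the corrected pattern, with the IvyNet body abbreviated:

# Abbreviated illustration of the corrected pattern; the real IvyNet defines
# more parameters and layers than shown here.
import ivy

class IvyNet(ivy.Module):
    def __init__(self, num_classes=10, device=None):
        self.num_classes = num_classes
        super().__init__(device=device)  # let ivy.Module handle device placement

    def _forward(self, x):
        return x  # model logic omitted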
2 changes: 1 addition & 1 deletion docs/demos
Submodule demos updated from d3fa2b to 15c235
9 changes: 9 additions & 0 deletions docs/overview/volunteer_ranks.rst
@@ -15,6 +15,9 @@ Top Contributors
* - samunder singh
- `samthakur587 <https://github.com/samthakur587>`_
- Merging Master Gold, Merging Wizard, Ivy Inspector Silver
* - BANAFSHEH HEZARDASTAN
- `NripeshN <https://github.com/NripeshN>`_
- Merging Master Bronze, Ivy Inspector Gold
* - V\. Sai Suraj
- `Sai-Suraj-27 <https://github.com/Sai-Suraj-27>`_
- Merging Master Gold, Ivy Inspector Bronze
@@ -66,6 +69,12 @@ Contributors
* - Jackson McClintock
- `jacksondm33 <https://github.com/jacksondm33>`_
- Merging Master, Merging Wizard, Ivy Inspector
* - Ayush Lokare
- `ayush111111 <https://github.com/ayush111111>`_
- Merging Master, Ivy Inspector
* - Chaitanya Lakhchaura
- `ZenithFlux <https://github.com/ZenithFlux>`_
- Debugging Dynamo, Merging Master
* - David Adlai Nettey
- `Adlai-1 <https://github.com/Adlai-1>`_
- Merging Master, Ivy Inspector
48 changes: 6 additions & 42 deletions ivy/__init__.py
@@ -304,59 +304,23 @@ def __rmul__(self, other):
return self

def __bool__(self):
- if ivy.current_backend_str() == "tensorflow":
- return builtins.bool(builtins.tuple(self._shape))
return builtins.bool(self._shape)

def __div__(self, other):
return self._shape // other

def __floordiv__(self, other):
return self._shape // other

def __mod__(self, other):
return self._shape % other

def __rdiv__(self, other):
return other // self._shape

def __rmod__(self, other):
return other % self._shape

def __reduce__(self):
return (self.__class__, (self._shape,))

def as_dimension(self, other):
if isinstance(other, self._shape):
- return other
+ return to_ivy(other)
else:
return self._shape

def __sub__(self, other):
try:
self._shape = self._shape - other
except TypeError:
self._shape = self._shape - list(other)
return self

def __rsub__(self, other):
try:
self._shape = other - self._shape
except TypeError:
self._shape = list(other) - self._shape
return self

def __eq__(self, other):
self._shape = Shape._shape_casting_helper(self._shape, other)
return self._shape == other

def __int__(self):
if hasattr(self._shape, "__int__"):
res = self._shape.__int__()
else:
res = int(self._shape)
if res is NotImplemented:
return res
return to_ivy(res)

def __ge__(self, other):
self._shape = Shape._shape_casting_helper(self._shape, other)
return self._shape >= other
@@ -378,7 +342,7 @@ def __getattribute__(self, item):

def __getitem__(self, key):
try:
- return self._shape[key]
+ return to_ivy(self._shape[key])
except (TypeError, IndexError):
return None

@@ -416,11 +380,11 @@ def index(self, index):
if self._shape.rank is None:
return Shape(None)
else:
- return self._shape[index]
+ return to_ivy(self._shape[index])

def as_dimension(self):
if isinstance(self._shape, Shape):
- return self._shape
+ return to_ivy(self._shape)
else:
return Shape(self._shape)

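The common thread in these Shape changes is that element access and dimension helpers now route through to_ivy, so callers receive Ivy-wrapped values instead of raw backend objects. A rough illustration of the intended behaviour (the exact wrapped type depends on the active backend, so treat the specifics as assumptions):

# Illustrative only: Shape.__getitem__ now wraps its result via to_ivy.
import ivy

ivy.set_backend("numpy")
s = ivy.shape(ivy.zeros((2, 3)), as_array=False)  # an ivy.Shape
dim0 = s[0]  # previously a bare backend value; now an Ivy-wrapped one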
2 changes: 1 addition & 1 deletion ivy/_version.py
@@ -1 +1 @@
__version__ = "0.0.7.5"
__version__ = "0.0.8.0"
3 changes: 2 additions & 1 deletion ivy/func_wrapper.py
@@ -137,11 +137,12 @@ def cross_caster(intersect):
dtype = ""
valid_float = sorted(ivy.valid_float_dtypes)
valid_int = sorted(ivy.valid_int_dtypes)
valid_bool = [ivy.bool]
intersect = sorted(intersect)
if set(valid_int).issubset(intersect):
# make dtype equal to default float
dtype = ivy.default_float_dtype()
- elif set(valid_float).issubset(intersect):
+ elif set(valid_float).issubset(intersect) or set(valid_bool).issubset(intersect):
# make dtype equal to default int
dtype = ivy.default_int_dtype()

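The added valid_bool check covers operands whose only shared dtype is bool: previously such an intersection matched neither branch, dtype stayed empty, and no cross-casting happened; now it promotes to the default int dtype. A small self-contained trace of the decision logic, using abbreviated stand-ins for Ivy's real dtype lists:

# Re-creation of the branch logic with stand-in dtype names (not ivy's real lists).
valid_int = {"int8", "int16", "int32", "int64"}   # abbreviated
valid_float = {"float16", "float32", "float64"}   # abbreviated
valid_bool = {"bool"}
intersect = {"bool"}  # the operands only share bool

dtype = ""
if valid_int.issubset(intersect):
    dtype = "default_float"
elif valid_float.issubset(intersect) or valid_bool.issubset(intersect):
    dtype = "default_int"  # new branch: bool-only intersections now cast
print(dtype)               # default_int (before this change: "")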
24 changes: 12 additions & 12 deletions ivy/functional/backends/jax/__init__.py
@@ -102,7 +102,7 @@ def _array_unflatten(aux_data, children):

# update these to add new dtypes
valid_dtypes = {
"0.4.24 and below": (
"0.4.25 and below": (
ivy.int8,
ivy.int16,
ivy.int32,
@@ -121,7 +121,7 @@ def _array_unflatten(aux_data, children):
)
}
valid_numeric_dtypes = {
"0.4.24 and below": (
"0.4.25 and below": (
ivy.int8,
ivy.int16,
ivy.int32,
@@ -140,7 +140,7 @@ def _array_unflatten(aux_data, children):
}

valid_int_dtypes = {
"0.4.24 and below": (
"0.4.25 and below": (
ivy.int8,
ivy.int16,
ivy.int32,
@@ -153,12 +153,12 @@ def _array_unflatten(aux_data, children):
}

valid_uint_dtypes = {
"0.4.24 and below": (ivy.uint8, ivy.uint16, ivy.uint32, ivy.uint64)
"0.4.25 and below": (ivy.uint8, ivy.uint16, ivy.uint32, ivy.uint64)
}
valid_float_dtypes = {
"0.4.24 and below": (ivy.bfloat16, ivy.float16, ivy.float32, ivy.float64)
"0.4.25 and below": (ivy.bfloat16, ivy.float16, ivy.float32, ivy.float64)
}
valid_complex_dtypes = {"0.4.24 and below": (ivy.complex64, ivy.complex128)}
valid_complex_dtypes = {"0.4.25 and below": (ivy.complex64, ivy.complex128)}


# leave these untouched
@@ -173,12 +173,12 @@ def _array_unflatten(aux_data, children):
# invalid data types

# update these to add new dtypes
invalid_dtypes = {"0.4.24 and below": ()}
invalid_numeric_dtypes = {"0.4.24 and below": ()}
invalid_int_dtypes = {"0.4.24 and below": ()}
invalid_float_dtypes = {"0.4.24 and below": ()}
invalid_uint_dtypes = {"0.4.24 and below": ()}
invalid_complex_dtypes = {"0.4.24 and below": ()}
invalid_dtypes = {"0.4.25 and below": ()}
invalid_numeric_dtypes = {"0.4.25 and below": ()}
invalid_int_dtypes = {"0.4.25 and below": ()}
invalid_float_dtypes = {"0.4.25 and below": ()}
invalid_uint_dtypes = {"0.4.25 and below": ()}
invalid_complex_dtypes = {"0.4.25 and below": ()}

# leave these untouched
invalid_dtypes = _dtype_from_version(invalid_dtypes, backend_version)
30 changes: 29 additions & 1 deletion ivy/functional/backends/paddle/experimental/layers.py
@@ -469,7 +469,35 @@ def interpolate(
antialias: Optional[bool] = False,
out: Optional[paddle.Tensor] = None,
):
- raise IvyNotImplementedException()
if mode not in ["linear", "bilinear", "bicubic", "trilinear"]:
align_corners = None
return paddle.nn.functional.interpolate(
x,
size=size,
scale_factor=scale_factor,
mode=mode,
align_corners=align_corners,
)


interpolate.partial_mixed_handler = (
lambda *args, **kwargs: kwargs.get("mode", "linear")
not in [
"tf_area",
"nd",
"tf_bicubic",
"mitchellcubic",
"lanczos3",
"lanczos5",
"gaussian",
]
and (
kwargs.get("mode", "linear") in ["linear", "bilinear", "bicubic", "trilinear"]
or not kwargs.get("align_corners", False)
)
and not kwargs.get("antialias", False)
and not kwargs.get("recompute_scale_factor", False)
)


def adaptive_max_pool2d(
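With the stub removed, modes that paddle implements natively go straight to paddle.nn.functional.interpolate, and the partial_mixed_handler above routes everything it rejects to Ivy's compositional fallback. A hedged usage sketch (assumes the paddle backend and Ivy's experimental interpolate API):

# Usage sketch: "bilinear" passes the handler, so it runs natively on paddle;
# a mode like "tf_area" fails it and takes the compositional fallback instead.
import ivy

ivy.set_backend("paddle")
x = ivy.random_uniform(shape=(1, 3, 8, 8))  # NCHW batch
y = ivy.interpolate(x, (16, 16), mode="bilinear", align_corners=False)
print(y.shape)  # (1, 3, 16, 16)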
8 changes: 8 additions & 0 deletions ivy/functional/backends/paddle/experimental/manipulation.py
@@ -598,6 +598,14 @@ def expand(
copy: Optional[bool] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
shape = list(shape)
n_extra_dims = len(shape) - x.ndim
if n_extra_dims > 0:
with ivy.ArrayMode(False):
x = paddle_backend.expand_dims(x, tuple(range(n_extra_dims)))
for i, dim in enumerate(shape):
if dim < 0:
shape[i] = x.shape[i]
return paddle_backend.broadcast_to(x, shape)


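The new preamble gives expand NumPy-style semantics: missing leading dimensions are inserted before broadcasting, and -1 entries keep the corresponding existing size. An illustrative call under those semantics (the public API shape is an assumption here):

# x has shape (3,); the target (2, -1) first pads x to (1, 3), resolves the
# -1 against x.shape[1] == 3, then broadcasts to (2, 3).
import ivy

ivy.set_backend("paddle")
x = ivy.ones((3,))
y = ivy.expand(x, (2, -1))
print(y.shape)  # (2, 3)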
9 changes: 9 additions & 0 deletions ivy/functional/backends/tensorflow/creation.py
@@ -52,6 +52,15 @@ def arange(
stop = float(start)
else:
stop = start

# convert builtin types to tf scalars, as is expected by tf.range
if isinstance(start, (float, int)):
start = tf.constant(start)
if isinstance(stop, (float, int)):
stop = tf.constant(stop)
if isinstance(step, (float, int)):
step = tf.constant(step)

if dtype is None:
if isinstance(start, int) and isinstance(stop, int) and isinstance(step, int):
return tf.cast(tf.range(start, stop, delta=step, dtype=tf.int64), tf.int32)
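Wrapping builtin scalars in tf.constant up front keeps dtype resolution inside TensorFlow rather than relying on Python-type inference in tf.range. A minimal sketch of the resulting call shape:

# tf.range with explicit tf scalars, mirroring the conversion added above.
import tensorflow as tf

start, stop, step = tf.constant(0), tf.constant(10), tf.constant(2)
print(tf.range(start, stop, delta=step))  # tf.Tensor([0 2 4 6 8], ...)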
3 changes: 3 additions & 0 deletions ivy/functional/backends/tensorflow/elementwise.py
@@ -706,6 +706,7 @@ def sinh(
return tf.sinh(x)


@with_unsupported_dtypes({"2.15.0 and below": ("integer",)}, backend_version)
def sqrt(
x: Union[tf.Tensor, tf.Variable],
/,
@@ -740,6 +741,7 @@ def subtract(
return tf.subtract(x1, x2)


@with_unsupported_dtypes({"2.15.0 and below": ("integer",)}, backend_version)
def tan(
x: Union[tf.Tensor, tf.Variable],
/,
@@ -749,6 +751,7 @@ def tan(
return tf.tan(x)


@with_unsupported_dtypes({"2.15.0 and below": ("integer",)}, backend_version)
def tanh(
x: Union[tf.Tensor, tf.Variable],
/,
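These decorators record a restriction the raw TensorFlow ops already impose: tf.sqrt, tf.tan, and tf.tanh ship no integer kernels, so Ivy should intercept integer inputs rather than let the backend error surface. A quick demonstration of the underlying behaviour:

# The TF behaviour the decorators encode: integer inputs need an explicit cast.
import tensorflow as tf

x = tf.constant([1, 2, 3])           # int32
# tf.sqrt(x) raises an error here, since sqrt has no int32 kernel
y = tf.sqrt(tf.cast(x, tf.float32))  # fine once cast to a float dtype
print(y)                             # [1.  1.4142135  1.7320508]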
15 changes: 12 additions & 3 deletions ivy/functional/backends/tensorflow/experimental/manipulation.py
@@ -294,9 +294,10 @@ def pad(
**kwargs: Optional[Any],
) -> Union[tf.Tensor, tf.Variable]:
pad_width = _to_tf_padding(pad_width, len(input.shape))
- if isinstance(constant_values, (tf.Variable, tf.Tensor)):
- if constant_values.dtype != input.dtype:
- constant_values = tf.cast(constant_values, input.dtype)
+ if not isinstance(constant_values, (tf.Variable, tf.Tensor)):
+ constant_values = tf.constant(constant_values)
+ if constant_values.dtype != input.dtype:
+ constant_values = tf.cast(constant_values, input.dtype)
return tf.pad(
input,
pad_width,
@@ -578,6 +579,14 @@ def unflatten(
name: Optional[str] = None,
) -> tf.Tensor:
dim = abs(len(x.shape) + dim) if dim < 0 else dim

# infer the size of any dimensions that are -1
tf_shape = tf.constant(shape)
inferred_size = tf.reduce_prod(tf.shape(x)[dim]) // tf.reduce_prod(
tf.where(tf_shape != -1, x=shape, y=tf.constant(1))
)
shape = tf.where(tf_shape != -1, x=shape, y=inferred_size)

res_shape = x.shape[:dim] + tf.TensorShape(shape) + x.shape[dim + 1 :]
res = tf.reshape(x, res_shape, name)
return res
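The added block infers a single -1 entry in shape from the size of the dimension being unflattened, mirroring torch-style semantics. A standalone sketch of the same computation with concrete shapes:

# Standalone version of the -1 inference above (illustrative shapes).
import tensorflow as tf

x = tf.zeros((2, 12, 5))
dim, shape = 1, tf.constant([3, -1])
inferred = tf.shape(x)[dim] // tf.reduce_prod(tf.where(shape != -1, shape, 1))
resolved = tf.where(shape != -1, shape, inferred)           # [3, 4]
y = tf.reshape(x, tf.concat([[2], resolved, [5]], axis=0))  # dims around `dim`
print(y.shape)  # (2, 3, 4, 5)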
4 changes: 3 additions & 1 deletion ivy/functional/backends/tensorflow/layers.py
@@ -410,7 +410,9 @@ def conv3d_transpose(
return res


@with_unsupported_dtypes({"2.15.0 and below": ("bfloat16", "complex")}, backend_version)
@with_unsupported_dtypes(
{"2.15.0 and below": ("bfloat16", "complex", "integer")}, backend_version
)
def conv_general_dilated(
x: Union[tf.Tensor, tf.Variable],
filters: Union[tf.Tensor, tf.Variable],
(Diff truncated: the remaining 18 of 32 changed files are not shown.)

