From 20d06838d1069efe0d93430f926b5d90b9daf230 Mon Sep 17 00:00:00 2001
From: David Gomez-Urquiza
Date: Sat, 17 Jun 2023 23:39:34 -0600
Subject: [PATCH] Split torch ops into separate files

Fixes #29

Bonus additions:
- Add `torch.multinomial`
- Add `torch.oneHot`
- Add `torch.gradient`
---
 .../scala/torch/nn/functional/sparse.scala    |   28 +
 core/src/main/scala/torch/ops/BLASOps.scala   |   25 +
 .../main/scala/torch/ops/ComparisonOps.scala  |   33 +
 .../main/scala/torch/ops/CreationOps.scala    |  353 +++++
 .../torch/ops/IndexingSlicingJoiningOps.scala |   79 ++
 .../main/scala/torch/ops/PointwiseOps.scala   |  674 ++++++++++
 .../scala/torch/ops/RandomSamplingOps.scala   |  155 +++
 .../main/scala/torch/ops/ReductionOps.scala   |   73 +
 core/src/main/scala/torch/torch.scala         | 1180 -----------------
 .../test/scala/torch/TensorCheckSuite.scala   |    4 +-
 core/src/test/scala/torch/TensorSuite.scala   |  828 +-----------
 .../torch/nn/functional/SparseSuite.scala     |   31 +
 .../scala/torch/ops/CreationOpsSuite.scala    |   48 +
 .../scala/torch/ops/PointwiseOpsSuite.scala   |  826 ++++++++++++
 .../torch/ops/RandomSamplingOpsSuite.scala    |   41 +
 .../scala/torch/ops/ReductionOpsSuite.scala   |   28 +
 16 files changed, 2397 insertions(+), 2009 deletions(-)
 create mode 100644 core/src/main/scala/torch/nn/functional/sparse.scala
 create mode 100644 core/src/main/scala/torch/ops/BLASOps.scala
 create mode 100644 core/src/main/scala/torch/ops/ComparisonOps.scala
 create mode 100644 core/src/main/scala/torch/ops/CreationOps.scala
 create mode 100644 core/src/main/scala/torch/ops/IndexingSlicingJoiningOps.scala
 create mode 100644 core/src/main/scala/torch/ops/PointwiseOps.scala
 create mode 100644 core/src/main/scala/torch/ops/RandomSamplingOps.scala
 create mode 100644 core/src/main/scala/torch/ops/ReductionOps.scala
 create mode 100644 core/src/test/scala/torch/nn/functional/SparseSuite.scala
 create mode 100644 core/src/test/scala/torch/ops/CreationOpsSuite.scala
 create mode 100644 core/src/test/scala/torch/ops/PointwiseOpsSuite.scala
 create mode 100644 core/src/test/scala/torch/ops/RandomSamplingOpsSuite.scala
 create mode 100644 core/src/test/scala/torch/ops/ReductionOpsSuite.scala

diff --git a/core/src/main/scala/torch/nn/functional/sparse.scala b/core/src/main/scala/torch/nn/functional/sparse.scala
new file mode 100644
index 00000000..fda7710c
--- /dev/null
+++ b/core/src/main/scala/torch/nn/functional/sparse.scala
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2022 storch.dev
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package torch
+package nn
+package functional
+
+import org.bytedeco.pytorch.global.torch as torchNative
+
+/** Takes a `LongTensor` with index values of shape `(*)` and returns a tensor of shape `(*,
+ * numClasses)` that has zeros everywhere except where the index of the last dimension matches the
+ * corresponding value of the input tensor, in which case it will be 1.
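+ *
+ * A shape sketch (illustrative; assumes `torch.arange` with `Long` arguments derives an `Int64`
+ * tensor, as implemented elsewhere in this patch):
+ * {{{
+ * val idx = torch.arange(0L, 3L) // Tensor[Int64] containing [0, 1, 2]
+ * oneHot(idx, numClasses = 4)    // shape (3, 4): [[1,0,0,0], [0,1,0,0], [0,0,1,0]]
+ * }}}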
+ */ +def oneHot(input: Tensor[Int64], numClasses: Long = -1): Tensor[Int64] = + Tensor(torchNative.one_hot(input.native, numClasses)) diff --git a/core/src/main/scala/torch/ops/BLASOps.scala b/core/src/main/scala/torch/ops/BLASOps.scala new file mode 100644 index 00000000..2b1e0dc9 --- /dev/null +++ b/core/src/main/scala/torch/ops/BLASOps.scala @@ -0,0 +1,25 @@ +/* + * Copyright 2022 storch.dev + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package torch + +/** BLAS and LAPACK Operations + * + * https://pytorch.org/docs/stable/torch.html#blas-and-lapack-operations + */ + +def matmul[D1 <: DType, D2 <: DType](t1: Tensor[D1], t2: Tensor[D2]): Tensor[Promoted[D1, D2]] = + t1.matmul(t2) diff --git a/core/src/main/scala/torch/ops/ComparisonOps.scala b/core/src/main/scala/torch/ops/ComparisonOps.scala new file mode 100644 index 00000000..169a672e --- /dev/null +++ b/core/src/main/scala/torch/ops/ComparisonOps.scala @@ -0,0 +1,33 @@ +/* + * Copyright 2022 storch.dev + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package torch + +import org.bytedeco.pytorch.global.torch as torchNative + +/** Comparison Ops + * + * https://pytorch.org/docs/stable/torch.html#comparison-ops + */ + +def allclose( + input: Tensor[?], + other: Tensor[?], + rtol: Double = 1e-05, + atol: Double = 1e-08, + equalNan: Boolean = false +) = + torchNative.allclose(input.native, other.native, rtol, atol, equalNan) diff --git a/core/src/main/scala/torch/ops/CreationOps.scala b/core/src/main/scala/torch/ops/CreationOps.scala new file mode 100644 index 00000000..40cb85b5 --- /dev/null +++ b/core/src/main/scala/torch/ops/CreationOps.scala @@ -0,0 +1,353 @@ +/* + * Copyright 2022 storch.dev + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package torch
+
+import internal.NativeConverters
+import NativeConverters.*
+import Layout.Strided
+import Device.CPU
+import MemoryFormat.Contiguous
+
+import org.bytedeco.pytorch
+import org.bytedeco.pytorch.{IValue, GenericDict, BoolOptional, Scalar, MemoryFormatOptional}
+import org.bytedeco.pytorch.global.torch as torchNative
+
+import java.nio.file.{Files, Path}
+import scala.collection.immutable.{VectorMap, SeqMap}
+
+/** Creation Ops
+ *
+ * https://pytorch.org/docs/stable/torch.html#creation-ops
+ */
+
+// TODO sparse_coo_tensor
+// TODO as_tensor
+// TODO as_strided
+// TODO frombuffer
+
+/** Returns a tensor filled with the scalar value `0`, with the shape defined by the variable
+ * argument `size`.
+ *
+ * @param size
+ *   a sequence of integers defining the shape of the output tensor.
+ * @tparam D
+ *   the data type of the returned tensor.
+ */
+// def zeros[D <: DType](size: Int*): Tensor[Float32] =
+//   zeros[D](size.toSeq)
+def zeros[D <: DType](
+    size: Seq[Int] | Int,
+    dtype: D = float32,
+    layout: Layout = Strided,
+    device: Device = CPU,
+    requiresGrad: Boolean = false
+): Tensor[D] =
+  val nativeSize = size match
+    case s: Seq[Int] => s.map(_.toLong).toArray
+    case s: Int      => Array(s.toLong)
+  Tensor(
+    torchNative.torch_zeros(
+      nativeSize,
+      NativeConverters.tensorOptions(dtype, layout, device, requiresGrad)
+    )
+  )
+
+def zerosLike[D <: DType, D2 <: DType | Derive](
+    input: Tensor[D],
+    dtype: D2 = derive,
+    layout: Layout | Derive = derive,
+    device: Device | Derive = derive,
+    requiresGrad: Boolean = false,
+    memoryFormat: MemoryFormat = MemoryFormat.Preserve
+): Tensor[DTypeOrDeriveFromTensor[D, D2]] =
+  xLike(input, dtype, layout, device, requiresGrad, memoryFormat, torchNative.torch_zeros_like)
+
+/** Returns a tensor filled with the scalar value `1`, with the shape defined by the variable
+ * argument `size`.
+ *
+ * @param size
+ *   a sequence of integers defining the shape of the output tensor.
+ * @tparam D
+ *   the data type of the returned tensor.
+ */
+def ones[D <: DType](
+    size: Seq[Int] | Int,
+    dtype: D = float32,
+    layout: Layout = Strided,
+    device: Device = CPU,
+    requiresGrad: Boolean = false
+): Tensor[D] =
+  val nativeSize = size match
+    case s: Seq[Int] => s.map(_.toLong).toArray
+    case s: Int      => Array(s.toLong)
+  Tensor(
+    torchNative.torch_ones(
+      nativeSize,
+      NativeConverters.tensorOptions(dtype, layout, device, requiresGrad)
+    )
+  )
+
+def onesLike[D <: DType, D2 <: DType | Derive](
+    input: Tensor[D],
+    dtype: D2 = derive,
+    layout: Layout | Derive = derive,
+    device: Device | Derive = derive,
+    requiresGrad: Boolean = false,
+    memoryFormat: MemoryFormat = MemoryFormat.Preserve
+): Tensor[DTypeOrDeriveFromTensor[D, D2]] =
+  xLike(input, dtype, layout, device, requiresGrad, memoryFormat, torchNative.torch_ones_like)
+
+// format: off
+/** Returns a 1-D tensor of size $`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`$ with values
+ * from the interval `[start, end)` taken with common difference `step` beginning from `start`.
+ *
+ * Note that non-integer `step` is subject to floating point rounding errors when comparing against `end`;
+ * to avoid inconsistency, we advise adding a small epsilon to `end` in such cases.
+ *
+ * $$
+ * \text{out}_{{i+1}} = \text{out}_{i} + \text{step}
+ * $$
+ *
+ * @param start
+ *   The starting value for the set of points. Default: `0`.
+ * @param end
+ *   The ending value for the set of points.
+ * @param step
+ *   The gap between each pair of adjacent points. Default: `1`.
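+ *
+ * For instance (an illustrative sketch; the result dtype is derived from the argument types
+ * unless `dtype` is given explicitly):
+ * {{{
+ * arange(0, 10, 2) // Tensor[Int32] containing [0, 2, 4, 6, 8]
+ * arange(end = 5)  // Tensor[Int32] containing [0, 1, 2, 3, 4]
+ * }}}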
+ */
+// format: on
+def arange[D <: DType | Derive, Start <: ScalaType, End <: ScalaType, Step <: ScalaType](
+    start: Start = 0,
+    end: End,
+    step: Step = 1,
+    dtype: D = derive,
+    layout: Layout = Strided,
+    device: Device = CPU,
+    requiresGrad: Boolean = false
+): Tensor[DTypeOrDeriveArange[D, Start, End, Step]] =
+  val derivedDType = dtype match
+    case _: Derive => derivedArangeType(start, end, step)
+    case t: DType  => t
+  Tensor(
+    torchNative.torch_arange(
+      toScalar(start),
+      toScalar(end),
+      toScalar(step),
+      NativeConverters.tensorOptions(derivedDType, layout, device, requiresGrad)
+    )
+  )
+
+def linspace[D <: DType](
+    start: Double,
+    end: Double,
+    steps: Long,
+    dtype: D = float32,
+    layout: Layout = Strided,
+    device: Device = CPU,
+    requiresGrad: Boolean = false
+): Tensor[D] =
+  Tensor(
+    torchNative.torch_linspace(
+      new Scalar(start),
+      new Scalar(end),
+      steps,
+      NativeConverters.tensorOptions(dtype, layout, device, requiresGrad)
+    )
+  )
+
+def logspace[D <: DType](
+    start: Double,
+    end: Double,
+    steps: Long,
+    base: Double = 10.0,
+    dtype: D = float32,
+    layout: Layout = Strided,
+    device: Device = CPU,
+    requiresGrad: Boolean = false
+): Tensor[D] = Tensor(
+  torchNative.torch_logspace(
+    new Scalar(start),
+    new Scalar(end),
+    steps,
+    base,
+    NativeConverters.tensorOptions(dtype, layout, device, requiresGrad)
+  )
+)
+
+/** Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.
+ *
+ * @param n
+ *   the number of rows
+ * @param m
+ *   the number of columns with default being `n`
+ * @param dtype
+ *   the desired data type of the returned tensor.
+ * @param layout
+ *   the desired layout of the returned tensor.
+ * @param device
+ *   the desired device of the returned tensor.
+ * @param requiresGrad
+ *   If autograd should record operations on the returned tensor.
+ */
+def eye[D <: DType](
+    n: Int,
+    m: Option[Int] = None,
+    dtype: D = float32,
+    layout: Layout = Strided,
+    device: Device = CPU,
+    requiresGrad: Boolean = false
+): Tensor[D] =
+  val options = NativeConverters.tensorOptions(dtype, layout, device, requiresGrad)
+  Tensor(
+    m match
+      // Honor `m` when given; assumes the preset exposes the (n, m, options) overload of at::eye.
+      case Some(m) => torchNative.torch_eye(n, m, options)
+      case None    => torchNative.torch_eye(n, options)
+  )
+// def empty(size: Long*): Tensor[D] = Tensor(torchNative.torch_empty(size*))
+
+/** Returns a tensor filled with uninitialized data. */
+def empty[D <: DType](
+    size: Seq[Int],
+    dtype: D = float32,
+    layout: Layout = Strided,
+    device: Device = CPU,
+    requiresGrad: Boolean = false,
+    pinMemory: Boolean = false,
+    memoryFormat: MemoryFormat = Contiguous
+): Tensor[D] =
+  Tensor(
+    torchNative.torch_empty(
+      size.toArray.map(_.toLong),
+      NativeConverters
+        .tensorOptions(dtype, layout, device, requiresGrad)
+        .pinned_memory(BoolOptional(pinMemory)),
+      new MemoryFormatOptional(memoryFormat.toNative)
+    )
+  )
+
+/** Returns an uninitialized tensor with the same size as input.
+ *
+ * `torch.empty_like(input)` is equivalent to `torch.empty(input.size(), dtype=input.dtype,
+ * layout=input.layout, device=input.device)`.
+ */
+def emptyLike[D <: DType, D2 <: DType | Derive](
+    input: Tensor[D],
+    dtype: D2 = derive,
+    layout: Layout | Derive = derive,
+    device: Device | Derive = derive,
+    requiresGrad: Boolean = false,
+    memoryFormat: MemoryFormat = MemoryFormat.Preserve
+): Tensor[DTypeOrDeriveFromTensor[D, D2]] =
+  xLike(input, dtype, layout, device, requiresGrad, memoryFormat, torchNative.torch_empty_like)
+
+// // TODO emptyStrided
+
+/** Creates a tensor of size `size` filled with `fillValue`. The tensor's dtype is inferred from
+ * `fillValue`.
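+ *
+ * For example (illustrative):
+ * {{{
+ * full(Seq(2, 3), 1.5f) // a 2x3 Tensor[Float32] filled with 1.5
+ * full(Seq(2), 7L)      // a Tensor[Int64] filled with 7
+ * }}}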
+ * + * @param size + * a sequence of integers defining the shape of the output tensor. + * @param fillValue + * the value to fill the output tensor with. + * @param dtype + * the desired data type of the returned tensor. + * @param layout + * the desired layout of the returned Tensor. + * @param device + * the desired device of the returned tensor. + * @param requiresGrad + * If autograd should record operations on the returned tensor. + * @tparam T + * the data type of the returned tensor, or `Default` if the type should be derived from + * `fillValue`. + * @tparam U + * the data type of `fillValue`. + * @return + * the newly created tensor. + */ +def full[D <: DType | Derive, U <: ScalaType]( + size: Seq[Int], + fillValue: U, + dtype: D = derive, + layout: Layout = Strided, + device: Device = CPU, + requiresGrad: Boolean = false +): Tensor[DTypeOrDeriveFromScalar[D, U]] = + val derivedDType = dtype match + case _: Derive => scalaToDType(fillValue) + case t: DType => t + Tensor( + torchNative.torch_full( + size.toArray.map(_.toLong), + toScalar(fillValue), + NativeConverters.tensorOptions(derivedDType, layout, device, requiresGrad) + ) + ) + +// TODO fullLike +// TODO quantize_per_tensor +// TODO quantize_per_channel +// TODO dequantize +// TODO complex +// TODO polar +// TODO heavside + +def pickleLoad(data: Array[Byte]): SeqMap[String, Tensor[DType]] = + val dict: GenericDict = torchNative.pickle_load(data).toGenericDict() + // We need to extract the members in one go or we risk too early deallocation of native objects here + val buffer = new Array[(IValue, IValue)](dict.size().toInt) + val nativeIt = dict.begin() + for (i <- 0 until buffer.size) + buffer(i) = (nativeIt.access().key(), nativeIt.access().value()) + nativeIt.increment() + VectorMap.from(buffer.map { (key, value) => + // TODO better error handling + (key.toStringRef().getString(), Tensor[DType](value.toTensor().clone())) + }) + +def pickleLoad(path: Path): Map[String, Tensor[DType]] = + val data: Array[Byte] = Files.readAllBytes(path) + pickleLoad(data) + +def pickleSave(tensors: SeqMap[String, Tensor[DType]]) = + tensors.map { (k, v) => + (IValue(k), IValue(v.native)) + } + +private def xLike[D <: DType, D2 <: DType | Derive]( + input: Tensor[D], + dtype: D2, + layout: Layout | Derive, + device: Device | Derive, + requiresGrad: Boolean, + memoryFormat: MemoryFormat, + nativeFn: ( + pytorch.Tensor, + pytorch.TensorOptions, + pytorch.MemoryFormatOptional + ) => pytorch.Tensor +): Tensor[DTypeOrDeriveFromTensor[D, D2]] = + val derivedDType = dtype match + case _: Derive => input.dtype + case d: DType => d + val derivedLayout = layout match + case _: Derive => input.layout + case l: Layout => l + val derivedDevice = device match + case _: Derive => input.device + case d: Device => d + Tensor( + nativeFn( + input.native, + NativeConverters.tensorOptions(derivedDType, derivedLayout, derivedDevice, requiresGrad), + new MemoryFormatOptional(memoryFormat.toNative) + ) + ) diff --git a/core/src/main/scala/torch/ops/IndexingSlicingJoiningOps.scala b/core/src/main/scala/torch/ops/IndexingSlicingJoiningOps.scala new file mode 100644 index 00000000..70bcd8ca --- /dev/null +++ b/core/src/main/scala/torch/ops/IndexingSlicingJoiningOps.scala @@ -0,0 +1,79 @@ +/* + * Copyright 2022 storch.dev + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package torch + +import org.bytedeco.pytorch.{TensorArrayRef, TensorVector} +import org.bytedeco.pytorch.global.torch as torchNative + +/** Indexing, Slicing, Joining, Mutating Ops + * + * https://pytorch.org/docs/stable/torch.html#indexing-slicing-joining-mutating-ops + */ + +def cat[D <: DType](tensors: Seq[Tensor[D]], dim: Int = 0): Tensor[D] = Tensor( + torchNative.cat(new TensorArrayRef(new TensorVector(tensors.map(_.native)*)), dim.toLong) +) + +// TODO dsplit +// TODO column_stack +// TODO dstack +// TODO gather +// TODO hsplit +// TODO hstack +// TODO index_add +// TODO index_copy +// TODO index_reduce +// TODO index_select +// TODO masked_select +// TODO movedim +// TODO moveaxis +// TODO narrow +// TODO narrow_copy +// TODO nonzero +// TODO permute +// TODO reshape +// TODO select +// TODO scatter +// TODO diagonal_scatter +// TODO select_scatter +// TODO slice_scatterd +// TODO scatter_add +// TODO scatter_reduce +// TODO split +// TODO squeeze + +/** Concatenates a sequence of tensors along a new dimension. + * + * All tensors need to be of the same size. + */ +def stack[D <: DType](tensors: Seq[Tensor[D]], dim: Int = 0): Tensor[D] = Tensor( + torchNative.stack(new TensorArrayRef(new TensorVector(tensors.map(_.native)*)), dim) +) + +// TODO swapaxes +// TODO swapdims +// TODO t +// TODO take +// TODO take_along_dim +// TODO tensor_split +// TODO tile +// TODO transpose +// TODO unbind +// TODO unsqueeze +// TODO vsplit +// TODO vstack +// TODO where diff --git a/core/src/main/scala/torch/ops/PointwiseOps.scala b/core/src/main/scala/torch/ops/PointwiseOps.scala new file mode 100644 index 00000000..6a1cc4fd --- /dev/null +++ b/core/src/main/scala/torch/ops/PointwiseOps.scala @@ -0,0 +1,674 @@ +/* + * Copyright 2022 storch.dev + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package torch + +import internal.NativeConverters.* +import org.bytedeco.pytorch.global.torch as torchNative + +/** Pointwise Ops + * + * https://pytorch.org/docs/stable/torch.html#pointwise-ops + */ + +/** Computes the absolute value of each element in `input`. */ +def abs[D <: NumericNN](input: Tensor[D]): Tensor[D] = + Tensor(torchNative.abs(input.native)) + +/** Computes the inverse cosine of each element in `input`. */ +def acos[D <: DType](input: Tensor[D]): Tensor[D] = + Tensor(torchNative.acos(input.native)) + +/** Returns a new tensor with the inverse hyperbolic cosine of the elements of `input` . */ +def acosh[D <: DType](input: Tensor[D]): Tensor[D] = + Tensor(torchNative.acosh(input.native)) + +/** Adds `other` to `input`. 
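+ *
+ * A small sketch (illustrative; the result dtype follows the usual type promotion):
+ * {{{
+ * val a = torch.arange(0, 3)           // Tensor[Int32] [0, 1, 2]
+ * add(a, torch.ones(3, dtype = int32)) // [1, 2, 3]
+ * add(a, 10)                           // scalar overload: [10, 11, 12]
+ * }}}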
*/ +def add[D <: DType, D2 <: DType](input: Tensor[D], other: Tensor[D2]): Tensor[Promoted[D, D2]] = + Tensor(torchNative.add(input.native, other.native)) + +/** Adds `other` to `input`. */ +def add[D <: DType, S <: ScalaType]( + input: Tensor[D], + other: S +): Tensor[Promoted[D, ScalaToDType[S]]] = + Tensor(torchNative.add(input.native, toScalar(other))) + +/** Performs the element-wise division of tensor1 by tensor2, multiplies the result by the scalar + * value and adds it to input. + */ +def addcdiv[D <: DType, D2 <: DType, D3 <: DType]( + input: Tensor[D], + tensor1: Tensor[D2], + tensor2: Tensor[D3], + value: ScalaType +): Tensor[Promoted[D, Promoted[D2, D3]]] = + Tensor(torchNative.addcdiv(input.native, tensor1.native, tensor2.native, toScalar(value))) + +/** Performs the element-wise multiplication of tensor1 by tensor2, multiplies the result by the + * scalar value and adds it to input. + */ +def addcmul[D <: DType, D2 <: DType, D3 <: DType]( + input: Tensor[D], + tensor1: Tensor[D2], + tensor2: Tensor[D3], + value: ScalaType +): Tensor[Promoted[D, Promoted[D2, D3]]] = + Tensor(torchNative.addcmul(input.native, tensor1.native, tensor2.native, toScalar(value))) + +/** Computes the element-wise angle (in radians) of the given `input` tensor. */ +def angle[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[ComplexToReal[D]]] = + Tensor(torchNative.angle(input.native)) + +/** Returns a new tensor with the arcsine of the elements of `input`. */ +def asin[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] = + Tensor(torchNative.asin(input.native)) + +/** Returns a new tensor with the inverse hyperbolic sine of the elements of `input`. */ +def asinh[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] = + Tensor(torchNative.asinh(input.native)) + +/** Returns a new tensor with the arctangent of the elements of `input`. */ +def atan[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] = + Tensor(torchNative.atan(input.native)) + +/** Returns a new tensor with the inverse hyperbolic tangent of the elements of `input`. */ +def atanh[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] = + Tensor(torchNative.atanh(input.native)) + +/** Element-wise arctangent of (input / other) with consideration of the quadrant. Returns a new + * tensor with the signed angles in radians between vector (other, input) and vector (1, 0). (Note + * that other, the second parameter, is the x-coordinate, while input, the first parameter, is the + * y-coordinate.) + */ +def atan2[D <: RealNN, D2 <: RealNN]( + input: Tensor[D], + other: Tensor[D2] +): Tensor[FloatPromoted[Promoted[D, D2]]] = + Tensor(torchNative.atan2(input.native, other.native)) + +/** Computes the bitwise NOT of the given `input` tensor. The `input` tensor must be of integral or + * Boolean types. For bool tensors, it computes the logical NOT. + */ +def bitwiseNot[D <: BitwiseNN](input: Tensor[D]): Tensor[D] = + Tensor(torchNative.bitwise_not(input.native)) + +/** Computes the bitwise AND of `input` and `other`. For bool tensors, it computes the logical AND. + */ +def bitwiseAnd[D <: BitwiseNN, D2 <: BitwiseNN]( + input: Tensor[D], + other: Tensor[D2] +): Tensor[Promoted[D, D2]] = + Tensor(torchNative.bitwise_and(input.native, other.native)) + +/** Computes the bitwise OR of `input` and `other`. For bool tensors, it computes the logical OR. 
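+ *
+ * For instance (illustrative):
+ * {{{
+ * val bits = torch.arange(0, 4)          // [0, 1, 2, 3]
+ * bitwiseOr(bits, torch.full(Seq(4), 1)) // [1, 1, 3, 3]
+ * }}}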
+ */
+def bitwiseOr[D <: BitwiseNN, D2 <: BitwiseNN](
+    input: Tensor[D],
+    other: Tensor[D2]
+): Tensor[Promoted[D, D2]] =
+  Tensor(torchNative.bitwise_or(input.native, other.native))
+
+/** Computes the bitwise XOR of `input` and `other`. For bool tensors, it computes the logical XOR.
+ */
+def bitwiseXor[D <: BitwiseNN, D2 <: BitwiseNN](
+    input: Tensor[D],
+    other: Tensor[D2]
+): Tensor[Promoted[D, D2]] =
+  Tensor(torchNative.bitwise_xor(input.native, other.native))
+
+/** Computes the left arithmetic shift of `input` by `other` bits. */
+def bitwiseLeftShift[D <: BitwiseNN, D2 <: BitwiseNN](
+    input: Tensor[D],
+    other: Tensor[D2]
+)(using OnlyOneBool[D, D2]): Tensor[Promoted[D, D2]] =
+  Tensor(torchNative.bitwise_left_shift(input.native, other.native))
+
+/** Computes the right arithmetic shift of `input` by `other` bits. */
+def bitwiseRightShift[D <: BitwiseNN, D2 <: BitwiseNN](
+    input: Tensor[D],
+    other: Tensor[D2]
+)(using OnlyOneBool[D, D2]): Tensor[Promoted[D, D2]] =
+  Tensor(torchNative.bitwise_right_shift(input.native, other.native))
+
+/** Returns a new tensor with the ceil of the elements of `input`, the smallest integer greater
+ * than or equal to each element.
+ */
+def ceil[D <: NumericRealNN](input: Tensor[D]): Tensor[D] =
+  Tensor(torchNative.ceil(input.native))
+
+/** Clamps all elements in `input` into the range `[min, max]`. Letting `min_value` and
+ * `max_value` be `min` and `max`, respectively, this returns
+ * `min(max(input, min_value), max_value)`. If `min` is `None` there is no lower bound, and if
+ * `max` is `None` there is no upper bound.
+ */
+// TODO Support Tensor for min and max
+def clamp[D <: RealNN](
+    input: Tensor[D],
+    min: Option[Real],
+    max: Option[Real]
+): Tensor[D] =
+  Tensor(torchNative.clamp(input.native, toOptional(min), toOptional(max)))
+
+/** Computes the element-wise conjugate of the given `input` tensor. If input has a non-complex
+ * dtype, this function just returns input.
+ */
+def conjPhysical[D <: DType](input: Tensor[D]): Tensor[D] =
+  Tensor(torchNative.conj_physical(input.native))
+
+/** Creates a new floating-point tensor with the magnitude of `input` and the sign of `other`,
+ * elementwise.
+ */
+def copysign[D <: RealNN, D2 <: RealNN](
+    input: Tensor[D],
+    other: TensorOrReal[D2]
+): Tensor[FloatPromoted[D]] =
+  Tensor(
+    other match
+      case other: Tensor[D2] =>
+        torchNative.copysign(input.native, other.native)
+      case other: Real =>
+        torchNative.copysign(input.native, toScalar(other))
+  )
+
+/** Returns a new tensor with the cosine of the elements of `input`. */
+def cos[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
+  Tensor(torchNative.cos(input.native))
+
+/** Returns a new tensor with the hyperbolic cosine of the elements of `input`. */
+def cosh[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
+  Tensor(torchNative.cosh(input.native))
+
+/** Returns a new tensor with each of the elements of `input` converted from angles in degrees to
+ * radians.
+ */
+def deg2rad[D <: RealNN](input: Tensor[D]): Tensor[FloatPromoted[D]] =
+  Tensor(torchNative.deg2rad(input.native))
+
+/** Divides each element of `input` by the corresponding element of `other`.
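+ *
+ * Integer inputs are promoted to a floating dtype (illustrative sketch):
+ * {{{
+ * div(torch.full(Seq(3), 6), torch.arange(1, 4)) // Float32 [6.0, 3.0, 2.0]
+ * }}}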
+ */
+// TODO handle roundingMode
+def div[D <: DType, D2 <: DType](
+    input: Tensor[D],
+    other: Tensor[D2]
+): Tensor[FloatPromoted[Promoted[D, D2]]] =
+  Tensor(torchNative.div(input.native, other.native))
+
+def div[D <: DType, S <: ScalaType](
+    input: Tensor[D],
+    other: S
+): Tensor[FloatPromoted[Promoted[D, ScalaToDType[S]]]] =
+  Tensor(torchNative.div(input.native, toScalar(other)))
+
+export torch.special.digamma
+export torch.special.erf
+export torch.special.erfc
+export torch.special.erfinv
+
+/** Returns a new tensor with the exponential of the elements of `input`. */
+def exp[D <: DType](input: Tensor[D]): Tensor[D] =
+  Tensor(torchNative.exp(input.native))
+
+export torch.special.exp2
+export torch.special.expm1
+
+/** Returns a new tensor with the data in `input` fake quantized per channel using `scale`,
+ * `zero_point`, `quant_min` and `quant_max`, across the channel specified by `axis`.
+ */
+def fakeQuantizePerChannelAffine(
+    input: Tensor[Float32],
+    scale: Tensor[Float32],
+    zeroPoint: Tensor[Int32 | Float16 | Float32],
+    axis: Long,
+    quantMin: Long,
+    quantMax: Long
+): Tensor[Float32] =
+  Tensor(
+    torchNative.fake_quantize_per_channel_affine(
+      input.native,
+      scale.native,
+      zeroPoint.native,
+      axis,
+      quantMin,
+      quantMax
+    )
+  )
+
+/** Returns a new tensor with the data in `input` fake quantized using `scale`, `zero_point`,
+ * `quant_min` and `quant_max`.
+ */
+def fakeQuantizePerTensorAffine(
+    input: Tensor[Float32],
+    scale: Tensor[Float32],
+    zeroPoint: Tensor[Int32],
+    quantMin: Long,
+    quantMax: Long
+): Tensor[Float32] =
+  Tensor(
+    torchNative.fake_quantize_per_tensor_affine(
+      input.native,
+      scale.native,
+      zeroPoint.native,
+      quantMin,
+      quantMax
+    )
+  )
+
+def fakeQuantizePerTensorAffine(
+    input: Tensor[Float32],
+    scale: Double,
+    zeroPoint: Long,
+    quantMin: Long,
+    quantMax: Long
+): Tensor[Float32] =
+  Tensor(
+    torchNative.fake_quantize_per_tensor_affine(input.native, scale, zeroPoint, quantMin, quantMax)
+  )
+
+/** Returns a new tensor with the truncated integer values of the elements of `input`. Alias for
+ * `torch.trunc`.
+ */
+def fix[D <: NumericRealNN](input: Tensor[D]): Tensor[D] =
+  Tensor(torchNative.fix(input.native))
+
+/** Raises `input` to the power of `exponent`, elementwise, in double precision. If neither input
+ * is complex, returns a `torch.float64` tensor, and if one or more inputs is complex, returns a
+ * `torch.complex128` tensor.
+ */
+def floatPower[D <: DType, D2 <: DType](
+    input: Tensor[D],
+    exponent: Tensor[D2]
+): Tensor[ComplexPromoted[D, D2]] =
+  Tensor(torchNative.float_power(input.native, exponent.native))
+
+def floatPower[D <: DType, S <: ScalaType](
+    input: S,
+    exponent: Tensor[D]
+): Tensor[ComplexPromoted[ScalaToDType[S], D]] =
+  Tensor(torchNative.float_power(toScalar(input), exponent.native))
+
+def floatPower[D <: DType, S <: ScalaType](
+    input: Tensor[D],
+    exponent: S
+): Tensor[ComplexPromoted[D, ScalaToDType[S]]] =
+  Tensor(torchNative.float_power(input.native, toScalar(exponent)))
+
+/** Returns a new tensor with the floor of the elements of `input`, the largest integer less than
+ * or equal to each element.
+ */
+def floor[D <: NumericRealNN](input: Tensor[D]): Tensor[D] =
+  Tensor(torchNative.floor(input.native))
+
+/** Computes `input` divided by `other`, elementwise, and floors the result.
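+ *
+ * For instance (illustrative):
+ * {{{
+ * floorDivide(torch.arange(1, 6), torch.full(Seq(5), 2)) // [0, 1, 1, 2, 2]
+ * }}}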
*/ +def floorDivide[D <: RealNN, D2 <: RealNN]( + input: Tensor[D], + other: Tensor[D2] +)(using OnlyOneBool[D, D2]): Tensor[Promoted[D, D2]] = + Tensor(torchNative.floor_divide(input.native, other.native)) + +def floorDivide[D <: RealNN, R <: Real]( + input: Tensor[D], + other: R +)(using OnlyOneBool[D, ScalaToDType[R]]): Tensor[Promoted[D, ScalaToDType[R]]] = + Tensor(torchNative.floor_divide(input.native, toScalar(other))) + +/** Applies C++’s `std::fmod` entrywise. The result has the same sign as the dividend `input` and + * its absolute value is less than that of `other`. + */ +// NOTE: When the divisor is zero, returns NaN for floating point dtypes on both CPU and GPU; raises RuntimeError for integer division by zero on CPU; Integer division by zero on GPU may return any value. +def fmod[D <: RealNN, D2 <: RealNN]( + input: Tensor[D], + other: Tensor[D2] +)(using OnlyOneBool[D, D2]): Tensor[Promoted[D, D2]] = + Tensor(torchNative.fmod(input.native, other.native)) + +def fmod[D <: RealNN, S <: ScalaType]( + input: Tensor[D], + other: S +)(using OnlyOneBool[D, ScalaToDType[S]]): Tensor[Promoted[D, ScalaToDType[S]]] = + Tensor(torchNative.fmod(input.native, toScalar(other))) + +/** Computes the fractional portion of each element in `input`. */ +def frac[D <: FloatNN](input: Tensor[D]): Tensor[D] = + Tensor(torchNative.frac(input.native)) + +/** Decomposes `input` into `mantissa` and `exponent` tensors such that `input = mantissa * (2 ** + * exponent)` The range of mantissa is the open interval (-1, 1). + */ +def frexp[D <: FloatNN](input: Tensor[D]): (Tensor[FloatPromoted[D]], Tensor[Int32]) = + val nativeTuple = torchNative.frexp(input.native) + (Tensor(nativeTuple.get0), new Int32Tensor(nativeTuple.get1)) + +/** Estimates the gradient of a function g:Rn → R in one or more dimensions using the second-order + * accurate central differences method. + */ +def gradient[D <: Int8 | Int16 | Int32 | Int64 | FloatNN | ComplexNN]( + input: Tensor[D], + spacing: Float, + dim: Seq[Int], + edgeOrder: Int = 1 +): Array[Tensor[D]] = + torchNative + .gradient(input.native, toScalar(spacing), dim.toArray.map(_.toLong), edgeOrder) + .get + .map(Tensor.apply[D]) + +/** Returns a new tensor containing imaginary values of the `input` tensor. The returned tensor and + * `input` share the same underlying storage. + */ +def imag[D <: ComplexNN](input: Tensor[D]): Tensor[ComplexToReal[D]] = + Tensor(torchNative.imag(input.native)) + +/** Multiplies `input` by 2 ** `other`. */ +def ldexp[D <: DType](input: Tensor[D], other: Tensor[D]): Tensor[D] = + Tensor(torchNative.ldexp(input.native, other.native)) + +/** Does a linear interpolation of two tensors `start` (given by `input`) and `end` (given by + * `other`) based on a scalar or tensor weight and returns the resulting out tensor. out = start + + * weight × (end − start) + */ +def lerp[D <: DType]( + input: Tensor[D], + other: Tensor[D], + weight: Tensor[D] | Float | Double +): Tensor[D] = + Tensor( + weight match + case weight: Tensor[D] => torchNative.lerp(input.native, other.native, weight.native) + case weight: Float => torchNative.lerp(input.native, other.native, toScalar(weight)) + case weight: Double => torchNative.lerp(input.native, other.native, toScalar(weight)) + ) + +/** Computes the natural logarithm of the absolute value of the gamma function on `input`. */ +def lgamma[D <: RealNN](input: Tensor[D]): Tensor[D] = + Tensor(torchNative.lgamma(input.native)) + +/** Returns a new tensor with the natural logarithm of the elements of `input`. 
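+ *
+ * Integer inputs are promoted to a floating dtype (sketch; values rounded here):
+ * {{{
+ * log(torch.arange(1, 4)) // Float32 [0.0, 0.6931, 1.0986]
+ * }}}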
*/ +def log[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] = + Tensor(torchNative.log(input.native)) + +/** Returns a new tensor with the logarithm to the base 10 of the elements of `input`. */ +def log10[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] = + Tensor(torchNative.log10(input.native)) + +/** Returns a new tensor with the natural logarithm of (1 + input). */ +def log1p[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] = + Tensor(torchNative.log1p(input.native)) + +/** Returns a new tensor with the logarithm to the base 2 of the elements of `input`. */ +def log2[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] = + Tensor(torchNative.log2(input.native)) + +/** Logarithm of the sum of exponentiations of the inputs. Calculates pointwise log `log(e**x + + * e**y)`. This function is useful in statistics where the calculated probabilities of events may + * be so small as to exceed the range of normal floating point numbers. In such cases the logarithm + * of the calculated probability is stored. This function allows adding probabilities stored in + * such a fashion. This op should be disambiguated with `torch.logsumexp()` which performs a + * reduction on a single tensor. + */ +def logaddexp[D <: RealNN, D2 <: RealNN]( + input: Tensor[D], + other: Tensor[D2] +): Tensor[Promoted[D, D2]] = + Tensor(torchNative.logaddexp(input.native, other.native)) + +/** Logarithm of the sum of exponentiations of the inputs in base-2. Calculates pointwise `log2(2**x + * + 2**y)`. See torch.logaddexp() for more details. + */ +def logaddexp2[D <: RealNN, D2 <: RealNN]( + input: Tensor[D], + other: Tensor[D2] +): Tensor[Promoted[D, D2]] = + Tensor(torchNative.logaddexp2(input.native, other.native)) + +/** Computes the element-wise logical AND of the given `input` tensors. Zeros are treated as False + * and nonzeros are treated as True. + */ +def logicalAnd[D <: DType, D2 <: DType](input: Tensor[D], other: Tensor[D2]): Tensor[Bool] = + Tensor(torchNative.logical_and(input.native, other.native)) + +/** Computes the element-wise logical NOT of the given `input` tensor. If the `input` tensor is not + * a bool tensor, zeros are treated as False and non-zeros are treated as True. + * + * TODO If not specified, the output tensor will have the bool dtype. + */ +def logicalNot[D <: DType](input: Tensor[D]): Tensor[Bool] = + Tensor(torchNative.logical_not(input.native)) + +/** Computes the element-wise logical OR of the given `input` tensors. Zeros are treated as False + * and nonzeros are treated as True. + */ +def logicalOr[D <: DType, D2 <: DType](input: Tensor[D], other: Tensor[D2]): Tensor[Bool] = + Tensor(torchNative.logical_or(input.native, other.native)) + +/** Computes the element-wise logical XOR of the given `input` tensors. Zeros are treated as False + * and nonzeros are treated as True. + */ +def logicalXor[D <: DType, D2 <: DType](input: Tensor[D], other: Tensor[D2]): Tensor[Bool] = + Tensor(torchNative.logical_xor(input.native, other.native)) + +export torch.special.logit + +/** Given the legs of a right triangle, return its hypotenuse. */ +// TODO Change `D2 <: RealNN` once we fix property testing compilation +def hypot[D <: RealNN, D2 <: FloatNN]( + input: Tensor[D], + other: Tensor[D2] +)(using AtLeastOneFloat[D, D2]): Tensor[FloatPromoted[Promoted[D, D2]]] = + Tensor(torchNative.hypot(input.native, other.native)) + +export torch.special.i0 +export torch.special.igamma +export torch.special.igammac + +/** Multiplies input by other. 
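+ *
+ * Works elementwise with the usual type promotion (illustrative):
+ * {{{
+ * mul(torch.arange(0, 3), torch.full(Seq(3), 2)) // [0, 2, 4]
+ * }}}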
*/ +def mul[D <: DType, D2 <: DType](input: Tensor[D], other: Tensor[D2]): Tensor[Promoted[D, D2]] = + Tensor(torchNative.mul(input.native, other.native)) + +export torch.special.mvlgamma + +/** Replaces NaN, positive infinity, and negative infinity values in `input` with the values + * specified by nan, posinf, and neginf, respectively. By default, NaNs are replaced with zero, + * positive infinity is replaced with the greatest finite value representable by input’s dtype, and + * negative infinity is replaced with the least finite value representable by input’s dtype. + */ +def nanToNum[D <: RealNN]( + input: Tensor[D], + nan: Option[Double] = None, + posinf: Option[Double] = None, + neginf: Option[Double] = None +): Tensor[D] = + Tensor( + torchNative.nan_to_num(input.native, toOptional(nan), toOptional(posinf), toOptional(neginf)) + ) + +/** Returns a new tensor with the negative of the elements of `input`. */ +def neg[D <: NumericNN](input: Tensor[D]): Tensor[D] = + Tensor(torchNative.neg(input.native)) + +/** Return the next floating-point value after `input` towards `other`, elementwise. */ +// TODO Change `D2 <: RealNN` once we fix property testing compilation +def nextafter[D <: RealNN, D2 <: FloatNN]( + input: Tensor[D], + other: Tensor[D2] +)(using AtLeastOneFloat[D, D2]): Tensor[FloatPromoted[Promoted[D, D2]]] = + Tensor(torchNative.nextafter(input.native, other.native)) + +export torch.special.polygamma + +/** Returns input. Normally throws a runtime error if input is a bool tensor in pytorch. */ +def positive[D <: NumericNN](input: Tensor[D]): Tensor[D] = + Tensor(torchNative.positive(input.native)) + +/** Takes the power of each element in `input` with exponent and returns a tensor with the result. + * `exponent` can be either a single float number or a Tensor with the same number of elements as + * input. + */ +def pow[D <: DType, D2 <: DType]( + input: Tensor[D], + exponent: Tensor[D2] +)(using OnlyOneBool[D, D2]): Tensor[Promoted[D, D2]] = + Tensor(torchNative.pow(input.native, exponent.native)) + +def pow[D <: DType, S <: ScalaType]( + input: Tensor[D], + exponent: S +)(using OnlyOneBool[D, ScalaToDType[S]]): Tensor[Promoted[D, ScalaToDType[S]]] = + Tensor(torchNative.pow(input.native, toScalar(exponent))) + +def pow[S <: ScalaType, D <: DType]( + input: S, + exponent: Tensor[D] +)(using OnlyOneBool[ScalaToDType[S], D]): Tensor[Promoted[ScalaToDType[S], D]] = + Tensor(torchNative.pow(toScalar(input), exponent.native)) + +// TODO Implement creation of QInts +// TODO quantized_batch_norm +// TODO quantized_max_pool1d +// TODO quantized_max_pool2d + +/** Returns a new tensor with each of the elements of `input` converted from angles in radians to + * degrees. + */ +def rad2Deg[D <: RealNN | Bool](input: Tensor[D]): Tensor[FloatPromoted[D]] = + Tensor(torchNative.rad2deg(input.native)) + +/** Returns a new tensor containing real values of the self tensor. The returned tensor and self + * share the same underlying storage. + */ +def real[D <: DType](input: Tensor[D]): Tensor[ComplexToReal[D]] = + Tensor(torchNative.real(input.native)) + +/** Returns a new tensor with the reciprocal of the elements of `input` */ +def reciprocal[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] = + Tensor(torchNative.reciprocal(input.native)) + +/** Computes Python’s modulus operation entrywise. The result has the same sign as the divisor + * `other` and its absolute value is less than that of `other`. 
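+ *
+ * Unlike `fmod`, the sign of the result follows the divisor (illustrative):
+ * {{{
+ * remainder(torch.arange(-3, 3), 3) // [0, 1, 2, 0, 1, 2]
+ * }}}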
+ */
+def remainder[D <: RealNN, D2 <: RealNN](
+    input: Tensor[D],
+    other: Tensor[D2]
+): Tensor[Promoted[D, D2]] =
+  Tensor(torchNative.remainder(input.native, other.native))
+
+def remainder[D <: DType, R <: Real](
+    input: Tensor[D],
+    other: R
+): Tensor[Promoted[D, ScalaToDType[R]]] =
+  Tensor(torchNative.remainder(input.native, toScalar(other)))
+
+def remainder[D <: DType, R <: Real](
+    input: R,
+    other: Tensor[D]
+): Tensor[Promoted[ScalaToDType[R], D]] =
+  Tensor(torchNative.remainder(toScalar(input), other.native))
+
+/** Rounds elements of `input` to the nearest integer. If `decimals` is negative, it specifies the
+ * number of positions to the left of the decimal point.
+ */
+def round[D <: FloatNN](input: Tensor[D], decimals: Long = 0): Tensor[D] =
+  Tensor(torchNative.round(input.native, decimals))
+
+/** Returns a new tensor with the reciprocal of the square-root of each of the elements of
+ * `input`.
+ */
+def rsqrt[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
+  Tensor(torchNative.rsqrt(input.native))
+
+export torch.special.sigmoid
+
+/** Returns a new tensor with the signs of the elements of `input`. */
+def sign[D <: RealNN](input: Tensor[D]): Tensor[D] =
+  Tensor(torchNative.sign(input.native))
+
+/** This function is an extension of `torch.sign()` to complex tensors. For complex tensors, it
+ * computes a new tensor whose elements have the same angles as the corresponding elements of
+ * `input` and absolute values (i.e. magnitudes) of one; for non-complex tensors it is equivalent
+ * to `torch.sign()`.
+ */
+def sgn[D <: DType](input: Tensor[D]): Tensor[D] =
+  Tensor(torchNative.sgn(input.native))
+
+/** Tests if each element of `input` has its sign bit set or not. */
+def signbit[D <: RealNN](input: Tensor[D]): Tensor[Bool] =
+  Tensor(torchNative.signbit(input.native))
+
+/** Returns a new tensor with the sine of the elements of `input`. */
+def sin[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
+  Tensor(torchNative.sin(input.native))
+
+export torch.special.sinc
+
+/** Returns a new tensor with the hyperbolic sine of the elements of `input`. */
+def sinh[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
+  Tensor(torchNative.sinh(input.native))
+
+export torch.nn.functional.softmax
+
+/** Returns a new tensor with the square-root of the elements of `input`. */
+def sqrt[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
+  Tensor(torchNative.sqrt(input.native))
+
+/** Returns a new tensor with the square of the elements of `input`. */
+def square[D <: DType](input: Tensor[D]): Tensor[NumericPromoted[D]] =
+  Tensor(torchNative.square(input.native))
+
+/** Subtracts `other`, scaled by `alpha`, from `input`. */
+def sub[D <: NumericNN, D2 <: NumericNN](
+    input: Tensor[D],
+    other: Tensor[D2]
+): Tensor[Promoted[D, D2]] =
+  Tensor(torchNative.sub(input.native, other.native))
+
+def sub[D <: NumericNN, D2 <: NumericNN](
+    input: Tensor[D],
+    other: Tensor[D2],
+    alpha: ScalaType
+): Tensor[Promoted[D, D2]] =
+  Tensor(torchNative.sub(input.native, other.native, toScalar(alpha)))
+
+def sub[D <: NumericNN, S <: ScalaType](
+    input: Tensor[D],
+    other: S,
+    alpha: ScalaType
+): Tensor[Promoted[D, ScalaToDType[S]]] =
+  Tensor(torchNative.sub(input.native, toScalar(other), toScalar(alpha)))
+
+/** Returns a new tensor with the tangent of the elements of `input`.
+ */
+def tan[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
+  Tensor(torchNative.tan(input.native))
+
+/** Returns a new tensor with the hyperbolic tangent of the elements of `input`. */
+def tanh[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
+  Tensor(torchNative.tanh(input.native))
+
+/** Alias for `torch.div()` with `rounding_mode=None`. */
+def trueDivide[D <: DType, D2 <: DType](
+    input: Tensor[D],
+    other: Tensor[D2]
+): Tensor[FloatPromoted[Promoted[D, D2]]] =
+  Tensor(torchNative.true_divide(input.native, other.native))
+
+def trueDivide[D <: DType, S <: ScalaType](
+    input: Tensor[D],
+    other: S
+): Tensor[FloatPromoted[Promoted[D, ScalaToDType[S]]]] =
+  Tensor(torchNative.true_divide(input.native, toScalar(other)))
+
+/** Returns a new tensor with the truncated integer values of the elements of `input`. */
+def trunc[D <: NumericRealNN](input: Tensor[D]): Tensor[D] =
+  Tensor(torchNative.trunc(input.native))
+
+export torch.special.xlogy
diff --git a/core/src/main/scala/torch/ops/RandomSamplingOps.scala b/core/src/main/scala/torch/ops/RandomSamplingOps.scala
new file mode 100644
index 00000000..b39f22da
--- /dev/null
+++ b/core/src/main/scala/torch/ops/RandomSamplingOps.scala
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2022 storch.dev
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package torch
+
+import Layout.Strided
+import Device.CPU
+import internal.NativeConverters
+import NativeConverters.*
+
+import org.bytedeco.pytorch.global.torch as torchNative
+
+/** Random Sampling
+ *
+ * https://pytorch.org/docs/stable/torch.html#random-sampling
+ */
+
+// TODO seed Sets the seed for generating random numbers to a non-deterministic random number.
+// TODO manual_seed Sets the seed for generating random numbers.
+// TODO initial_seed Returns the initial seed for generating random numbers as a Python long.
+// TODO get_rng_state Returns the random number generator state as a torch.ByteTensor.
+// TODO set_rng_state Sets the random number generator state.
+// TODO bernoulli Draws binary random numbers (0 or 1) from a Bernoulli distribution.
+
+/** Returns a tensor where each row contains `numSamples` indices sampled from the multinomial
+ * probability distribution located in the corresponding row of tensor `input`.
+ */
+// TODO Demote Float to Int
+def multinomial[D <: FloatNN](
+    input: Tensor[D],
+    numSamples: Long,
+    replacement: Boolean = false,
+    generator: Option[?] = None
+): Tensor[D] =
+  // TODO thread the `generator` argument through once a generator type is exposed;
+  // until then we always pass an empty native optional.
+  val nativeGenerator = new org.bytedeco.pytorch.GeneratorOptional()
+  Tensor(torchNative.multinomial(input.native, numSamples, replacement, nativeGenerator))
+
+// TODO normal Returns a tensor of random numbers drawn from separate normal distributions whose mean and standard deviation are given.
+// TODO poisson Returns a tensor of the same size as input with each element sampled from a Poisson distribution with rate parameter given by the corresponding element in input.
+
+/** Returns a tensor filled with random numbers from a uniform distribution on the interval
+ * `[0, 1)`.
+ *
+ * The shape of the tensor is defined by the variable argument `size`.
+ *
+ * @param size
+ *   a sequence of integers defining the shape of the output tensor.
+ * @param dtype
+ *   the desired data type of the returned tensor.
+ * @param layout
+ *   the desired layout of the returned tensor.
+ * @param device
+ *   the desired device of the returned tensor.
+ * @param requiresGrad
+ *   If autograd should record operations on the returned tensor.
+ * @tparam D
+ *   the dtype of the created tensor.
+ */
+def rand[D <: FloatNN | ComplexNN](
+    size: Seq[Int],
+    dtype: D = float32,
+    layout: Layout = Strided,
+    device: Device = CPU,
+    requiresGrad: Boolean = false
+): Tensor[D] =
+  Tensor(
+    torchNative.torch_rand(
+      size.toArray.map(_.toLong),
+      NativeConverters.tensorOptions(dtype, layout, device, requiresGrad)
+    )
+  )
+
+/** Returns a tensor with the same size as `input` that is filled with random numbers from a
+ * uniform distribution on the interval $[0, 1)$.
+ *
+ * `torch.randLike(input)` is equivalent to `torch.rand(input.size(), dtype=input.dtype,
+ * layout=input.layout, device=input.device)`.
+ *
+ * @param input
+ *   the size of `input` will determine the size of the output tensor.
+ * @param dtype
+ *   the desired data type of the returned tensor. If `derive`, defaults to the dtype of `input`.
+ * @param layout
+ *   the desired layout of the returned tensor. If `derive`, defaults to the layout of `input`.
+ * @param device
+ *   the desired device of the returned tensor. If `derive`, defaults to the device of `input`.
+ * @param requiresGrad
+ *   If autograd should record operations on the returned tensor.
+ * @param memoryFormat
+ *   the desired memory format of the returned tensor.
+ */
+def randLike[D <: DType, D2 <: DType | Derive](
+    input: Tensor[D],
+    dtype: D2 = derive,
+    layout: Layout | Derive = derive,
+    device: Device | Derive = derive,
+    requiresGrad: Boolean = false,
+    memoryFormat: MemoryFormat = MemoryFormat.Preserve
+): Tensor[DTypeOrDeriveFromTensor[D, D2]] =
+  xLike(input, dtype, layout, device, requiresGrad, memoryFormat, torchNative.torch_rand_like)
+
+/** Returns a tensor filled with random integers generated uniformly between `low` (inclusive) and
+ * `high` (exclusive).
+ */
+def randint(low: Long, high: Long, size: Seq[Int]) =
+  val generator = new org.bytedeco.pytorch.GeneratorOptional()
+  Tensor(
+    torchNative.torch_randint(low, high, size.toArray.map(_.toLong), generator)
+  )
+
+// TODO randint_like Returns a tensor with the same shape as Tensor input filled with random integers generated uniformly between low (inclusive) and high (exclusive).
+
+// TODO randn should also accept Seq[Int] | Int
+def randn[D <: FloatNN](
+    size: Seq[Int],
+    dtype: D = float32,
+    layout: Layout = Strided,
+    device: Device = CPU,
+    requiresGrad: Boolean = false
+): Tensor[D] =
+  Tensor(
+    torchNative.torch_randn(
+      size.toArray.map(_.toLong),
+      NativeConverters.tensorOptions(dtype, layout, device, requiresGrad)
+    )
+  )
+
+// TODO randn_like Returns a tensor with the same size as input that is filled with random numbers from a normal distribution with mean 0 and variance 1.
+
+/** Returns a random permutation of integers from 0 to n - 1.
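+ *
+ * For example (illustrative; the permutation differs per call):
+ * {{{
+ * randperm(5) // e.g. Tensor[Int64] [3, 0, 4, 1, 2]
+ * }}}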
+ * + * TODO support custom generator + */ +def randperm[D <: DType]( + n: Long, + dtype: D = int64, + layout: Layout = Strided, + device: Device = CPU, + requiresGrad: Boolean = false, + pinMemory: Boolean = false +): Tensor[D] = + Tensor( + torchNative.torch_randperm( + n, + NativeConverters.tensorOptions(dtype, layout, device, requiresGrad, pinMemory) + ) + ) diff --git a/core/src/main/scala/torch/ops/ReductionOps.scala b/core/src/main/scala/torch/ops/ReductionOps.scala new file mode 100644 index 00000000..5f9fac14 --- /dev/null +++ b/core/src/main/scala/torch/ops/ReductionOps.scala @@ -0,0 +1,73 @@ +/* + * Copyright 2022 storch.dev + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package torch + +import internal.NativeConverters.* + +import org.bytedeco.pytorch.global.torch as torchNative + +/** Reduction Ops + * + * https://pytorch.org/docs/stable/torch.html#reduction-ops + */ + +// TODO argmax Returns the indices of the maximum value of all elements in the `input` tensor. +// TODO argmin Returns the indices of the minimum value(s) of the flattened tensor or along a dimension +// TODO amax Returns the maximum value of each slice of the `input` tensor in the given dimension(s) dim. +// TODO amin Returns the minimum value of each slice of the `input` tensor in the given dimension(s) dim. +// TODO aminmax Computes the minimum and maximum values of the `input` tensor. +// TODO all Tests if all elements in `input` evaluate to True. +// TODO any Tests if any element in `input` evaluates to True. +// TODO max Returns the maximum value of all elements in the `input` tensor. +// TODO min Returns the minimum value of all elements in the `input` tensor. +// TODO dist Returns the p-norm of (input - other) +// TODO logsumexp Returns the log of summed exponentials of each row of the `input` tensor in the given dimension dim. +// TODO mean Returns the mean value of all elements in the `input` tensor. +// TODO nanmean Computes the mean of all non-NaN elements along the specified dimensions. +// TODO median Returns the median of the values in input. +// TODO nanmedian Returns the median of the values in input, ignoring NaN values. +// TODO mode Returns a namedtuple (values, indices) where values is the mode value of each row of the `input` tensor in the given dimension dim, i.e. a value which appears most often in that row, and indices is the index location of each mode value found. +// TODO norm Returns the matrix norm or vector norm of a given tensor. +// TODO nansum Returns the sum of all elements, treating Not a Numbers (NaNs) as zero. +// TODO prod Returns the product of all elements in the `input` tensor. +// TODO quantile Computes the q-th quantiles of each row of the `input` tensor along the dimension dim. +// TODO nanquantile This is a variant of torch.quantile() that "ignores" NaN values, computing the quantiles q as if NaN values in `input` did not exist. +// TODO std Calculates the standard deviation over the dimensions specified by dim. 
+// TODO std_mean Calculates the standard deviation and mean over the dimensions specified by dim.
+
+/** Returns the sum of all elements in the `input` tensor. */
+def sum[D <: DType](
+    input: Tensor[D],
+    dim: Array[Long] = Array(),
+    keepdim: Boolean = false,
+    dtype: Option[DType] = None
+): Tensor[D] =
+  // TODO Add dtype
+  val sto = new org.bytedeco.pytorch.ScalarTypeOptional()
+  Tensor(torchNative.sum(input.native, dim, keepdim, sto))
+
+// TODO unique Returns the unique elements of the `input` tensor.
+// TODO unique_consecutive Eliminates all but the first element from every consecutive group of equivalent elements.
+
+/* TODO Calculates the variance over the dimensions specified by dim. */
+//def variance[D <: DType](input: Tensor[D], dim: Seq[Int] = Nil, correction: Option[Int] = None, keepdim: Boolean = false) =
+//  Tensor(torchNative.`var`(input.native, dim.toArray.map(_.toLong), toOptional(correction), keepdim))
+
+// TODO var_mean Calculates the variance and mean over the dimensions specified by dim.
+// TODO count_nonzero Counts the number of non-zero values in the tensor `input` along the given dim.
diff --git a/core/src/main/scala/torch/torch.scala b/core/src/main/scala/torch/torch.scala
index a25b5faa..93292fc1 100644
--- a/core/src/main/scala/torch/torch.scala
+++ b/core/src/main/scala/torch/torch.scala
@@ -16,1189 +16,9 @@
 package torch
 
-import org.bytedeco.javacpp.*
-import org.bytedeco.pytorch
 import org.bytedeco.pytorch.global.torch as torchNative
-import org.bytedeco.pytorch.global.torch.{ScalarType, toComplexType}
-import org.bytedeco.pytorch.{
-  BoolOptional,
-  DeviceOptional,
-  LayoutOptional,
-  LinearImpl,
-  LogSoftmaxFuncOptions,
-  LongOptional,
-  MemoryFormatOptional,
-  Module,
-  Scalar,
-  ScalarTypeOptional,
-  TensorArrayRef,
-  TensorVector
-}
-
-import java.nio.{
-  ByteBuffer,
-  CharBuffer,
-  DoubleBuffer,
-  FloatBuffer,
-  IntBuffer,
-  LongBuffer,
-  ShortBuffer
-}
-import scala.annotation.{targetName, varargs}
-import scala.reflect.ClassTag
-import internal.NativeConverters.*
-import Layout.Strided
-import Device.CPU
-import torch.internal.NativeConverters
-import MemoryFormat.Contiguous
-
-import java.nio.file.Path
-import java.nio.file.Files
-import org.bytedeco.pytorch.GenericDict
-import org.bytedeco.pytorch.IValue
-
-import scala.collection.immutable.VectorMap
-import scala.collection.immutable.SeqMap
 import scala.util.Using
 
-// Creation Ops
-
-// // TODO sparse_coo_tensor
-// // TODO as_tensor
-// // TODO as_strided
-// // TODO frombuffer
-
-/** Returns a tensor filled with the scalar value `0`, with the shape defined by the variable
- * argument `size`.
- * @param size
- * a sequence of integers defining the shape of the output tensor.
-  * @tparam T
-  * @return
-  */
-// def zeros[D <: DType](size: Int*): Tensor[Float32] =
-//   zeros[D](size.toSeq)
-def zeros[D <: DType](
-    size: Seq[Int] | Int,
-    dtype: D = float32,
-    layout: Layout = Strided,
-    device: Device = CPU,
-    requiresGrad: Boolean = false
-): Tensor[D] =
-  val nativeSize = size match
-    case s: Seq[Int] => s.map(_.toLong).toArray
-    case s: Int      => Array(s.toLong)
-  Tensor(
-    torchNative.torch_zeros(
-      nativeSize,
-      NativeConverters.tensorOptions(dtype, layout, device, requiresGrad)
-    )
-  )
-
-def zerosLike[D <: DType, D2 <: DType | Derive](
-    input: Tensor[D],
-    dtype: D2 = derive,
-    layout: Layout | Derive = derive,
-    device: Device | Derive = derive,
-    requiresGrad: Boolean = false,
-    memoryFormat: MemoryFormat = MemoryFormat.Preserve
-): Tensor[DTypeOrDeriveFromTensor[D, D2]] =
-  xLike(input, dtype, layout, device, requiresGrad, memoryFormat, torchNative.torch_zeros_like)
-
-/** Returns a tensor filled with the scalar value `1`, with the shape defined by the variable
-  * argument `size`.
-  * @param size
-  *   a sequence of integers defining the shape of the output tensor.
-  * @tparam T
-  * @return
-  */
-def ones[D <: DType](
-    size: Seq[Int] | Int,
-    dtype: D = float32,
-    layout: Layout = Strided,
-    device: Device = CPU,
-    requiresGrad: Boolean = false
-): Tensor[D] =
-  val nativeSize = size match
-    case s: Seq[Int] => s.map(_.toLong).toArray
-    case s: Int      => Array(s.toLong)
-  Tensor(
-    torchNative.torch_ones(
-      nativeSize,
-      NativeConverters.tensorOptions(dtype, layout, device, requiresGrad)
-    )
-  )
-
-def onesLike[D <: DType, D2 <: DType | Derive](
-    input: Tensor[D],
-    dtype: D2 = derive,
-    layout: Layout | Derive = derive,
-    device: Device | Derive = derive,
-    requiresGrad: Boolean = false,
-    memoryFormat: MemoryFormat = MemoryFormat.Preserve
-): Tensor[DTypeOrDeriveFromTensor[D, D2]] =
-  xLike(input, dtype, layout, device, requiresGrad, memoryFormat, torchNative.torch_ones_like)
-
-// format: off
-/** Returns a 1-D tensor of size $`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`$ with values
-  * from the interval ``[start, end)`` taken with common difference :attr:`step` beginning from `start`.
-  *
-  * Note that non-integer `step` is subject to floating point rounding errors when comparing against `end`;
-  * to avoid inconsistency, we advise adding a small epsilon to `end` in such cases.
-  *
-  * $$
-  * \text{out}_{{i+1}} = \text{out}_{i} + \text{step}
-  * $$
-  *
-  * @param start
-  *   The starting value for the set of points. Default: ``0``.
-  * @param end
-  *   The ending value for the set of points
-  * @param step
-  *   The gap between each pair of adjacent points. Default: ``1``.
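-  *
-  * For example, `arange(0, 10)` produces the values `0, 1, ..., 9`, while `arange(0, 10, 2)`
-  * produces `0, 2, 4, 6, 8`.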
-  */
-// format: on
-def arange[D <: DType | Derive, Start <: ScalaType, End <: ScalaType, Step <: ScalaType](
-    start: Start = 0,
-    end: End,
-    step: Step = 1,
-    dtype: D = derive,
-    layout: Layout = Strided,
-    device: Device = CPU,
-    requiresGrad: Boolean = false
-): Tensor[DTypeOrDeriveArange[D, Start, End, Step]] =
-  val derivedDType = dtype match
-    case _: Derive => derivedArangeType(start, end, step)
-    case t: DType  => t
-  Tensor(
-    torchNative.torch_arange(
-      toScalar(start),
-      toScalar(end),
-      toScalar(step),
-      NativeConverters.tensorOptions(derivedDType, layout, device, requiresGrad)
-    )
-  )
-
-def linspace[D <: DType](
-    start: Double,
-    end: Double,
-    steps: Long,
-    dtype: D = float32,
-    layout: Layout = Strided,
-    device: Device = CPU,
-    requiresGrad: Boolean = false
-): Tensor[D] =
-  Tensor(
-    torchNative.torch_linspace(
-      new Scalar(start),
-      new Scalar(end),
-      steps,
-      NativeConverters.tensorOptions(dtype, layout, device, requiresGrad)
-    )
-  )
-
-def logspace[D <: DType](
-    start: Double,
-    end: Float,
-    steps: Long,
-    base: Double = 10.0,
-    dtype: D = float32,
-    layout: Layout = Strided,
-    device: Device = CPU,
-    requiresGrad: Boolean = false
-) = Tensor(
-  torchNative.torch_logspace(
-    new Scalar(start),
-    new Scalar(end),
-    steps,
-    base,
-    NativeConverters.tensorOptions(dtype, layout, device, requiresGrad)
-  )
-)
-
-/** Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.
-  *
-  * @param n
-  *   the number of rows
-  * @param m
-  *   the number of columns with default being `n`
-  * @param dtype
-  *   the desired data type of the returned tensor.
-  * @param layout
-  *   the desired layout of the returned tensor.
-  * @param device
-  *   the desired device of the returned tensor.
-  * @param requiresGrad
-  *   If autograd should record operations on the returned tensor.
-  */
-def eye[D <: DType](
-    n: Int,
-    m: Option[Int] = None,
-    dtype: D = float32,
-    layout: Layout = Strided,
-    device: Device = CPU,
-    requiresGrad: Boolean = false
-): Tensor[D] = Tensor(
-  torchNative.torch_eye(n, NativeConverters.tensorOptions(dtype, layout, device, requiresGrad))
-)
-// def empty(size: Long*): Tensor[D] = Tensor(torchNative.torch_empty(size*))
-
-/** Returns a tensor filled with uninitialized data. */
-def empty[D <: DType](
-    size: Seq[Int],
-    dtype: D = float32,
-    layout: Layout = Strided,
-    device: Device = CPU,
-    requiresGrad: Boolean = false,
-    pinMemory: Boolean = false,
-    memoryFormat: MemoryFormat = Contiguous
-): Tensor[D] =
-  Tensor(
-    torchNative.torch_empty(
-      size.toArray.map(_.toLong),
-      NativeConverters
-        .tensorOptions(dtype, layout, device, requiresGrad)
-        .pinned_memory(BoolOptional(pinMemory)),
-      new MemoryFormatOptional(memoryFormat.toNative)
-    )
-  )
-
-/** Returns an uninitialized tensor with the same size as input.
-  *
-  * `torch.empty_like(input)` is equivalent to `torch.empty(input.size(), dtype=input.dtype,
-  * layout=input.layout, device=input.device`).
-  */
-def emptyLike[D <: DType, D2 <: DType | Derive](
-    input: Tensor[D],
-    dtype: D2 = derive,
-    layout: Layout | Derive = derive,
-    device: Device | Derive = derive,
-    requiresGrad: Boolean = false,
-    memoryFormat: MemoryFormat = MemoryFormat.Preserve
-): Tensor[DTypeOrDeriveFromTensor[D, D2]] =
-  xLike(input, dtype, layout, device, requiresGrad, memoryFormat, torchNative.torch_empty_like)
-
-// // TODO emptyStrided
-
-/** Creates a tensor of size `size` filled with `fillValue`. The tensor's dtype is inferred from
-  * `fillValue`.
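-  *
-  * For instance, `full(Seq(2, 2), 3.14)` creates a 2x2 tensor filled with `3.14`; the dtype
-  * (`float64`) is derived from the `Double` fill value.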
-  *
-  * @param size
-  *   a sequence of integers defining the shape of the output tensor.
-  * @param fillValue
-  *   the value to fill the output tensor with.
-  * @param dtype
-  *   the desired data type of the returned tensor.
-  * @param layout
-  *   the desired layout of the returned Tensor.
-  * @param device
-  *   the desired device of the returned tensor.
-  * @param requiresGrad
-  *   If autograd should record operations on the returned tensor.
-  * @tparam T
-  *   the data type of the returned tensor, or `Default` if the type should be derived from
-  *   `fillValue`.
-  * @tparam U
-  *   the data type of `fillValue`.
-  * @return
-  *   the newly created tensor.
-  */
-def full[D <: DType | Derive, U <: ScalaType](
-    size: Seq[Int],
-    fillValue: U,
-    dtype: D = derive,
-    layout: Layout = Strided,
-    device: Device = CPU,
-    requiresGrad: Boolean = false
-): Tensor[DTypeOrDeriveFromScalar[D, U]] =
-  val derivedDType = dtype match
-    case _: Derive => scalaToDType(fillValue)
-    case t: DType  => t
-  Tensor(
-    torchNative.torch_full(
-      size.toArray.map(_.toLong),
-      toScalar(fillValue),
-      NativeConverters.tensorOptions(derivedDType, layout, device, requiresGrad)
-    )
-  )
-// TODO fullLike
-// TODO quantize_per_tensor
-// TODO quantize_per_channel
-// TODO dequantize
-// TODO complex
-// TODO polar
-// TODO heavside
-
-def pickleLoad(data: Array[Byte]): SeqMap[String, Tensor[DType]] =
-  val dict: GenericDict = torchNative.pickle_load(data).toGenericDict()
-  // We need to extract the members in one go or we risk too early deallocation of native objects here
-  val buffer = new Array[(IValue, IValue)](dict.size().toInt)
-  val nativeIt = dict.begin()
-  for (i <- 0 until buffer.size)
-    buffer(i) = (nativeIt.access().key(), nativeIt.access().value())
-    nativeIt.increment()
-  VectorMap.from(buffer.map { (key, value) =>
-    // TODO better error handling
-    (key.toStringRef().getString(), Tensor[DType](value.toTensor().clone()))
-  })
-
-def pickleLoad(path: Path): Map[String, Tensor[DType]] =
-  val data: Array[Byte] = Files.readAllBytes(path)
-  pickleLoad(data)
-
-def pickle_save(tensors: SeqMap[String, Tensor[DType]]) =
-  tensors.map { (k, v) =>
-    (IValue(k), IValue(v.native))
-  }
-
-/** Returns a tensor filled with random numbers from a uniform distribution on the interval `[0,1)`
-  *
-  * The shape of the tensor is defined by the variable argument `size`.
-  *
-  * @param size
-  *   a sequence of integers defining the shape of the output tensor.
-  * @param dtype
-  *   the desired data type of returned tensor.
-  * @param layout
-  *   the desired layout of returned Tensor.
-  * @param device
-  *   the desired device of returned tensor.
-  * @param requiresGrad
-  *   If autograd should record operations on the returned tensor.
-  * @tparam T
-  *   the dtype of the created tensor.
-  */
-def rand[D <: FloatNN | ComplexNN](
-    size: Seq[Int],
-    dtype: D = float32,
-    layout: Layout = Strided,
-    device: Device = CPU,
-    requiresGrad: Boolean = false
-): Tensor[D] =
-  Tensor(
-    torchNative.torch_rand(
-      size.toArray.map(_.toLong),
-      NativeConverters.tensorOptions(dtype, layout, device, requiresGrad)
-    )
-  )
-
-/** Returns a tensor with the same size as `input` that is filled with random numbers from a uniform
-  * distribution on the interval $[0, 1)$.
-  *
-  * `torch.randLike(input)` is equivalent to `torch.rand(input.size(), dtype=input.dtype,
-  * layout=input.layout, device=input.device)`.
-  *
-  * @param input
-  *   the size of `input` will determine size of the output tensor.
-  * @param dtype
-  *   the desired data type of returned Tensor. If `derive`, defaults to the dtype of `input`.
-  * @param layout
-  *   the desired layout of returned tensor. If `derive`, defaults to the layout of `input`.
-  * @param device
-  *   the desired device of returned tensor. If `derive`, defaults to the device of `input`.
-  * @param requiresGrad
-  *   If autograd should record operations on the returned tensor.
-  * @param memoryFormat
-  *   the desired memory format of returned Tensor.
-  */
-def randLike[D <: DType, D2 <: DType | Derive](
-    input: Tensor[D],
-    dtype: D2 = derive,
-    layout: Layout | Derive = derive,
-    device: Device | Derive = derive,
-    requiresGrad: Boolean = false,
-    memoryFormat: MemoryFormat = MemoryFormat.Preserve
-): Tensor[DTypeOrDeriveFromTensor[D, D2]] =
-  xLike(input, dtype, layout, device, requiresGrad, memoryFormat, torchNative.torch_rand_like)
-
-def randn[D <: FloatNN](
-    size: Seq[Int],
-    dtype: D = float32,
-    layout: Layout = Strided,
-    device: Device = CPU,
-    requiresGrad: Boolean = false
-): Tensor[D] =
-  Tensor(
-    torchNative.torch_randn(
-      size.toArray.map(_.toLong),
-      NativeConverters.tensorOptions(dtype, layout, device, requiresGrad)
-    )
-  )
-
-/** Returns a random permutation of integers from 0 to n - 1.
-  *
-  * TODO support custom generator
-  */
-def randperm[D <: DType](
-    n: Long,
-    dtype: D = int64,
-    layout: Layout = Strided,
-    device: Device = CPU,
-    requiresGrad: Boolean = false,
-    pinMemory: Boolean = false
-): Tensor[D] =
-  Tensor(
-    torchNative.torch_randperm(
-      n,
-      NativeConverters.tensorOptions(dtype, layout, device, requiresGrad, pinMemory)
-    )
-  )
-
-private def xLike[D <: DType, D2 <: DType | Derive](
-    input: Tensor[D],
-    dtype: D2,
-    layout: Layout | Derive,
-    device: Device | Derive,
-    requiresGrad: Boolean,
-    memoryFormat: MemoryFormat,
-    nativeFn: (
-        pytorch.Tensor,
-        pytorch.TensorOptions,
-        pytorch.MemoryFormatOptional
-    ) => pytorch.Tensor
-): Tensor[DTypeOrDeriveFromTensor[D, D2]] =
-  val derivedDType = dtype match
-    case _: Derive => input.dtype
-    case d: DType  => d
-  val derivedLayout = layout match
-    case _: Derive => input.layout
-    case l: Layout => l
-  val derivedDevice = device match
-    case _: Derive => input.device
-    case d: Device => d
-  Tensor(
-    nativeFn(
-      input.native,
-      NativeConverters.tensorOptions(derivedDType, derivedLayout, derivedDevice, requiresGrad),
-      new MemoryFormatOptional(memoryFormat.toNative)
-    )
-  )
-
-// End Creation Ops
-
-// Indexing, Slicing, Joining, Mutating Ops
-
-def cat[D <: DType](tensors: Seq[Tensor[D]], dim: Int = 0): Tensor[D] = Tensor(
-  torchNative.cat(new TensorArrayRef(new TensorVector(tensors.map(_.native)*)), dim.toLong)
-)
-
-// TODO dsplit
-// TODO column_stack
-// TODO dstack
-// TODO gather
-// TODO hsplit
-// TODO hstack
-// TODO index_add
-// TODO index_copy
-// TODO index_reduce
-// TODO index_select
-// TODO masked_select
-// TODO movedim
-// TODO moveaxis
-// TODO narrow
-// TODO narrow_copy
-// TODO nonzero
-// TODO permute
-// TODO reshape
-// TODO select
-// TODO scatter
-// TODO diagonal_scatter
-// TODO select_scatter
-// TODO slice_scatter
-// TODO scatter_add
-// TODO scatter_reduce
-// TODO split
-// TODO squeeze
-
-/** Concatenates a sequence of tensors along a new dimension.
-  *
-  * All tensors need to be of the same size.
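-  *
-  * For example, stacking two tensors of shape `(3)` along `dim = 0` yields a tensor of shape
-  * `(2, 3)`.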
-  */
-def stack[D <: DType](tensors: Seq[Tensor[D]], dim: Int = 0): Tensor[D] = Tensor(
-  torchNative.stack(new TensorArrayRef(new TensorVector(tensors.map(_.native)*)), dim)
-)
-
-// TODO swapaxes
-// TODO swapdims
-// TODO t
-// TODO take
-// TODO take_along_dim
-// TODO tensor_split
-// TODO tile
-// TODO transpose
-// TODO unbind
-// TODO unsqueeze
-// TODO vsplit
-// TODO vstack
-// TODO where
-
-// End Indexing, Slicing, Joining, Mutating Ops
-
-// Math operations
-
-// Pointwise Ops
-
-/** Computes the absolute value of each element in `input`. */
-def abs[D <: NumericNN](input: Tensor[D]): Tensor[D] =
-  Tensor(torchNative.abs(input.native))
-
-/** Computes the inverse cosine of each element in `input`. */
-def acos[D <: DType](input: Tensor[D]): Tensor[D] =
-  Tensor(torchNative.acos(input.native))
-
-/** Returns a new tensor with the inverse hyperbolic cosine of the elements of `input`. */
-def acosh[D <: DType](input: Tensor[D]): Tensor[D] =
-  Tensor(torchNative.acosh(input.native))
-
-/** Adds `other` to `input`. */
-def add[D <: DType, D2 <: DType](input: Tensor[D], other: Tensor[D2]): Tensor[Promoted[D, D2]] =
-  Tensor(torchNative.add(input.native, other.native))
-
-/** Adds `other` to `input`. */
-def add[D <: DType, S <: ScalaType](
-    input: Tensor[D],
-    other: S
-): Tensor[Promoted[D, ScalaToDType[S]]] =
-  Tensor(torchNative.add(input.native, toScalar(other)))
-
-/** Performs the element-wise division of tensor1 by tensor2, multiplies the result by the scalar
-  * value and adds it to input.
-  */
-def addcdiv[D <: DType, D2 <: DType, D3 <: DType](
-    input: Tensor[D],
-    tensor1: Tensor[D2],
-    tensor2: Tensor[D3],
-    value: ScalaType
-): Tensor[Promoted[D, Promoted[D2, D3]]] =
-  Tensor(torchNative.addcdiv(input.native, tensor1.native, tensor2.native, toScalar(value)))
-
-/** Performs the element-wise multiplication of tensor1 by tensor2, multiplies the result by the
-  * scalar value and adds it to input.
-  */
-def addcmul[D <: DType, D2 <: DType, D3 <: DType](
-    input: Tensor[D],
-    tensor1: Tensor[D2],
-    tensor2: Tensor[D3],
-    value: ScalaType
-): Tensor[Promoted[D, Promoted[D2, D3]]] =
-  Tensor(torchNative.addcmul(input.native, tensor1.native, tensor2.native, toScalar(value)))
-
-/** Computes the element-wise angle (in radians) of the given `input` tensor. */
-def angle[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[ComplexToReal[D]]] =
-  Tensor(torchNative.angle(input.native))
-
-/** Returns a new tensor with the arcsine of the elements of `input`. */
-def asin[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
-  Tensor(torchNative.asin(input.native))
-
-/** Returns a new tensor with the inverse hyperbolic sine of the elements of `input`. */
-def asinh[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
-  Tensor(torchNative.asinh(input.native))
-
-/** Returns a new tensor with the arctangent of the elements of `input`. */
-def atan[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
-  Tensor(torchNative.atan(input.native))
-
-/** Returns a new tensor with the inverse hyperbolic tangent of the elements of `input`. */
-def atanh[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
-  Tensor(torchNative.atanh(input.native))
-
-/** Element-wise arctangent of (input / other) with consideration of the quadrant. Returns a new
-  * tensor with the signed angles in radians between vector (other, input) and vector (1, 0). (Note
-  * that other, the second parameter, is the x-coordinate, while input, the first parameter, is the
-  * y-coordinate.)
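-  *
-  * For example, `atan2(Tensor(Seq(1.0)), Tensor(Seq(1.0)))` yields approximately `0.7854`
-  * (pi/4).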
-  */
-def atan2[D <: RealNN, D2 <: RealNN](
-    input: Tensor[D],
-    other: Tensor[D2]
-): Tensor[FloatPromoted[Promoted[D, D2]]] =
-  Tensor(torchNative.atan2(input.native, other.native))
-
-/** Computes the bitwise NOT of the given input tensor. The input tensor must be of integral or
-  * Boolean types. For bool tensors, it computes the logical NOT.
-  */
-def bitwiseNot[D <: BitwiseNN](input: Tensor[D]): Tensor[D] =
-  Tensor(torchNative.bitwise_not(input.native))
-
-/** Computes the bitwise AND of `input` and `other`. For bool tensors, it computes the logical AND.
-  */
-def bitwiseAnd[D <: BitwiseNN, D2 <: BitwiseNN](
-    input: Tensor[D],
-    other: Tensor[D2]
-): Tensor[Promoted[D, D2]] =
-  Tensor(torchNative.bitwise_and(input.native, other.native))
-
-/** Computes the bitwise OR of `input` and `other`. For bool tensors, it computes the logical OR.
-  */
-def bitwiseOr[D <: BitwiseNN, D2 <: BitwiseNN](
-    input: Tensor[D],
-    other: Tensor[D2]
-): Tensor[Promoted[D, D2]] =
-  Tensor(torchNative.bitwise_or(input.native, other.native))
-
-/** Computes the bitwise XOR of `input` and `other`. For bool tensors, it computes the logical XOR.
-  */
-def bitwiseXor[D <: BitwiseNN, D2 <: BitwiseNN](
-    input: Tensor[D],
-    other: Tensor[D2]
-): Tensor[Promoted[D, D2]] =
-  Tensor(torchNative.bitwise_xor(input.native, other.native))
-
-/** Computes the left arithmetic shift of `input` by `other` bits. */
-def bitwiseLeftShift[D <: BitwiseNN, D2 <: BitwiseNN](
-    input: Tensor[D],
-    other: Tensor[D2]
-)(using OnlyOneBool[D, D2]): Tensor[Promoted[D, D2]] =
-  Tensor(torchNative.bitwise_left_shift(input.native, other.native))
-
-/** Computes the right arithmetic shift of `input` by `other` bits. */
-def bitwiseRightShift[D <: BitwiseNN, D2 <: BitwiseNN](
-    input: Tensor[D],
-    other: Tensor[D2]
-)(using OnlyOneBool[D, D2]): Tensor[Promoted[D, D2]] =
-  Tensor(torchNative.bitwise_right_shift(input.native, other.native))
-
-/** Returns a new tensor with the ceil of the elements of `input`, the smallest integer greater than
-  * or equal to each element.
-  */
-def ceil[D <: NumericRealNN](input: Tensor[D]): Tensor[D] =
-  Tensor(torchNative.ceil(input.native))
-
-/** Clamps all elements in `input` into the range [min, max]. Letting min_value and max_value be min
-  * and max, respectively, this returns `min(max(input, min_value), max_value)`. If `min` is `None`,
-  * there is no lower bound, and if `max` is `None`, there is no upper bound.
-  */
-// TODO Support Tensor for min and max
-def clamp[D <: RealNN](
-    input: Tensor[D],
-    min: Option[Real],
-    max: Option[Real]
-): Tensor[D] =
-  Tensor(torchNative.clamp(input.native, toOptional(min), toOptional(max)))
-
-/** Computes the element-wise conjugate of the given input tensor. If input has a non-complex dtype,
-  * this function just returns input.
-  */
-def conjPhysical[D <: DType](input: Tensor[D]): Tensor[D] =
-  Tensor(torchNative.conj_physical(input.native))
-
-/** Creates a new floating-point tensor with the magnitude of input and the sign of other,
-  * elementwise.
-  */
-def copysign[D <: RealNN, D2 <: RealNN](
-    input: Tensor[D],
-    other: TensorOrReal[D2]
-): Tensor[FloatPromoted[D]] =
-  Tensor(
-    other match
-      case other: Tensor[D2] =>
-        torchNative.copysign(input.native, other.native)
-      case other: Real =>
-        torchNative.copysign(input.native, toScalar(other))
-  )
-
-/** Returns a new tensor with the cosine of the elements of `input`.
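-  * For example, `cos(Tensor(Seq(0.0)))` yields `1.0`.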
-  */
-def cos[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
-  Tensor(torchNative.cos(input.native))
-
-/** Returns a new tensor with the hyperbolic cosine of the elements of `input`. */
-def cosh[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
-  Tensor(torchNative.cosh(input.native))
-
-/** Returns a new tensor with each of the elements of `input` converted from angles in degrees to
-  * radians.
-  */
-def deg2rad[D <: RealNN](input: Tensor[D]): Tensor[FloatPromoted[D]] =
-  Tensor(torchNative.deg2rad(input.native))
-
-/** Divides each element of `input` by the corresponding element of `other`. */
-// TODO handle roundingMode
-def div[D <: DType, D2 <: DType](
-    input: Tensor[D],
-    other: Tensor[D2]
-): Tensor[FloatPromoted[Promoted[D, D2]]] =
-  Tensor(torchNative.div(input.native, other.native))
-
-def div[D <: DType, S <: ScalaType](
-    input: Tensor[D],
-    other: S
-): Tensor[FloatPromoted[Promoted[D, ScalaToDType[S]]]] =
-  Tensor(torchNative.div(input.native, toScalar(other)))
-
-export torch.special.digamma
-export torch.special.erf
-export torch.special.erfc
-export torch.special.erfinv
-
-/** Returns a new tensor with the exponential of the elements of the input tensor `input`. */
-def exp[D <: DType](input: Tensor[D]): Tensor[D] =
-  Tensor(torchNative.exp(input.native))
-
-export torch.special.exp2
-export torch.special.expm1
-
-/** Returns a new tensor with the data in `input` fake quantized per channel using `scale`,
-  * `zero_point`, `quant_min` and `quant_max`, across the channel specified by `axis`.
-  */
-def fakeQuantizePerChannelAffine(
-    input: Tensor[Float32],
-    scale: Tensor[Float32],
-    zeroPoint: Tensor[Int32 | Float16 | Float32],
-    axis: Long,
-    quantMin: Long,
-    quantMax: Long
-): Tensor[Float32] =
-  Tensor(
-    torchNative.fake_quantize_per_channel_affine(
-      input.native,
-      scale.native,
-      zeroPoint.native,
-      axis,
-      quantMin,
-      quantMax
-    )
-  )
-
-/** Returns a new tensor with the data in `input` fake quantized using `scale`, `zero_point`,
-  * `quant_min` and `quant_max`.
-  */
-def fakeQuantizePerTensorAffine(
-    input: Tensor[Float32],
-    scale: Tensor[Float32],
-    zeroPoint: Tensor[Int32],
-    quantMin: Long,
-    quantMax: Long
-): Tensor[Float32] =
-  Tensor(
-    torchNative.fake_quantize_per_tensor_affine(
-      input.native,
-      scale.native,
-      zeroPoint.native,
-      quantMin,
-      quantMax
-    )
-  )
-
-def fakeQuantizePerTensorAffine(
-    input: Tensor[Float32],
-    scale: Double,
-    zeroPoint: Long,
-    quantMin: Long,
-    quantMax: Long
-): Tensor[Float32] =
-  Tensor(
-    torchNative.fake_quantize_per_tensor_affine(input.native, scale, zeroPoint, quantMin, quantMax)
-  )
-
-/** Returns a new tensor with the truncated integer values of the elements of `input`. Alias for
-  * torch.trunc
-  */
-def fix[D <: NumericRealNN](input: Tensor[D]): Tensor[D] =
-  Tensor(torchNative.fix(input.native))
-
-/** Raises `input` to the power of `exponent`, elementwise, in double precision. If neither input is
-  * complex returns a `torch.float64` tensor, and if one or more inputs is complex returns a
-  * `torch.complex128` tensor.
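-  *
-  * For example, `floatPower(Tensor(Seq(2)), Tensor(Seq(10)))` yields `1024.0` as a `float64`
-  * tensor.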
-  */
-def floatPower[D <: DType, D2 <: DType](
-    input: Tensor[D],
-    exponent: Tensor[D2]
-): Tensor[ComplexPromoted[D, D2]] =
-  Tensor(torchNative.float_power(input.native, exponent.native))
-
-def floatPower[D <: DType, S <: ScalaType](
-    input: S,
-    exponent: Tensor[D]
-): Tensor[ComplexPromoted[ScalaToDType[S], D]] =
-  Tensor(torchNative.float_power(toScalar(input), exponent.native))
-
-def floatPower[D <: DType, S <: ScalaType](
-    input: Tensor[D],
-    exponent: ScalaType
-): Tensor[ComplexPromoted[D, ScalaToDType[S]]] =
-  Tensor(torchNative.float_power(input.native, toScalar(exponent)))
-
-/** Returns a new tensor with the floor of the elements of `input`, the largest integer less than or
-  * equal to each element.
-  */
-def floor[D <: NumericRealNN](input: Tensor[D]): Tensor[D] =
-  Tensor(torchNative.floor(input.native))
-
-/** Computes `input` divided by `other`, elementwise, and floors the result. */
-def floorDivide[D <: RealNN, D2 <: RealNN](
-    input: Tensor[D],
-    other: Tensor[D2]
-)(using OnlyOneBool[D, D2]): Tensor[Promoted[D, D2]] =
-  Tensor(torchNative.floor_divide(input.native, other.native))
-
-def floorDivide[D <: RealNN, R <: Real](
-    input: Tensor[D],
-    other: R
-)(using OnlyOneBool[D, ScalaToDType[R]]): Tensor[Promoted[D, ScalaToDType[R]]] =
-  Tensor(torchNative.floor_divide(input.native, toScalar(other)))
-
-/** Applies C++’s `std::fmod` entrywise. The result has the same sign as the dividend `input` and
-  * its absolute value is less than that of `other`.
-  */
-// NOTE: When the divisor is zero, returns NaN for floating point dtypes on both CPU and GPU; raises RuntimeError for integer division by zero on CPU; Integer division by zero on GPU may return any value.
-def fmod[D <: RealNN, D2 <: RealNN](
-    input: Tensor[D],
-    other: Tensor[D2]
-)(using OnlyOneBool[D, D2]): Tensor[Promoted[D, D2]] =
-  Tensor(torchNative.fmod(input.native, other.native))
-
-def fmod[D <: RealNN, S <: ScalaType](
-    input: Tensor[D],
-    other: S
-)(using OnlyOneBool[D, ScalaToDType[S]]): Tensor[Promoted[D, ScalaToDType[S]]] =
-  Tensor(torchNative.fmod(input.native, toScalar(other)))
-
-/** Computes the fractional portion of each element in `input`. */
-def frac[D <: FloatNN](input: Tensor[D]): Tensor[D] =
-  Tensor(torchNative.frac(input.native))
-
-/** Decomposes `input` into `mantissa` and `exponent` tensors such that `input = mantissa * (2 **
-  * exponent)` The range of mantissa is the open interval (-1, 1).
-  */
-def frexp[D <: FloatNN](input: Tensor[D]): (Tensor[FloatPromoted[D]], Tensor[Int32]) =
-  val nativeTuple = torchNative.frexp(input.native)
-  (Tensor(nativeTuple.get0), new Int32Tensor(nativeTuple.get1))
-
-/** Estimates the gradient of a function g:Rn → R in one or more dimensions using the second-order
-  * accurate central differences method.
-  */
-// TODO handle other spacing and dim invariants
-// def gradient[D <: DType](input: Tensor[D], spacing: Float, dim: Option[Long], edgeOrder: Long = 1): Tensor[D] =
-//   Tensor(torchNative.gradient(input.native, toScalar(spacing), toOptional(dim), edgeOrder))
-
-/** Returns a new tensor containing imaginary values of the `input` tensor. The returned tensor and
-  * `input` share the same underlying storage.
-  */
-def imag[D <: ComplexNN](input: Tensor[D]): Tensor[ComplexToReal[D]] =
-  Tensor(torchNative.imag(input.native))
-
-/** Multiplies `input` by 2 ** `other`.
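-  * For example, multiplying a tensor holding `1.0` by `2 ** n` for `n = 1, 2, 3, 4` yields
-  * `2.0, 4.0, 8.0, 16.0`.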
-  */
-def ldexp[D <: DType](input: Tensor[D], other: Tensor[D]): Tensor[D] =
-  Tensor(torchNative.ldexp(input.native, other.native))
-
-/** Does a linear interpolation of two tensors `start` (given by `input`) and `end` (given by
-  * `other`) based on a scalar or tensor weight and returns the resulting out tensor. out = start +
-  * weight × (end − start)
-  */
-def lerp[D <: DType](
-    input: Tensor[D],
-    other: Tensor[D],
-    weight: Tensor[D] | Float | Double
-): Tensor[D] =
-  Tensor(
-    weight match
-      case weight: Tensor[D] => torchNative.lerp(input.native, other.native, weight.native)
-      case weight: Float     => torchNative.lerp(input.native, other.native, toScalar(weight))
-      case weight: Double    => torchNative.lerp(input.native, other.native, toScalar(weight))
-  )
-
-/** Computes the natural logarithm of the absolute value of the gamma function on `input`. */
-def lgamma[D <: RealNN](input: Tensor[D]): Tensor[D] =
-  Tensor(torchNative.lgamma(input.native))
-
-/** Returns a new tensor with the natural logarithm of the elements of `input`. */
-def log[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
-  Tensor(torchNative.log(input.native))
-
-/** Returns a new tensor with the logarithm to the base 10 of the elements of `input`. */
-def log10[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
-  Tensor(torchNative.log10(input.native))
-
-/** Returns a new tensor with the natural logarithm of (1 + input). */
-def log1p[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
-  Tensor(torchNative.log1p(input.native))
-
-/** Returns a new tensor with the logarithm to the base 2 of the elements of `input`. */
-def log2[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
-  Tensor(torchNative.log2(input.native))
-
-/** Logarithm of the sum of exponentiations of the inputs. Calculates pointwise `log(e**x +
-  * e**y)`. This function is useful in statistics where the calculated probabilities of events may
-  * be so small as to exceed the range of normal floating point numbers. In such cases the logarithm
-  * of the calculated probability is stored. This function allows adding probabilities stored in
-  * such a fashion. This op should be disambiguated with `torch.logsumexp()` which performs a
-  * reduction on a single tensor.
-  */
-def logaddexp[D <: RealNN, D2 <: RealNN](
-    input: Tensor[D],
-    other: Tensor[D2]
-): Tensor[Promoted[D, D2]] =
-  Tensor(torchNative.logaddexp(input.native, other.native))
-
-/** Logarithm of the sum of exponentiations of the inputs in base-2. Calculates pointwise `log2(2**x
-  * + 2**y)`. See torch.logaddexp() for more details.
-  */
-def logaddexp2[D <: RealNN, D2 <: RealNN](
-    input: Tensor[D],
-    other: Tensor[D2]
-): Tensor[Promoted[D, D2]] =
-  Tensor(torchNative.logaddexp2(input.native, other.native))
-
-/** Computes the element-wise logical AND of the given input tensors. Zeros are treated as False and
-  * nonzeros are treated as True.
-  */
-def logicalAnd[D <: DType, D2 <: DType](input: Tensor[D], other: Tensor[D2]): Tensor[Bool] =
-  Tensor(torchNative.logical_and(input.native, other.native))
-
-/** Computes the element-wise logical NOT of the given input tensor. If the input tensor is not a
-  * bool tensor, zeros are treated as False and non-zeros are treated as True. TODO If not
-  * specified, the output tensor will have the bool dtype.
-  */
-def logicalNot[D <: DType](input: Tensor[D]): Tensor[Bool] =
-  Tensor(torchNative.logical_not(input.native))
-
-/** Computes the element-wise logical OR of the given input tensors.
-  * Zeros are treated as False and nonzeros are treated as True.
-  */
-def logicalOr[D <: DType, D2 <: DType](input: Tensor[D], other: Tensor[D2]): Tensor[Bool] =
-  Tensor(torchNative.logical_or(input.native, other.native))
-
-/** Computes the element-wise logical XOR of the given input tensors. Zeros are treated as False and
-  * nonzeros are treated as True.
-  */
-def logicalXor[D <: DType, D2 <: DType](input: Tensor[D], other: Tensor[D2]): Tensor[Bool] =
-  Tensor(torchNative.logical_xor(input.native, other.native))
-
-export torch.special.logit
-
-/** Given the legs of a right triangle, return its hypotenuse. */
-// TODO Change `D2 <: RealNN` once we fix property testing compilation
-def hypot[D <: RealNN, D2 <: FloatNN](
-    input: Tensor[D],
-    other: Tensor[D2]
-)(using AtLeastOneFloat[D, D2]): Tensor[FloatPromoted[Promoted[D, D2]]] =
-  Tensor(torchNative.hypot(input.native, other.native))
-
-export torch.special.i0
-export torch.special.igamma
-export torch.special.igammac
-
-/** Multiplies input by other. */
-def mul[D <: DType, D2 <: DType](input: Tensor[D], other: Tensor[D2]): Tensor[Promoted[D, D2]] =
-  Tensor(torchNative.mul(input.native, other.native))
-
-export torch.special.mvlgamma
-
-/** Replaces NaN, positive infinity, and negative infinity values in `input` with the values
-  * specified by nan, posinf, and neginf, respectively. By default, NaNs are replaced with zero,
-  * positive infinity is replaced with the greatest finite value representable by input’s dtype, and
-  * negative infinity is replaced with the least finite value representable by input’s dtype.
-  */
-def nanToNum[D <: RealNN](
-    input: Tensor[D],
-    nan: Option[Double] = None,
-    posinf: Option[Double] = None,
-    neginf: Option[Double] = None
-): Tensor[D] =
-  Tensor(
-    torchNative.nan_to_num(input.native, toOptional(nan), toOptional(posinf), toOptional(neginf))
-  )
-
-/** Returns a new tensor with the negative of the elements of `input`. */
-def neg[D <: NumericNN](input: Tensor[D]): Tensor[D] =
-  Tensor(torchNative.neg(input.native))
-
-/** Return the next floating-point value after `input` towards `other`, elementwise. */
-// TODO Change `D2 <: RealNN` once we fix property testing compilation
-def nextafter[D <: RealNN, D2 <: FloatNN](
-    input: Tensor[D],
-    other: Tensor[D2]
-)(using AtLeastOneFloat[D, D2]): Tensor[FloatPromoted[Promoted[D, D2]]] =
-  Tensor(torchNative.nextafter(input.native, other.native))
-
-export torch.special.polygamma
-
-/** Returns input. Normally throws a runtime error if input is a bool tensor in pytorch. */
-def positive[D <: NumericNN](input: Tensor[D]): Tensor[D] =
-  Tensor(torchNative.positive(input.native))
-
-/** Takes the power of each element in `input` with exponent and returns a tensor with the result.
-  * `exponent` can be either a single float number or a Tensor with the same number of elements as
-  * input.
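-  *
-  * For example, `pow(Tensor(Seq(1.0, 2.0, 3.0, 4.0)), Tensor(Seq(1.0, 2.0, 3.0, 4.0)))` yields
-  * `1.0, 4.0, 27.0, 256.0`.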
-  */
-def pow[D <: DType, D2 <: DType](
-    input: Tensor[D],
-    exponent: Tensor[D2]
-)(using OnlyOneBool[D, D2]): Tensor[Promoted[D, D2]] =
-  Tensor(torchNative.pow(input.native, exponent.native))
-
-def pow[D <: DType, S <: ScalaType](
-    input: Tensor[D],
-    exponent: S
-)(using OnlyOneBool[D, ScalaToDType[S]]): Tensor[Promoted[D, ScalaToDType[S]]] =
-  Tensor(torchNative.pow(input.native, toScalar(exponent)))
-
-def pow[S <: ScalaType, D <: DType](
-    input: S,
-    exponent: Tensor[D]
-)(using OnlyOneBool[ScalaToDType[S], D]): Tensor[Promoted[ScalaToDType[S], D]] =
-  Tensor(torchNative.pow(toScalar(input), exponent.native))
-
-// TODO Implement creation of QInts
-// TODO quantized_batch_norm
-// TODO quantized_max_pool1d
-// TODO quantized_max_pool2d
-
-/** Returns a new tensor with each of the elements of `input` converted from angles in radians to
-  * degrees.
-  */
-def rad2Deg[D <: RealNN | Bool](input: Tensor[D]): Tensor[FloatPromoted[D]] =
-  Tensor(torchNative.rad2deg(input.native))
-
-/** Returns a new tensor containing real values of the self tensor. The returned tensor and self
-  * share the same underlying storage.
-  */
-def real[D <: DType](input: Tensor[D]): Tensor[ComplexToReal[D]] =
-  Tensor(torchNative.real(input.native))
-
-/** Returns a new tensor with the reciprocal of the elements of `input`. */
-def reciprocal[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
-  Tensor(torchNative.reciprocal(input.native))
-
-/** Computes Python’s modulus operation entrywise. The result has the same sign as the divisor
-  * `other` and its absolute value is less than that of `other`.
-  */
-def remainder[D <: RealNN, D2 <: RealNN](
-    input: Tensor[D],
-    other: Tensor[D2]
-): Tensor[Promoted[D, D2]] =
-  Tensor(torchNative.remainder(input.native, other.native))
-
-def remainder[D <: DType, R <: Real](
-    input: Tensor[D],
-    other: R
-): Tensor[Promoted[D, ScalaToDType[R]]] =
-  Tensor(torchNative.remainder(input.native, toScalar(other)))
-
-def remainder[D <: DType, R <: Real](
-    input: R,
-    other: Tensor[D]
-): Tensor[Promoted[ScalaToDType[R], D]] =
-  Tensor(torchNative.remainder(toScalar(input), other.native))
-
-/** Rounds elements of `input` to the nearest integer. If decimals is negative, it specifies the
-  * number of positions to the left of the decimal point.
-  */
-def round[D <: FloatNN](input: Tensor[D], decimals: Long = 0): Tensor[D] =
-  Tensor(torchNative.round(input.native, decimals))
-
-/** Returns a new tensor with the reciprocal of the square-root of each of the elements of `input`.
-  */
-def rsqrt[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
-  Tensor(torchNative.rsqrt(input.native))
-
-export torch.special.sigmoid
-
-/** Returns a new tensor with the signs of the elements of `input`. */
-def sign[D <: RealNN](input: Tensor[D]): Tensor[D] =
-  Tensor(torchNative.sign(input.native))
-
-/** This function is an extension of `torch.sign()` to complex tensors. It computes a new tensor
-  * whose elements have the same angles as the corresponding elements of `input` and absolute values
-  * (i.e. magnitudes) of one for complex tensors and is equivalent to torch.sign() for non-complex
-  * tensors.
-  */
-def sgn[D <: DType](input: Tensor[D]): Tensor[D] =
-  Tensor(torchNative.sgn(input.native))
-
-/** Tests if each element of `input` has its sign bit set or not. */
-def signbit[D <: RealNN](input: Tensor[D]): Tensor[Bool] =
-  Tensor(torchNative.signbit(input.native))
-
-/** Returns a new tensor with the sine of the elements of `input`.
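-  * For example, `sin(Tensor(Seq(0.0)))` yields `0.0`.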
-  */
-def sin[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
-  Tensor(torchNative.sin(input.native))
-
-export torch.special.sinc
-
-/** Returns a new tensor with the hyperbolic sine of the elements of `input`. */
-def sinh[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
-  Tensor(torchNative.sinh(input.native))
-
-export torch.nn.functional.softmax
-
-/** Returns a new tensor with the square-root of the elements of `input`. */
-def sqrt[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
  Tensor(torchNative.sqrt(input.native))
-
-/** Returns a new tensor with the square of the elements of `input`. */
-def square[D <: DType](input: Tensor[D]): Tensor[NumericPromoted[D]] =
-  Tensor(torchNative.square(input.native))
-
-/** Subtracts `other`, scaled by `alpha`, from `input`. */
-def sub[D <: NumericNN, D2 <: NumericNN](
-    input: Tensor[D],
-    other: Tensor[D2]
-): Tensor[Promoted[D, D2]] =
-  Tensor(torchNative.sub(input.native, other.native))
-
-def sub[D <: NumericNN, D2 <: NumericNN](
-    input: Tensor[D],
-    other: Tensor[D2],
-    alpha: ScalaType
-): Tensor[Promoted[D, D2]] =
-  Tensor(torchNative.sub(input.native, other.native, toScalar(alpha)))
-
-def sub[D <: NumericNN, D2 <: NumericNN](
-    input: Tensor[D],
-    other: Numeric,
-    alpha: ScalaType
-): Tensor[Promoted[D, D2]] =
-  Tensor(torchNative.sub(input.native, toScalar(other), toScalar(alpha)))
-
-/** Returns a new tensor with the tangent of the elements of `input`. */
-def tan[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
-  Tensor(torchNative.tan(input.native))
-
-/** Returns a new tensor with the hyperbolic tangent of the elements of `input`. */
-def tanh[D <: DType](input: Tensor[D]): Tensor[FloatPromoted[D]] =
-  Tensor(torchNative.tanh(input.native))
-
-/** Alias for `torch.div()` with `rounding_mode=None` */
-def trueDivide[D <: DType, D2 <: DType](
-    input: Tensor[D],
-    other: Tensor[D2]
-): Tensor[FloatPromoted[Promoted[D, D2]]] =
-  Tensor(torchNative.true_divide(input.native, other.native))
-
-def trueDivide[D <: DType, S <: ScalaType](
-    input: Tensor[D],
-    other: S
-): Tensor[FloatPromoted[Promoted[D, ScalaToDType[S]]]] =
-  Tensor(torchNative.true_divide(input.native, toScalar(other)))
-
-/** Returns a new tensor with the truncated integer values of the elements of `input`. */
-def trunc[D <: NumericRealNN](input: Tensor[D]): Tensor[D] =
-  Tensor(torchNative.trunc(input.native))
-
-export torch.special.xlogy
-
-// End Pointwise Ops
-
-// Comparison Ops
-
-def allclose(
-    input: Tensor[?],
-    other: Tensor[?],
-    rtol: Double = 1e-05,
-    atol: Double = 1e-08,
-    equalNan: Boolean = false
-) =
-  torchNative.allclose(input.native, other.native, rtol, atol, equalNan)
-
-// End Comparison Ops
-
-// End Math operations
-
-def matmul[D1 <: DType, D2 <: DType](t1: Tensor[D1], t2: Tensor[D2]): Tensor[Promoted[D1, D2]] =
-  t1.matmul(t2)
-
 def manualSeed(seed: Long) = torchNative.manual_seed(seed)
 
 /** Disable gradient calculation for [[op]].
diff --git a/core/src/test/scala/torch/TensorCheckSuite.scala b/core/src/test/scala/torch/TensorCheckSuite.scala
index de8af28c..94582bcc 100644
--- a/core/src/test/scala/torch/TensorCheckSuite.scala
+++ b/core/src/test/scala/torch/TensorCheckSuite.scala
@@ -42,7 +42,7 @@ trait TensorCheckSuite extends ScalaCheckSuite {
       opName: String,
       skipPropertyTestReason: Option[String] = None
   ): Unit =
-    test(propertyTestName(opName)) {
+    property(propertyTestName(opName)) {
       assume(skipPropertyTestReason.isEmpty, skipPropertyTestReason)
 
       // TODO Validate output types
@@ -76,7 +76,7 @@ trait TensorCheckSuite extends ScalaCheckSuite {
       op: Function1[Tensor[In], ?],
       opName: String
   ): Unit =
-    test(propertyTestName(opName)) {
+    property(propertyTestName(opName)) {
       // TODO Validate output types
       val tensorInCase = TypeCase[Tensor[In]]
       forAll(genTensor) {
diff --git a/core/src/test/scala/torch/TensorSuite.scala b/core/src/test/scala/torch/TensorSuite.scala
index cfbabd9c..1bf50bb5 100644
--- a/core/src/test/scala/torch/TensorSuite.scala
+++ b/core/src/test/scala/torch/TensorSuite.scala
@@ -17,18 +17,10 @@
 package torch
 
 import org.scalacheck.Prop.*
-import Generators.{*, given}
-import spire.math.Complex
+import Generators.given
 
 class TensorSuite extends TensorCheckSuite {
 
-  test("arange") {
-    val t0 = arange(0, 10)
-    assertEquals(t0.toSeq, Seq.range(0, 10))
-    val t1 = arange(0, 10, 2)
-    assertEquals(t1.toSeq, Seq.range(0, 10, 2))
-  }
-
   test("tensor properties") {
     val t = ones(Seq(2, 3), dtype = float32)
     assertEquals(t.size, Seq[Int](2, 3))
@@ -36,13 +28,6 @@ class TensorSuite extends TensorCheckSuite {
     assertEquals(t.numel, 2L * 3)
   }
 
-  property("tensor dtypes") {
-    forAll { (dtype: DType) =>
-      val t = ones(Seq(2, 3), dtype)
-      assertEquals(t.dtype, dtype)
-    }
-  }
-
   // property("tensor requiresGrad") {
   //   forAll { (dtype: FloatNN | ComplexNN, requiresGrad: Boolean) =>
   //     val t = ones(Seq(2, 3), dtype, requiresGrad=requiresGrad)
   //   }
   // }
@@ -50,23 +35,6 @@ class TensorSuite extends TensorCheckSuite {
-  property("tensor ones") {
-    forAll(genTensorSize, genDType) { (size, dtype) =>
-      val t = ones(size, dtype)
-      assertEquals(t.dtype, dtype)
-      assertEquals(t.size, size)
-      assertEquals(t.numel, size.product.toLong)
-      assertEquals(t.toSeq.length, size.product.toInt)
-    }
-  }
-
-  test("ones") {
-    val t = ones[Float32](Seq(2, 3))
-    assertEquals(t.size, Seq(2, 3))
-    assertEquals(t.numel, 2L * 3)
-    assertEquals(t.toSeq, Seq.fill[Float](2 * 3)(1f))
-  }
-
   test("exp and log") {
     val t = Tensor(Seq(1.0, 2.0, 3.0))
     assertEquals(t.log(0), Tensor(0.0))
@@ -106,800 +74,6 @@ class TensorSuite extends TensorCheckSuite {
     assertEquals(tensor(---, -1), Tensor(Seq(3, 7, 11, 15)))
   }
 
-  // Random sampling
-
-  test("randn.unit-test") {
-    val randnTensor = randn(Seq(100000))
-    val randnMean = randnTensor.mean
-    val expectedMean = Tensor(0.0).to(dtype = float32)
-    val randnVariance = randnTensor.variance
-    val expectedVariance = Tensor(1.0).to(dtype = float32)
-
-    assert(
-      allclose(randnMean, expectedMean, atol = 1e-2) &&
-        allclose(randnVariance, expectedVariance, atol = 1e-2)
-    )
-  }
-
-  // End Random sampling
-
-  testUnaryOp(
-    op = abs,
-    opName = "abs",
-    inputTensor = Tensor(Seq(-1, -2, 3)),
-    expectedTensor = Tensor(Seq(1, 2, 3))
-  )
-
-  testUnaryOp(
-    op = acos,
-    opName = "acos",
-    inputTensor = Tensor(Seq(0.3348, -0.5889, 0.2005, -0.1584)),
-    expectedTensor = Tensor(Seq(1.2294, 2.2004, 1.3690, 1.7298))
-  )
-
-  testUnaryOp(
-    op = acosh,
-    opName = "acosh",
-    inputTensor = Tensor(Seq(1.3192, 1.9915, 1.9674, 1.7151)),
-    expectedTensor = Tensor(Seq(0.7791, 1.3120, 1.2979, 1.1341))
-  )
-
-  testUnaryOp(
-    op = add(_, other = 20),
-    opName = "add",
-    inputTensor = Tensor(Seq(0.0202, 1.0985, 1.3506, -0.6056)),
-    expectedTensor = Tensor(Seq(20.0202, 21.0985, 21.3506, 19.3944))
-  )
-
-  // TODO addcdiv
-  // TODO addcmul
-
-  testUnaryOp(
-    op = angle,
-    opName = "angle",
-    inputTensor = Tensor(Seq(Complex(-1.0, 1.0), Complex(-2.0, 2.0), Complex(3.0, -3.0))),
-    expectedTensor = Tensor(Seq(2.3562, 2.3562, -0.7854))
-  )
-
-  testUnaryOp(
-    op = asin,
-    opName = "asin",
-    inputTensor = Tensor(Seq(-0.5962, 1.4985, -0.4396, 1.4525)),
-    expectedTensor = Tensor(Seq(-0.6387, Double.NaN, -0.4552, Double.NaN))
-  )
-
-  testUnaryOp(
-    op = asinh,
-    opName = "asinh",
-    inputTensor = Tensor(Seq(0.1606, -1.4267, -1.0899, -1.0250)),
-    expectedTensor = Tensor(Seq(0.1599, -1.1534, -0.9435, -0.8990))
-  )
-
-  testUnaryOp(
-    op = atan,
-    opName = "atan",
-    inputTensor = Tensor(Seq(0.2341, 0.2539, -0.6256, -0.6448)),
-    expectedTensor = Tensor(Seq(0.2299, 0.2487, -0.5591, -0.5727))
-  )
-
-  testUnaryOp(
-    op = atanh,
-    opName = "atanh",
-    inputTensor = Tensor(Seq(-0.9385, 0.2968, -0.8591, -0.1871)),
-    expectedTensor = Tensor(Seq(-1.7253, 0.3060, -1.2899, -0.1893))
-  )
-
-  testBinaryOp(
-    op = atan2,
-    opName = "atan2",
-    inputTensors = (
-      Tensor(Seq(0.9041, 0.0196, -0.3108, -2.4423)),
-      Tensor(Seq(1.3104, -1.5804, 0.6674, 0.7710))
-    ),
-    expectedTensor = Tensor(Seq(0.6039, 3.1292, -0.4358, -1.2650))
-  )
-
-  // TODO Test boolean cases for bitwise operations
-
-  testUnaryOp(
-    op = bitwiseNot,
-    opName = "bitwiseNot",
-    inputTensor = Tensor(Seq(-1, -2, 3)),
-    expectedTensor = Tensor(Seq(0, 1, -4))
-  )
-
-  testBinaryOp(
-    op = bitwiseAnd,
-    opName = "bitwiseAnd",
-    inputTensors = (
-      Tensor(Seq(-1, -2, 3)),
-      Tensor(Seq(1, 0, 3))
-    ),
-    expectedTensor = Tensor(Seq(1, 0, 3))
-  )
-
-  testBinaryOp(
-    op = bitwiseOr,
-    opName = "bitwiseOr",
-    inputTensors = (
-      Tensor(Seq(-1, -2, 3)),
-      Tensor(Seq(1, 0, 3))
-    ),
-    expectedTensor = Tensor(Seq(-1, -2, 3))
-  )
-
-  testBinaryOp(
-    op = bitwiseXor,
-    opName = "bitwiseXor",
-    inputTensors = (
-      Tensor(Seq(-1, -2, 3)),
-      Tensor(Seq(1, 0, 3))
-    ),
-    expectedTensor = Tensor(Seq(-2, -2, 0))
-  )
-
-  // TODO Enable property test once we figure out to consider OnlyOneBool evidence in genDType
-  unitTestBinaryOp(
-    op = bitwiseLeftShift,
-    opName = "bitwiseLeftShift",
-    inputTensors = (
-      Tensor(Seq(-1, -2, 3)),
-      Tensor(Seq(1, 0, 3))
-    ),
-    expectedTensor = Tensor(Seq(-2, -2, 24))
-  )
-
-  // TODO Enable property test once we figure out to consider OnlyOneBool evidence in genDType
-  unitTestBinaryOp(
-    op = bitwiseRightShift,
-    opName = "bitwiseRightShift",
-    inputTensors = (
-      Tensor(Seq(-2, -7, 31)),
-      Tensor(Seq(1, 0, 3))
-    ),
-    expectedTensor = Tensor(Seq(-1, -7, 3))
-  )
-
-  testUnaryOp(
-    op = ceil,
-    opName = "ceil",
-    inputTensor = Tensor(Seq(-0.6341, -1.4208, -1.0900, 0.5826)),
-    expectedTensor = Tensor(Seq(-0.0, -1.0, -1.0, 1.0))
-  )
-
-  // TODO test min max inputs
-  testUnaryOp(
-    op = clamp(_, min = Some(-0.5), max = Some(0.5)),
-    opName = "clamp",
-    inputTensor = Tensor(Seq(-1.7120, 0.1734, -0.0478, -0.0922)),
-    expectedTensor = Tensor(Seq(-0.5, 0.1734, -0.0478, -0.0922))
-  )
-
-  testUnaryOp(
-    op = conjPhysical,
-    opName = "conjPhysical",
-    inputTensor = Tensor(Seq(Complex(-1.0, 1.0), Complex(-2.0, 2.0), Complex(3.0, -3.0))),
-    expectedTensor = Tensor(Seq(Complex(-1.0, -1.0), Complex(-2.0, -2.0), Complex(3.0, 3.0)))
-  )
-
-  testBinaryOp(
-    op = copysign,
-    opName = "copysign",
-    inputTensors = (
-      Tensor(Seq(0.7079, 0.2778, -1.0249, 0.5719)),
-      Tensor(Seq(0.2373, 0.3120, 0.3190, -1.1128))
-    ),
-    expectedTensor = Tensor(Seq(0.7079, 0.2778, 1.0249, -0.5719))
-  )
-
-  testUnaryOp(
-    op = cos,
-    opName = "cos",
-    inputTensor = Tensor(Seq(1.4309, 1.2706, -0.8562, 0.9796)),
-    expectedTensor = Tensor(Seq(0.1395, 0.2957, 0.6553, 0.5574))
-  )
-
-  testUnaryOp(
-    op = cosh,
-    opName = "cosh",
-    inputTensor = Tensor(Seq(0.1632, 1.1835, -0.6979, -0.7325)),
-    expectedTensor = Tensor(Seq(1.0133, 1.7860, 1.2536, 1.2805))
-  )
-
-  testUnaryOp(
-    op = deg2rad,
-    opName = "deg2rad",
-    inputTensor = Tensor(Seq(180.0, -180.0, 360.0, -360.0, 90.0, -90.0)),
-    expectedTensor = Tensor(Seq(3.1416, -3.1416, 6.2832, -6.2832, 1.5708, -1.5708))
-  )
-
-  testBinaryOp(
-    op = div,
-    opName = "div",
-    inputTensors = (
-      Tensor(Seq(-0.3711, -1.9353, -0.4605, -0.2917)),
-      Tensor(Seq(0.8032, 0.2930, -0.8113, -0.2308))
-    ),
-    expectedTensor = Tensor(Seq(-0.4620, -6.6051, 0.5676, 1.2639))
-  )
-
-  testUnaryOp(
-    op = digamma,
-    opName = "digamma",
-    inputTensor = Tensor(Seq(1, 0.5)),
-    expectedTensor = Tensor(Seq(-0.5772, -1.9635))
-  )
-
-  testUnaryOp(
-    op = erf,
-    opName = "erf",
-    inputTensor = Tensor(Seq(0, -1.0, 10.0)),
-    expectedTensor = Tensor(Seq(0.0, -0.8427, 1.0))
-  )
-
-  testUnaryOp(
-    op = erfc,
-    opName = "erfc",
-    inputTensor = Tensor(Seq(0, -1.0, 10.0)),
-    expectedTensor = Tensor(Seq(1.0, 1.8427, 0.0))
-  )
-
-  testUnaryOp(
-    op = erfinv,
-    opName = "erfinv",
-    inputTensor = Tensor(Seq(0.0, 0.5, -1.0)),
-    expectedTensor = Tensor(Seq(0.0, 0.4769, Double.NegativeInfinity))
-  )
-
-  testUnaryOp(
-    op = exp,
-    opName = "exp",
-    inputTensor = Tensor(Seq(0, 0.6931)),
-    expectedTensor = Tensor(Seq(1.0, 2.0))
-  )
-
-  testUnaryOp(
-    op = exp2,
-    opName = "exp2",
-    inputTensor = Tensor(Seq(0.0, 1.0, 3.0, 4.0)),
-    expectedTensor = Tensor(Seq(1.0, 2.0, 8.0, 16.0))
-  )
-
-  testUnaryOp(
-    op = expm1,
-    opName = "expm1",
-    inputTensor = Tensor(Seq(0, 0.6931)),
-    expectedTensor = Tensor(Seq(0.0, 1.0))
-  )
-
-  // TODO fakeQuantizePerChannelAffine
-  // TODO fakeQuantizePerTensorAffine
-
-  testUnaryOp(
-    op = fix,
-    opName = "fix",
-    inputTensor = Tensor(Seq(3.4742, 0.5466, -0.8008, -0.9079)),
-    expectedTensor = Tensor(Seq(3.0, 0.0, -0.0, -0.0))
-  )
-
-  testBinaryOp(
-    op = floatPower,
-    opName = "floatPower",
-    inputTensors = (
-      Tensor(Seq(1, 2, 3, 4)),
-      Tensor(Seq(2, -3, 4, -5))
-    ),
-    expectedTensor = Tensor(Seq(1.0, 0.125, 81.0, 9.7656e-4))
-  )
-
-  testUnaryOp(
-    op = floor,
-    opName = "floor",
-    inputTensor = Tensor(Seq(-0.8166, 1.5308, -0.2530, -0.2091)),
-    expectedTensor = Tensor(Seq(-1.0, 1.0, -1.0, -1.0))
-  )
-
-  // TODO Enable property test once we figure out to consider OnlyOneBool evidence in genDType
-  unitTestBinaryOp(
-    op = floorDivide,
-    opName = "floorDivide",
-    inputTensors = (
-      Tensor(Seq(4.0, 3.0)),
-      Tensor(Seq(2.0, 2.0))
-    ),
-    expectedTensor = Tensor(Seq(2.0, 1.0))
-  )
-
-  // TODO Enable property test once we figure out to consider OnlyOneBool evidence in genDType
-  unitTestBinaryOp(
-    op = fmod,
-    opName = "fmod",
-    inputTensors = (
-      Tensor(Seq(-3.0, -2.0, -1.0, 1.0, 2.0, 3.0)),
-      Tensor(Seq(2.0, 2.0, 2.0, 2.0, 2.0, 2.0))
-    ),
-    expectedTensor = Tensor(Seq(-1.0, -0.0, -1.0, 1.0, 0.0, 1.0))
-  )
-
-  testUnaryOp(
-    op = frac,
-    opName = "frac",
-    inputTensor = Tensor(Seq(1, 2.5, -3.2)),
-    expectedTensor = Tensor(Seq(0.0, 0.5, -0.2))
-  )
-
-  // TODO Handle Tuple Tensor Output
-  // https://pytorch.org/docs/stable/generated/torch.frexp.html
-  // testUnaryOp(
-  //   op = frexp,
-  //   opName = "frexp",
-  //   inputTensor = Tensor(Seq(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0)),
-  //   expectedTensor = Tensor(Seq(0.5724, 0.0, -0.1208))
-  // )
-
-  // TODO gradient
-
-  testUnaryOp(
-    op = imag,
-    opName = "imag",
-    inputTensor = Tensor(
-      Seq(
-        Complex(0.31, 0.3553),
-        Complex(-0.5445, -0.7896),
-        Complex(-1.6492, -0.0633),
-        Complex(-0.0638, -0.8119)
-      )
-    ),
-    expectedTensor = Tensor(Seq(0.3553, -0.7896, -0.0633, -0.8119))
-  )
-
-  testBinaryOp(
-    op = ldexp,
-    opName = "ldexp",
-    inputTensors = (
-      Tensor(Seq(1.0)),
-      Tensor(Seq(1, 2, 3, 4))
-    ),
-    expectedTensor = Tensor(Seq(2.0, 4.0, 8.0, 16.0))
-  )
-
-  // TODO Test weight as tensor
-  // TODO Lerp must accepts the same type so we wrap this for generators to work properly
-  // testBinaryOp(
-  //   op = lerp(_, _, weight = 0.5),
-  //   opName = "lerp",
-  //   inputTensors = (
-  //     Tensor(Seq(1.0, 2.0, 3.0, 4.0)),
-  //     Tensor(Seq(10.0, 10.0, 10.0, 10.0))
-  //   ),
-  //   expectedTensor = Tensor(Seq(5.5, 6.0, 6.5, 7.0))
-  // )
-  unitTestBinaryOp(
-    op = lerp(_, _, weight = 0.5),
-    opName = "lerp",
-    inputTensors = (
-      Tensor(Seq(1.0, 2.0, 3.0, 4.0)),
-      Tensor(Seq(10.0, 10.0, 10.0, 10.0))
-    ),
-    expectedTensor = Tensor(Seq(5.5, 6.0, 6.5, 7.0))
-  )
-
-  testUnaryOp(
-    op = lgamma,
-    opName = "lgamma",
-    inputTensor = Tensor(Seq(0.5, 1.0, 1.5)),
-    expectedTensor = Tensor(Seq(0.5724, 0.0, -0.1208))
-  )
-
-  testUnaryOp(
-    op = log,
-    opName = "log",
-    inputTensor = Tensor(Seq(4.7767, 4.3234, 1.2156, 0.2411, 4.5739)),
-    expectedTensor = Tensor(Seq(1.5637, 1.4640, 0.1952, -1.4226, 1.5204))
-  )
-
-  testUnaryOp(
-    op = log10,
-    opName = "log10",
-    inputTensor = Tensor(Seq(0.5224, 0.9354, 0.7257, 0.1301, 0.2251)),
-    expectedTensor = Tensor(Seq(-0.2820, -0.0290, -0.1392, -0.8857, -0.6476))
-  )
-
-  testUnaryOp(
-    op = log1p,
-    opName = "log1p",
-    inputTensor = Tensor(Seq(-1.0090, -0.9923, 1.0249, -0.5372, 0.2492)),
-    expectedTensor = Tensor(Seq(Double.NaN, -4.8653, 0.7055, -0.7705, 0.2225)),
-    absolutePrecision = 1e-2
-  )
-
-  testUnaryOp(
-    op = log2,
-    opName = "log2",
-    inputTensor = Tensor(Seq(0.8419, 0.8003, 0.9971, 0.5287, 0.0490)),
-    expectedTensor = Tensor(Seq(-0.2483, -0.3213, -0.0042, -0.9196, -4.3504)),
-    absolutePrecision = 1e-2
-  )
-
-  // TODO Enable property test once we figure out to consider OnlyOneBool evidence in genDType
-  unitTestBinaryOp(
-    op = logaddexp,
-    opName = "logaddexp",
-    inputTensors = (
-      Tensor(Seq(-100.0, -200.0, -300.0)),
-      Tensor(Seq(-1.0, -2.0, -3.0))
-    ),
-    expectedTensor = Tensor(Seq(-1.0, -2.0, -3.0))
-  )
-
-  // TODO Enable property test once we figure out to consider OnlyOneBool evidence in genDType
-  unitTestBinaryOp(
-    op = logaddexp2,
-    opName = "logaddexp2",
-    inputTensors = (
-      Tensor(Seq(-100.0, -200.0, -300.0)),
-      Tensor(Seq(-1.0, -2.0, -3.0))
-    ),
-    expectedTensor = Tensor(Seq(-1.0, -2.0, -3.0))
-  )
-
-  // TODO Test int32 tensors
-  testBinaryOp(
-    op = logicalAnd,
-    opName = "logicalAnd",
-    inputTensors = (
-      Tensor(Seq(true, false, true)),
-      Tensor(Seq(true, false, false))
-    ),
-    expectedTensor = Tensor(Seq(true, false, false))
-  )
-
-  // TODO Test int32 tensors
-  testUnaryOp(
-    op = logicalNot,
-    opName = "logicalNot",
-    inputTensor = Tensor(Seq(true, false)),
-    expectedTensor = Tensor(Seq(false, true))
-  )
-
-  // TODO Test int32 tensors
-  testBinaryOp(
-    op = logicalOr,
-    opName = "logicalOr",
-    inputTensors = (
-      Tensor(Seq(true, false, true)),
-      Tensor(Seq(true, false, false))
-    ),
-    expectedTensor = Tensor(Seq(true, false, true))
-  )
-
-  // TODO Test int32 tensors
-  testBinaryOp(
-    op = logicalXor,
-    opName = "logicalXor",
-    inputTensors = (
-      Tensor(Seq(true, false, true)),
-      Tensor(Seq(true, false, false))
-    ),
-    expectedTensor = Tensor(Seq(false, false, true))
-  )
-
-  testUnaryOp(
-    op = logit(_, Some(1e-6)),
-    opName = "logit",
-    inputTensor = Tensor(Seq(0.2796, 0.9331, 0.6486, 0.1523, 0.6516)),
-    expectedTensor = Tensor(Seq(-0.9466, 2.6352, 0.6131, -1.7169, 0.6261)),
-    absolutePrecision = 1e-3
-  )
-
-  // TODO Enable property test once we figure out to compile properly with AtLeastOneFloat
-  unitTestBinaryOp(
-    op = hypot,
-    opName = "hypot",
-    inputTensors = (Tensor(Seq(4.0)), Tensor(Seq(3.0, 4.0, 5.0))),
-    expectedTensor = Tensor(Seq(5.0, 5.6569, 6.4031))
-  )
-
-  testUnaryOp(
-    op = i0,
-    opName = "i0",
-    inputTensor = Tensor(Seq(0.0, 1.0, 2.0, 3.0, 4.0)),
-    expectedTensor = Tensor(Seq(1.0, 1.2661, 2.2796, 4.8808, 11.3019))
-  )
-
-  // TODO Enable property test once we figure out to compile properly with AtLeastOneFloat
-  unitTestBinaryOp(
-    op = igamma,
-    opName = "igamma",
-    inputTensors = (
-      Tensor(Seq(4.0)),
-      Tensor(Seq(3.0, 4.0, 5.0))
-    ),
-    expectedTensor = Tensor(Seq(0.3528, 0.5665, 0.7350))
-  )
-
-  // TODO Enable property test once we figure out to compile properly with AtLeastOneFloat
-  unitTestBinaryOp(
-    op = igammac,
-    opName = "igammac",
-    inputTensors = (
-      Tensor(Seq(4.0)),
-      Tensor(Seq(3.0, 4.0, 5.0))
-    ),
-    expectedTensor = Tensor(Seq(0.6472, 0.4335, 0.2650))
-  )
-
-  testBinaryOp(
-    op = mul,
-    opName = "mul",
-    inputTensors = (
-      Tensor(Seq(1.1207)),
-      Tensor(Seq(0.5146, 0.1216, -0.5244, 2.2382))
-    ),
-    expectedTensor = Tensor(Seq(0.5767, 0.1363, -0.5877, 2.5083))
-  )
-
-  testUnaryOp(
-    op = mvlgamma(_, p = 2),
-    opName = "mvlgamma",
-    inputTensor = Tensor(Seq(1.6835, 1.8474, 1.1929)),
-    expectedTensor = Tensor(Seq(0.3928, 0.4007, 0.7586))
-  )
-
-  // TODO Test nan, posinf, neginf arguments
-  // TODO Test float32
-  testUnaryOp(
-    op = nanToNum(_, nan = None, posinf = None, neginf = None),
-    opName = "nanToNum",
-    inputTensor = Tensor(Seq(Double.NaN, Double.PositiveInfinity, Double.NegativeInfinity, 3.14)),
-    expectedTensor = Tensor(Seq(0.0, 1.7976931348623157e308, -1.7976931348623157e308, 3.14))
-  )
-
-  testUnaryOp(
-    op = neg,
-    opName = "neg",
-    inputTensor = Tensor(Seq(0.0090, -0.2262, -0.0682, -0.2866, 0.3940)),
-    expectedTensor = Tensor(Seq(-0.0090, 0.2262, 0.0682, 0.2866, -0.3940))
-  )
-
-  // TODO Enable property test once we figure out to compile properly with AtLeastOneFloat
-  // TODO Fix this unit test, as is not really significant due to fp precision
-  unitTestBinaryOp(
-    op = nextafter,
-    opName = "nextafter",
-    inputTensors = (
-      Tensor(Seq(1.0, 2.0)),
-      Tensor(Seq(2.0, 1.0))
-    ),
-    expectedTensor = Tensor(Seq(1.0, 2.0)),
-    absolutePrecision = 1e-8
-  )
-
-  // TODO Test multiple values of `n`
-  testUnaryOp(
-    op = polygamma(1, _),
-    opName = "polygamma",
-    inputTensor = Tensor(Seq(1.0, 0.5)),
-    expectedTensor = Tensor(Seq(1.64493, 4.9348))
-  )
-
-  testUnaryOp(
-    op = positive,
-    opName = "positive",
-    inputTensor = Tensor(Seq(0.0090, -0.2262, -0.0682, -0.2866, 0.3940)),
-    expectedTensor = Tensor(Seq(0.0090, -0.2262, -0.0682, -0.2866, 0.3940))
-  )
-
-  // TODO Test scalar exponent
-  // TODO Enable property test once we figure out to consider OnlyOneBool evidence in genDType
-  unitTestBinaryOp(
-    op = pow,
-    opName = "pow",
-    inputTensors = (
-      Tensor(Seq(1.0, 2.0, 3.0, 4.0)),
-      Tensor(Seq(1.0, 2.0, 3.0, 4.0))
-    ),
- expectedTensor = Tensor(Seq(1.0, 4.0, 27.0, 256.0)) - ) - - // TODO quantized_batch_norm - // TODO quantized_max_pool1d - // TODO quantized_max_pool2d - - testUnaryOp( - op = rad2Deg, - opName = "rad2Deg", - inputTensor = Tensor(Seq(3.142, -3.142, 6.283, -6.283, 1.570, -1.570)), - expectedTensor = Tensor(Seq(180.0233, -180.0233, 359.9894, -359.9894, 89.9544, -89.9544)) - ) - - testUnaryOp( - op = real, - opName = "real", - inputTensor = Tensor( - Seq( - Complex(0.31, 0.3553), - Complex(-0.5445, -0.7896), - Complex(-1.6492, -0.0633), - Complex(-0.0638, -0.8119) - ) - ), - expectedTensor = Tensor(Seq(0.3100, -0.5445, -1.6492, -0.0638)) - ) - - testUnaryOp( - op = reciprocal, - opName = "reciprocal", - inputTensor = Tensor(Seq(-0.4595, -2.1219, -1.4314, 0.7298)), - expectedTensor = Tensor(Seq(-2.1763, -0.4713, -0.6986, 1.3702)) - ) - - // TODO Enable property test once we figure out to consider OnlyOneBool evidence in genDType - // propertyTestBinaryOp(remainder, "remainder") - test("remainder.unit-test") { - val result = remainder(Tensor(Seq(-3.0, -2.0, -1.0, 1.0, 2.0, 3.0)), 2) - val expected = Tensor(Seq(1.0, 0.0, 1.0, 1.0, 0.0, 1.0)) - assert(allclose(result, expected)) - - val result2 = remainder(-1.5, Tensor(Seq(1, 2, 3, 4, 5))).to(dtype = float64) - val expected2 = Tensor(Seq(0.5, 0.5, 1.5, 2.5, 3.5)) - assert(allclose(result2, expected2)) - - val result3 = remainder(Tensor(Seq(1, 2, 3, 4, 5)), Tensor(Seq(1, 2, 3, 4, 5))) - val expected3 = Tensor(Seq(0, 0, 0, 0, 0)) - println(expected3) - assert(allclose(result3, expected3)) - } - - testUnaryOp( - op = round(_, decimals = 0), - opName = "round", - inputTensor = Tensor(Seq(4.7, -2.3, 9.1, -7.7)), - expectedTensor = Tensor(Seq(5.0, -2.0, 9.0, -8.0)) - ) - test("round.unit-test.decimals") { - val input = Tensor(Seq(0.1234567)) - val result = round(input, decimals = 3) - assert(allclose(result, Tensor(Seq(0.123)), atol = 1e-3)) - } - - testUnaryOp( - op = rsqrt, - opName = "rsqrt", - inputTensor = Tensor(Seq(-0.0370, 0.2970, 1.5420, -0.9105)), - expectedTensor = Tensor(Seq(Double.NaN, 1.8351, 0.8053, Double.NaN)), - absolutePrecision = 1e-3 - ) - - testUnaryOp( - op = sigmoid, - opName = "sigmoid", - inputTensor = Tensor(Seq(0.9213, 1.0887, -0.8858, -1.7683)), - expectedTensor = Tensor(Seq(0.7153, 0.7481, 0.2920, 0.1458)) - ) - - testUnaryOp( - op = sign, - opName = "sign", - inputTensor = Tensor(Seq(0.7, -1.2, 0.0, 2.3)), - expectedTensor = Tensor(Seq(1.0, -1.0, 0.0, 1.0)) - ) - - testUnaryOp( - op = sgn, - opName = "sgn", - inputTensor = - Tensor(Seq(Complex(3.0, 4.0), Complex(7.0, -24.0), Complex(0.0, 0.0), Complex(1.0, 2.0))), - expectedTensor = Tensor( - Seq(Complex(0.6, 0.8), Complex(0.28, -0.96), Complex(0.0, 0.0), Complex(0.4472, 0.8944)) - ) - ) - - testUnaryOp( - op = signbit, - opName = "signbit", - inputTensor = Tensor(Seq(0.7, -1.2, 0.0, -0.0, 2.3)), - expectedTensor = Tensor(Seq(false, true, false, true, false)) - ) - - testUnaryOp( - op = sin, - opName = "sin", - inputTensor = Tensor(Seq(-0.5461, 0.1347, -2.7266, -0.2746)), - expectedTensor = Tensor(Seq(-0.5194, 0.1343, -0.4032, -0.2711)) - ) - - testUnaryOp( - op = sinc, - opName = "sinc", - inputTensor = Tensor(Seq(0.2252, -0.2948, 1.0267, -1.1566)), - expectedTensor = Tensor(Seq(0.9186, 0.8631, -0.0259, -0.1300)) - ) - - testUnaryOp( - op = sinh, - opName = "sinh", - inputTensor = Tensor(Seq(0.5380, -0.8632, -0.1265, 0.9399)), - expectedTensor = Tensor(Seq(0.5644, -0.9744, -0.1268, 1.0845)) - ) - - testUnaryOp( - op = sqrt, - opName = "sqrt", - inputTensor = 
Tensor(Seq(-2.0755, 1.0226, 0.0831, 0.4806)), - expectedTensor = Tensor(Seq(Double.NaN, 1.0112, 0.2883, 0.6933)) - ) - - testUnaryOp( - op = square, - opName = "square", - inputTensor = Tensor(Seq(-2.0755, 1.0226, 0.0831, 0.4806)), - expectedTensor = Tensor(Seq(4.3077, 1.0457, 0.0069, 0.2310)) - ) - - testBinaryOp( - op = sub, - opName = "sub", - inputTensors = ( - Tensor(Seq(1, 2)), - Tensor(Seq(0, 1)) - ), - expectedTensor = Tensor(Seq(1, 1)) - ) - test("sub.unit-test.alpha") { - val a = Tensor(Seq(1, 2)) - val b = Tensor(Seq(0, 1)) - val resAlpha = sub(a, b, alpha = 2) - assertEquals( - resAlpha, - Tensor(Seq(1, 0)) - ) - } - - testUnaryOp( - op = tan, - opName = "tan", - inputTensor = Tensor(Seq(-1.2027, -1.7687, 0.4412, -1.3856)), - expectedTensor = Tensor(Seq(-2.5930, 4.9859, 0.4722, -5.3366)), - absolutePrecision = 1e-2 - ) - - testUnaryOp( - op = tanh, - opName = "tanh", - inputTensor = Tensor(Seq(0.8986, -0.7279, 1.1745, 0.2611)), - expectedTensor = Tensor(Seq(0.7156, -0.6218, 0.8257, 0.2553)) - ) - - testBinaryOp( - op = trueDivide, - opName = "trueDivide", - inputTensors = ( - Tensor(Seq(-0.3711, -1.9353, -0.4605, -0.2917)), - Tensor(Seq(0.8032, 0.2930, -0.8113, -0.2308)) - ), - expectedTensor = Tensor(Seq(-0.4620, -6.6051, 0.5676, 1.2639)) - ) - - testUnaryOp( - op = trunc, - opName = "trunc", - inputTensor = Tensor(Seq(3.4742, 0.5466, -0.8008, -0.9079)), - expectedTensor = Tensor(Seq(3.0, 0.0, -0.0, -0.0)) - ) - - testBinaryOp( - op = xlogy, - opName = "xlogy", - inputTensors = ( - Tensor(Seq(0, 0, 0, 0, 0)), - Tensor(Seq(-1.0, 0.0, 1.0, Double.PositiveInfinity, Double.NaN)) - ), - expectedTensor = Tensor(Seq(0.0, 0.0, 0.0, 0.0, Double.NaN)) - ) test("Tensor creation properly handling buffers") { val value = 100L val data = Seq.fill(10000)(value) diff --git a/core/src/test/scala/torch/nn/functional/SparseSuite.scala b/core/src/test/scala/torch/nn/functional/SparseSuite.scala new file mode 100644 index 00000000..d9e7bab3 --- /dev/null +++ b/core/src/test/scala/torch/nn/functional/SparseSuite.scala @@ -0,0 +1,31 @@ +/* + * Copyright 2022 storch.dev + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package torch +package nn +package functional + +class SparseSuite extends TensorCheckSuite { + + // TODO Test multi-dimensional tensors + testUnaryOp( + op = nn.functional.oneHot(_, numClasses = 6), + opName = "nn.functional.oneHot", + inputTensor = Tensor(3L), + expectedTensor = Tensor(Seq(0L, 0L, 0L, 1L, 0L, 0L)) + ) + +} diff --git a/core/src/test/scala/torch/ops/CreationOpsSuite.scala b/core/src/test/scala/torch/ops/CreationOpsSuite.scala new file mode 100644 index 00000000..310a446e --- /dev/null +++ b/core/src/test/scala/torch/ops/CreationOpsSuite.scala @@ -0,0 +1,48 @@ +/* + * Copyright 2022 storch.dev + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package torch + +import org.scalacheck.Prop.* +import Generators.* + +class CreationOpsSuite extends TensorCheckSuite { + + test("arange.unit-test") { + val t0 = arange(0, 10) + assertEquals(t0.toSeq, Seq.range(0, 10)) + val t1 = arange(0, 10, 2) + assertEquals(t1.toSeq, Seq.range(0, 10, 2)) + } + + property("ones.property-test") { + forAll(genTensorSize, genDType) { (size, dtype) => + val t = ones(size, dtype) + assertEquals(t.dtype, dtype) + assertEquals(t.size, size) + assertEquals(t.numel, size.product.toLong) + assertEquals(t.toSeq.length, size.product.toInt) + } + } + + test("ones.unit-test") { + val t = ones[Float32](Seq(2, 3)) + assertEquals(t.size, Seq(2, 3)) + assertEquals(t.numel, 2L * 3) + assertEquals(t.toSeq, Seq.fill[Float](2 * 3)(1f)) + } + +} diff --git a/core/src/test/scala/torch/ops/PointwiseOpsSuite.scala b/core/src/test/scala/torch/ops/PointwiseOpsSuite.scala new file mode 100644 index 00000000..3c24815a --- /dev/null +++ b/core/src/test/scala/torch/ops/PointwiseOpsSuite.scala @@ -0,0 +1,826 @@ +/* + * Copyright 2022 storch.dev + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package torch
+
+import spire.math.Complex
+
+class PointwiseOpsSuite extends TensorCheckSuite {
+
+  testUnaryOp(
+    op = abs,
+    opName = "abs",
+    inputTensor = Tensor(Seq(-1, -2, 3)),
+    expectedTensor = Tensor(Seq(1, 2, 3))
+  )
+
+  testUnaryOp(
+    op = acos,
+    opName = "acos",
+    inputTensor = Tensor(Seq(0.3348, -0.5889, 0.2005, -0.1584)),
+    expectedTensor = Tensor(Seq(1.2294, 2.2004, 1.3690, 1.7298))
+  )
+
+  testUnaryOp(
+    op = acosh,
+    opName = "acosh",
+    inputTensor = Tensor(Seq(1.3192, 1.9915, 1.9674, 1.7151)),
+    expectedTensor = Tensor(Seq(0.7791, 1.3120, 1.2979, 1.1341))
+  )
+
+  testUnaryOp(
+    op = add(_, other = 20),
+    opName = "add",
+    inputTensor = Tensor(Seq(0.0202, 1.0985, 1.3506, -0.6056)),
+    expectedTensor = Tensor(Seq(20.0202, 21.0985, 21.3506, 19.3944))
+  )
+
+  // TODO addcdiv
+  // TODO addcmul
+
+  testUnaryOp(
+    op = angle,
+    opName = "angle",
+    inputTensor = Tensor(Seq(Complex(-1.0, 1.0), Complex(-2.0, 2.0), Complex(3.0, -3.0))),
+    expectedTensor = Tensor(Seq(2.3562, 2.3562, -0.7854))
+  )
+
+  testUnaryOp(
+    op = asin,
+    opName = "asin",
+    inputTensor = Tensor(Seq(-0.5962, 1.4985, -0.4396, 1.4525)),
+    expectedTensor = Tensor(Seq(-0.6387, Double.NaN, -0.4552, Double.NaN))
+  )
+  testUnaryOp(
+    op = asinh,
+    opName = "asinh",
+    inputTensor = Tensor(Seq(0.1606, -1.4267, -1.0899, -1.0250)),
+    expectedTensor = Tensor(Seq(0.1599, -1.1534, -0.9435, -0.8990))
+  )
+  testUnaryOp(
+    op = atan,
+    opName = "atan",
+    inputTensor = Tensor(Seq(0.2341, 0.2539, -0.6256, -0.6448)),
+    expectedTensor = Tensor(Seq(0.2299, 0.2487, -0.5591, -0.5727))
+  )
+  testUnaryOp(
+    op = atanh,
+    opName = "atanh",
+    inputTensor = Tensor(Seq(-0.9385, 0.2968, -0.8591, -0.1871)),
+    expectedTensor = Tensor(Seq(-1.7253, 0.3060, -1.2899, -0.1893))
+  )
+
+  testBinaryOp(
+    op = atan2,
+    opName = "atan2",
+    inputTensors = (
+      Tensor(Seq(0.9041, 0.0196, -0.3108, -2.4423)),
+      Tensor(Seq(1.3104, -1.5804, 0.6674, 0.7710))
+    ),
+    expectedTensor = Tensor(Seq(0.6039, 3.1292, -0.4358, -1.2650))
+  )
+
+  // TODO Test boolean cases for bitwise operations
+
+  testUnaryOp(
+    op = bitwiseNot,
+    opName = "bitwiseNot",
+    inputTensor = Tensor(Seq(-1, -2, 3)),
+    expectedTensor = Tensor(Seq(0, 1, -4))
+  )
+
+  testBinaryOp(
+    op = bitwiseAnd,
+    opName = "bitwiseAnd",
+    inputTensors = (
+      Tensor(Seq(-1, -2, 3)),
+      Tensor(Seq(1, 0, 3))
+    ),
+    expectedTensor = Tensor(Seq(1, 0, 3))
+  )
+
+  testBinaryOp(
+    op = bitwiseOr,
+    opName = "bitwiseOr",
+    inputTensors = (
+      Tensor(Seq(-1, -2, 3)),
+      Tensor(Seq(1, 0, 3))
+    ),
+    expectedTensor = Tensor(Seq(-1, -2, 3))
+  )
+
+  testBinaryOp(
+    op = bitwiseXor,
+    opName = "bitwiseXor",
+    inputTensors = (
+      Tensor(Seq(-1, -2, 3)),
+      Tensor(Seq(1, 0, 3))
+    ),
+    expectedTensor = Tensor(Seq(-2, -2, 0))
+  )
+
+  // TODO Enable property test once we figure out how to consider OnlyOneBool evidence in genDType
+  unitTestBinaryOp(
+    op = bitwiseLeftShift,
+    opName = "bitwiseLeftShift",
+    inputTensors = (
+      Tensor(Seq(-1, -2, 3)),
+      Tensor(Seq(1, 0, 3))
+    ),
+    expectedTensor = Tensor(Seq(-2, -2, 24))
+  )
+
+  // TODO Enable property test once we figure out how to consider OnlyOneBool evidence in genDType
+  unitTestBinaryOp(
+    op = bitwiseRightShift,
+    opName = "bitwiseRightShift",
+    inputTensors = (
+      Tensor(Seq(-2, -7, 31)),
+      Tensor(Seq(1, 0, 3))
+    ),
+    expectedTensor = Tensor(Seq(-1, -7, 3))
+  )
+
+  testUnaryOp(
+    op = ceil,
+    opName =
"ceil", + inputTensor = Tensor(Seq(-0.6341, -1.4208, -1.0900, 0.5826)), + expectedTensor = Tensor(Seq(-0.0, -1.0, -1.0, 1.0)) + ) + + // TODO test min max inputs + testUnaryOp( + op = clamp(_, min = Some(-0.5), max = Some(0.5)), + opName = "clamp", + inputTensor = Tensor(Seq(-1.7120, 0.1734, -0.0478, -0.0922)), + expectedTensor = Tensor(Seq(-0.5, 0.1734, -0.0478, -0.0922)) + ) + + testUnaryOp( + op = conjPhysical, + opName = "conjPhysical", + inputTensor = Tensor(Seq(Complex(-1.0, 1.0), Complex(-2.0, 2.0), Complex(3.0, -3.0))), + expectedTensor = Tensor(Seq(Complex(-1.0, -1.0), Complex(-2.0, -2.0), Complex(3.0, 3.0))) + ) + + testBinaryOp( + op = copysign, + opName = "copysign", + inputTensors = ( + Tensor(Seq(0.7079, 0.2778, -1.0249, 0.5719)), + Tensor(Seq(0.2373, 0.3120, 0.3190, -1.1128)) + ), + expectedTensor = Tensor(Seq(0.7079, 0.2778, 1.0249, -0.5719)) + ) + + testUnaryOp( + op = cos, + opName = "cos", + inputTensor = Tensor(Seq(1.4309, 1.2706, -0.8562, 0.9796)), + expectedTensor = Tensor(Seq(0.1395, 0.2957, 0.6553, 0.5574)) + ) + + testUnaryOp( + op = cosh, + opName = "cosh", + inputTensor = Tensor(Seq(0.1632, 1.1835, -0.6979, -0.7325)), + expectedTensor = Tensor(Seq(1.0133, 1.7860, 1.2536, 1.2805)) + ) + + testUnaryOp( + op = deg2rad, + opName = "deg2rad", + inputTensor = Tensor(Seq(180.0, -180.0, 360.0, -360.0, 90.0, -90.0)), + expectedTensor = Tensor(Seq(3.1416, -3.1416, 6.2832, -6.2832, 1.5708, -1.5708)) + ) + + testBinaryOp( + op = div, + opName = "div", + inputTensors = ( + Tensor(Seq(-0.3711, -1.9353, -0.4605, -0.2917)), + Tensor(Seq(0.8032, 0.2930, -0.8113, -0.2308)) + ), + expectedTensor = Tensor(Seq(-0.4620, -6.6051, 0.5676, 1.2639)) + ) + + testUnaryOp( + op = digamma, + opName = "digamma", + inputTensor = Tensor(Seq(1, 0.5)), + expectedTensor = Tensor(Seq(-0.5772, -1.9635)) + ) + + testUnaryOp( + op = erf, + opName = "erf", + inputTensor = Tensor(Seq(0, -1.0, 10.0)), + expectedTensor = Tensor(Seq(0.0, -0.8427, 1.0)) + ) + + testUnaryOp( + op = erfc, + opName = "erfc", + inputTensor = Tensor(Seq(0, -1.0, 10.0)), + expectedTensor = Tensor(Seq(1.0, 1.8427, 0.0)) + ) + + testUnaryOp( + op = erfinv, + opName = "erfinv", + inputTensor = Tensor(Seq(0.0, 0.5, -1.0)), + expectedTensor = Tensor(Seq(0.0, 0.4769, Double.NegativeInfinity)) + ) + + testUnaryOp( + op = exp, + opName = "exp", + inputTensor = Tensor(Seq(0, 0.6931)), + expectedTensor = Tensor(Seq(1.0, 2.0)) + ) + + testUnaryOp( + op = exp2, + opName = "exp2", + inputTensor = Tensor(Seq(0.0, 1.0, 3.0, 4.0)), + expectedTensor = Tensor(Seq(1.0, 2.0, 8.0, 16.0)) + ) + + testUnaryOp( + op = expm1, + opName = "expm1", + inputTensor = Tensor(Seq(0, 0.6931)), + expectedTensor = Tensor(Seq(0.0, 1.0)) + ) + + // TODO fakeQuantizePerChannelAffine + // TODO fakeQuantizePerTensorAffine + + testUnaryOp( + op = fix, + opName = "fix", + inputTensor = Tensor(Seq(3.4742, 0.5466, -0.8008, -0.9079)), + expectedTensor = Tensor(Seq(3.0, 0.0, -0.0, -0.0)) + ) + + testBinaryOp( + op = floatPower, + opName = "floatPower", + inputTensors = ( + Tensor(Seq(1, 2, 3, 4)), + Tensor(Seq(2, -3, 4, -5)) + ), + expectedTensor = Tensor(Seq(1.0, 0.125, 81.0, 9.7656e-4)) + ) + + testUnaryOp( + op = floor, + opName = "floor", + inputTensor = Tensor(Seq(-0.8166, 1.5308, -0.2530, -0.2091)), + expectedTensor = Tensor(Seq(-1.0, 1.0, -1.0, -1.0)) + ) + + // TODO Enable property test once we figure out to consider OnlyOneBool evidence in genDType + unitTestBinaryOp( + op = floorDivide, + opName = "floorDivide", + inputTensors = ( + Tensor(Seq(4.0, 3.0)), + 
Tensor(Seq(2.0, 2.0))
+    ),
+    expectedTensor = Tensor(Seq(2.0, 1.0))
+  )
+
+  // TODO Enable property test once we figure out how to consider OnlyOneBool evidence in genDType
+  unitTestBinaryOp(
+    op = fmod,
+    opName = "fmod",
+    inputTensors = (
+      Tensor(Seq(-3.0, -2.0, -1.0, 1.0, 2.0, 3.0)),
+      Tensor(Seq(2.0, 2.0, 2.0, 2.0, 2.0, 2.0))
+    ),
+    expectedTensor = Tensor(Seq(-1.0, -0.0, -1.0, 1.0, 0.0, 1.0))
+  )
+
+  testUnaryOp(
+    op = frac,
+    opName = "frac",
+    inputTensor = Tensor(Seq(1, 2.5, -3.2)),
+    expectedTensor = Tensor(Seq(0.0, 0.5, -0.2))
+  )
+
+  propertyTestUnaryOp(
+    op = frexp,
+    opName = "frexp"
+  )
+  test("frexp.unit-test") {
+    val input = arange(0.0, 9.0)
+    val expectedMantissa =
+      Tensor(Seq(0.0, 0.5, 0.5, 0.75, 0.5, 0.6250, 0.75, 0.8750, 0.5)).to(dtype = float32)
+    val expectedExponent = Tensor(Seq(0, 1, 2, 2, 3, 3, 3, 3, 4))
+    val (mantissa, exponent) = frexp(input)
+    assert(
+      allclose(mantissa, expectedMantissa) &&
+        allclose(exponent, expectedExponent)
+    )
+  }
+
+  propertyTestUnaryOp(
+    op = gradient(_, 1.0, Seq(0), 1),
+    opName = "gradient"
+  )
+  test("gradient.unit-test") {
+    val input = Tensor(Seq(1, 2, 4, 8, 10, 20, 40, 80)).view(-1, 4)
+    val results = gradient(input, spacing = 1, dim = Seq(0))
+    // The input views as the two rows (1, 2, 4, 8) and (10, 20, 40, 80). The first expected
+    // tensor holds the one-sided differences along dim 0, e.g. (10 - 1) / 1 = 9; the second
+    // holds the differences along dim 1, e.g. the central difference (4 - 1) / 2 = 1.5.
+    val expectedTensors = Seq(
+      Tensor(Seq(9.0, 18.0, 36.0, 72.0, 9.0, 18.0, 36.0, 72.0)).view(-1, 4).to(dtype = float32),
+      Tensor(Seq(1.0, 1.5, 3.0, 4.0, 10.0, 15.0, 30.0, 40.0)).view(-1, 4).to(dtype = float32)
+    )
+
+    assert(
+      results.zip(expectedTensors).forall { (result, expectedTensor) =>
+        allclose(result, expectedTensor)
+      }
+    )
+  }
+
+  testUnaryOp(
+    op = imag,
+    opName = "imag",
+    inputTensor = Tensor(
+      Seq(
+        Complex(0.31, 0.3553),
+        Complex(-0.5445, -0.7896),
+        Complex(-1.6492, -0.0633),
+        Complex(-0.0638, -0.8119)
+      )
+    ),
+    expectedTensor = Tensor(Seq(0.3553, -0.7896, -0.0633, -0.8119))
+  )
+
+  testBinaryOp(
+    op = ldexp,
+    opName = "ldexp",
+    inputTensors = (
+      Tensor(Seq(1.0)),
+      Tensor(Seq(1, 2, 3, 4))
+    ),
+    expectedTensor = Tensor(Seq(2.0, 4.0, 8.0, 16.0))
+  )
+
+  // TODO Test weight as tensor
+  // TODO Lerp requires both inputs to have the same type, so we need to fix the generators to work properly
+  // testBinaryOp(
+  //   op = lerp(_, _, weight = 0.5),
+  //   opName = "lerp",
+  //   inputTensors = (
+  //     Tensor(Seq(1.0, 2.0, 3.0, 4.0)),
+  //     Tensor(Seq(10.0, 10.0, 10.0, 10.0))
+  //   ),
+  //   expectedTensor = Tensor(Seq(5.5, 6.0, 6.5, 7.0))
+  // )
+  unitTestBinaryOp(
+    op = lerp(_, _, weight = 0.5),
+    opName = "lerp",
+    inputTensors = (
+      Tensor(Seq(1.0, 2.0, 3.0, 4.0)),
+      Tensor(Seq(10.0, 10.0, 10.0, 10.0))
+    ),
+    expectedTensor = Tensor(Seq(5.5, 6.0, 6.5, 7.0))
+  )
+
+  testUnaryOp(
+    op = lgamma,
+    opName = "lgamma",
+    inputTensor = Tensor(Seq(0.5, 1.0, 1.5)),
+    expectedTensor = Tensor(Seq(0.5724, 0.0, -0.1208))
+  )
+
+  testUnaryOp(
+    op = log,
+    opName = "log",
+    inputTensor = Tensor(Seq(4.7767, 4.3234, 1.2156, 0.2411, 4.5739)),
+    expectedTensor = Tensor(Seq(1.5637, 1.4640, 0.1952, -1.4226, 1.5204))
+  )
+
+  testUnaryOp(
+    op = log10,
+    opName = "log10",
+    inputTensor = Tensor(Seq(0.5224, 0.9354, 0.7257, 0.1301, 0.2251)),
+    expectedTensor = Tensor(Seq(-0.2820, -0.0290, -0.1392, -0.8857, -0.6476))
+  )
+
+  testUnaryOp(
+    op = log1p,
+    opName = "log1p",
+    inputTensor = Tensor(Seq(-1.0090, -0.9923, 1.0249, -0.5372, 0.2492)),
+    expectedTensor = Tensor(Seq(Double.NaN, -4.8653, 0.7055, -0.7705, 0.2225)),
+    absolutePrecision = 1e-2
+  )
+
+  testUnaryOp(
+    op = log2,
+    opName = "log2",
+    inputTensor = Tensor(Seq(0.8419, 0.8003, 0.9971, 0.5287, 0.0490)),
+    expectedTensor = Tensor(Seq(-0.2483,
-0.3213, -0.0042, -0.9196, -4.3504)),
+    absolutePrecision = 1e-2
+  )
+
+  // TODO Enable property test once we figure out how to consider OnlyOneBool evidence in genDType
+  unitTestBinaryOp(
+    op = logaddexp,
+    opName = "logaddexp",
+    inputTensors = (
+      Tensor(Seq(-100.0, -200.0, -300.0)),
+      Tensor(Seq(-1.0, -2.0, -3.0))
+    ),
+    expectedTensor = Tensor(Seq(-1.0, -2.0, -3.0))
+  )
+
+  // TODO Enable property test once we figure out how to consider OnlyOneBool evidence in genDType
+  unitTestBinaryOp(
+    op = logaddexp2,
+    opName = "logaddexp2",
+    inputTensors = (
+      Tensor(Seq(-100.0, -200.0, -300.0)),
+      Tensor(Seq(-1.0, -2.0, -3.0))
+    ),
+    expectedTensor = Tensor(Seq(-1.0, -2.0, -3.0))
+  )
+
+  // TODO Test int32 tensors
+  testBinaryOp(
+    op = logicalAnd,
+    opName = "logicalAnd",
+    inputTensors = (
+      Tensor(Seq(true, false, true)),
+      Tensor(Seq(true, false, false))
+    ),
+    expectedTensor = Tensor(Seq(true, false, false))
+  )
+
+  // TODO Test int32 tensors
+  testUnaryOp(
+    op = logicalNot,
+    opName = "logicalNot",
+    inputTensor = Tensor(Seq(true, false)),
+    expectedTensor = Tensor(Seq(false, true))
+  )
+
+  // TODO Test int32 tensors
+  testBinaryOp(
+    op = logicalOr,
+    opName = "logicalOr",
+    inputTensors = (
+      Tensor(Seq(true, false, true)),
+      Tensor(Seq(true, false, false))
+    ),
+    expectedTensor = Tensor(Seq(true, false, true))
+  )
+
+  // TODO Test int32 tensors
+  testBinaryOp(
+    op = logicalXor,
+    opName = "logicalXor",
+    inputTensors = (
+      Tensor(Seq(true, false, true)),
+      Tensor(Seq(true, false, false))
+    ),
+    expectedTensor = Tensor(Seq(false, false, true))
+  )
+
+  testUnaryOp(
+    op = logit(_, Some(1e-6)),
+    opName = "logit",
+    inputTensor = Tensor(Seq(0.2796, 0.9331, 0.6486, 0.1523, 0.6516)),
+    expectedTensor = Tensor(Seq(-0.9466, 2.6352, 0.6131, -1.7169, 0.6261)),
+    absolutePrecision = 1e-3
+  )
+
+  // TODO Enable property test once we figure out how to compile properly with AtLeastOneFloat
+  unitTestBinaryOp(
+    op = hypot,
+    opName = "hypot",
+    inputTensors = (Tensor(Seq(4.0)), Tensor(Seq(3.0, 4.0, 5.0))),
+    expectedTensor = Tensor(Seq(5.0, 5.6569, 6.4031))
+  )
+
+  testUnaryOp(
+    op = i0,
+    opName = "i0",
+    inputTensor = Tensor(Seq(0.0, 1.0, 2.0, 3.0, 4.0)),
+    expectedTensor = Tensor(Seq(1.0, 1.2661, 2.2796, 4.8808, 11.3019))
+  )
+
+  // TODO Enable property test once we figure out how to compile properly with AtLeastOneFloat
+  unitTestBinaryOp(
+    op = igamma,
+    opName = "igamma",
+    inputTensors = (
+      Tensor(Seq(4.0)),
+      Tensor(Seq(3.0, 4.0, 5.0))
+    ),
+    expectedTensor = Tensor(Seq(0.3528, 0.5665, 0.7350))
+  )
+
+  // TODO Enable property test once we figure out how to compile properly with AtLeastOneFloat
+  unitTestBinaryOp(
+    op = igammac,
+    opName = "igammac",
+    inputTensors = (
+      Tensor(Seq(4.0)),
+      Tensor(Seq(3.0, 4.0, 5.0))
+    ),
+    expectedTensor = Tensor(Seq(0.6472, 0.4335, 0.2650))
+  )
+
+  testBinaryOp(
+    op = mul,
+    opName = "mul",
+    inputTensors = (
+      Tensor(Seq(1.1207)),
+      Tensor(Seq(0.5146, 0.1216, -0.5244, 2.2382))
+    ),
+    expectedTensor = Tensor(Seq(0.5767, 0.1363, -0.5877, 2.5083))
+  )
+
+  testUnaryOp(
+    op = mvlgamma(_, p = 2),
+    opName = "mvlgamma",
+    inputTensor = Tensor(Seq(1.6835, 1.8474, 1.1929)),
+    expectedTensor = Tensor(Seq(0.3928, 0.4007, 0.7586))
+  )
+
+  // TODO Test nan, posinf, neginf arguments
+  // TODO Test float32
+  testUnaryOp(
+    op = nanToNum(_, nan = None, posinf = None, neginf = None),
+    opName = "nanToNum",
+    inputTensor = Tensor(Seq(Double.NaN, Double.PositiveInfinity, Double.NegativeInfinity, 3.14)),
+    expectedTensor = Tensor(Seq(0.0, 1.7976931348623157e308,
-1.7976931348623157e308, 3.14))
+  )
+
+  testUnaryOp(
+    op = neg,
+    opName = "neg",
+    inputTensor = Tensor(Seq(0.0090, -0.2262, -0.0682, -0.2866, 0.3940)),
+    expectedTensor = Tensor(Seq(-0.0090, 0.2262, 0.0682, 0.2866, -0.3940))
+  )
+
+  // TODO Enable property test once we figure out how to compile properly with AtLeastOneFloat
+  // TODO Fix this unit test, as it is not really significant due to fp precision
+  unitTestBinaryOp(
+    op = nextafter,
+    opName = "nextafter",
+    inputTensors = (
+      Tensor(Seq(1.0, 2.0)),
+      Tensor(Seq(2.0, 1.0))
+    ),
+    expectedTensor = Tensor(Seq(1.0, 2.0)),
+    absolutePrecision = 1e-8
+  )
+
+  // TODO Test multiple values of `n`
+  testUnaryOp(
+    op = polygamma(1, _),
+    opName = "polygamma",
+    inputTensor = Tensor(Seq(1.0, 0.5)),
+    expectedTensor = Tensor(Seq(1.64493, 4.9348))
+  )
+
+  testUnaryOp(
+    op = positive,
+    opName = "positive",
+    inputTensor = Tensor(Seq(0.0090, -0.2262, -0.0682, -0.2866, 0.3940)),
+    expectedTensor = Tensor(Seq(0.0090, -0.2262, -0.0682, -0.2866, 0.3940))
+  )
+
+  // TODO Test scalar exponent
+  // TODO Enable property test once we figure out how to consider OnlyOneBool evidence in genDType
+  unitTestBinaryOp(
+    op = pow,
+    opName = "pow",
+    inputTensors = (
+      Tensor(Seq(1.0, 2.0, 3.0, 4.0)),
+      Tensor(Seq(1.0, 2.0, 3.0, 4.0))
+    ),
+    expectedTensor = Tensor(Seq(1.0, 4.0, 27.0, 256.0))
+  )
+
+  // TODO quantized_batch_norm
+  // TODO quantized_max_pool1d
+  // TODO quantized_max_pool2d
+
+  testUnaryOp(
+    op = rad2Deg,
+    opName = "rad2Deg",
+    inputTensor = Tensor(Seq(3.142, -3.142, 6.283, -6.283, 1.570, -1.570)),
+    expectedTensor = Tensor(Seq(180.0233, -180.0233, 359.9894, -359.9894, 89.9544, -89.9544))
+  )
+
+  testUnaryOp(
+    op = real,
+    opName = "real",
+    inputTensor = Tensor(
+      Seq(
+        Complex(0.31, 0.3553),
+        Complex(-0.5445, -0.7896),
+        Complex(-1.6492, -0.0633),
+        Complex(-0.0638, -0.8119)
+      )
+    ),
+    expectedTensor = Tensor(Seq(0.3100, -0.5445, -1.6492, -0.0638))
+  )
+
+  testUnaryOp(
+    op = reciprocal,
+    opName = "reciprocal",
+    inputTensor = Tensor(Seq(-0.4595, -2.1219, -1.4314, 0.7298)),
+    expectedTensor = Tensor(Seq(-2.1763, -0.4713, -0.6986, 1.3702))
+  )
+
+  // TODO Enable property test once we figure out how to consider OnlyOneBool evidence in genDType
+  // propertyTestBinaryOp(remainder, "remainder")
+  test("remainder.unit-test") {
+    val result = remainder(Tensor(Seq(-3.0, -2.0, -1.0, 1.0, 2.0, 3.0)), 2)
+    val expected = Tensor(Seq(1.0, 0.0, 1.0, 1.0, 0.0, 1.0))
+    assert(allclose(result, expected))
+
+    val result2 = remainder(-1.5, Tensor(Seq(1, 2, 3, 4, 5))).to(dtype = float64)
+    val expected2 = Tensor(Seq(0.5, 0.5, 1.5, 2.5, 3.5))
+    assert(allclose(result2, expected2))
+
+    val result3 = remainder(Tensor(Seq(1, 2, 3, 4, 5)), Tensor(Seq(1, 2, 3, 4, 5)))
+    val expected3 = Tensor(Seq(0, 0, 0, 0, 0))
+
+    assert(allclose(result3, expected3))
+  }
+
+  testUnaryOp(
+    op = round(_, decimals = 0),
+    opName = "round",
+    inputTensor = Tensor(Seq(4.7, -2.3, 9.1, -7.7)),
+    expectedTensor = Tensor(Seq(5.0, -2.0, 9.0, -8.0))
+  )
+  test("round.unit-test.decimals") {
+    val input = Tensor(Seq(0.1234567))
+    val result = round(input, decimals = 3)
+    assert(allclose(result, Tensor(Seq(0.123)), atol = 1e-3))
+  }
+
+  testUnaryOp(
+    op = rsqrt,
+    opName = "rsqrt",
+    inputTensor = Tensor(Seq(-0.0370, 0.2970, 1.5420, -0.9105)),
+    expectedTensor = Tensor(Seq(Double.NaN, 1.8351, 0.8053, Double.NaN)),
+    absolutePrecision = 1e-3
+  )
+
+  testUnaryOp(
+    op = sigmoid,
+    opName = "sigmoid",
+    inputTensor = Tensor(Seq(0.9213, 1.0887, -0.8858, -1.7683)),
+    expectedTensor = Tensor(Seq(0.7153,
0.7481, 0.2920, 0.1458)) + ) + + testUnaryOp( + op = sign, + opName = "sign", + inputTensor = Tensor(Seq(0.7, -1.2, 0.0, 2.3)), + expectedTensor = Tensor(Seq(1.0, -1.0, 0.0, 1.0)) + ) + + testUnaryOp( + op = sgn, + opName = "sgn", + inputTensor = + Tensor(Seq(Complex(3.0, 4.0), Complex(7.0, -24.0), Complex(0.0, 0.0), Complex(1.0, 2.0))), + expectedTensor = Tensor( + Seq(Complex(0.6, 0.8), Complex(0.28, -0.96), Complex(0.0, 0.0), Complex(0.4472, 0.8944)) + ) + ) + + testUnaryOp( + op = signbit, + opName = "signbit", + inputTensor = Tensor(Seq(0.7, -1.2, 0.0, -0.0, 2.3)), + expectedTensor = Tensor(Seq(false, true, false, true, false)) + ) + + testUnaryOp( + op = sin, + opName = "sin", + inputTensor = Tensor(Seq(-0.5461, 0.1347, -2.7266, -0.2746)), + expectedTensor = Tensor(Seq(-0.5194, 0.1343, -0.4032, -0.2711)) + ) + + testUnaryOp( + op = sinc, + opName = "sinc", + inputTensor = Tensor(Seq(0.2252, -0.2948, 1.0267, -1.1566)), + expectedTensor = Tensor(Seq(0.9186, 0.8631, -0.0259, -0.1300)) + ) + + testUnaryOp( + op = sinh, + opName = "sinh", + inputTensor = Tensor(Seq(0.5380, -0.8632, -0.1265, 0.9399)), + expectedTensor = Tensor(Seq(0.5644, -0.9744, -0.1268, 1.0845)) + ) + + testUnaryOp( + op = sqrt, + opName = "sqrt", + inputTensor = Tensor(Seq(-2.0755, 1.0226, 0.0831, 0.4806)), + expectedTensor = Tensor(Seq(Double.NaN, 1.0112, 0.2883, 0.6933)) + ) + + testUnaryOp( + op = square, + opName = "square", + inputTensor = Tensor(Seq(-2.0755, 1.0226, 0.0831, 0.4806)), + expectedTensor = Tensor(Seq(4.3077, 1.0457, 0.0069, 0.2310)) + ) + + testBinaryOp( + op = sub, + opName = "sub", + inputTensors = ( + Tensor(Seq(1, 2)), + Tensor(Seq(0, 1)) + ), + expectedTensor = Tensor(Seq(1, 1)) + ) + test("sub.unit-test.alpha") { + val a = Tensor(Seq(1, 2)) + val b = Tensor(Seq(0, 1)) + val resAlpha = sub(a, b, alpha = 2) + assertEquals( + resAlpha, + Tensor(Seq(1, 0)) + ) + } + + testUnaryOp( + op = tan, + opName = "tan", + inputTensor = Tensor(Seq(-1.2027, -1.7687, 0.4412, -1.3856)), + expectedTensor = Tensor(Seq(-2.5930, 4.9859, 0.4722, -5.3366)), + absolutePrecision = 1e-2 + ) + + testUnaryOp( + op = tanh, + opName = "tanh", + inputTensor = Tensor(Seq(0.8986, -0.7279, 1.1745, 0.2611)), + expectedTensor = Tensor(Seq(0.7156, -0.6218, 0.8257, 0.2553)) + ) + + testBinaryOp( + op = trueDivide, + opName = "trueDivide", + inputTensors = ( + Tensor(Seq(-0.3711, -1.9353, -0.4605, -0.2917)), + Tensor(Seq(0.8032, 0.2930, -0.8113, -0.2308)) + ), + expectedTensor = Tensor(Seq(-0.4620, -6.6051, 0.5676, 1.2639)) + ) + + testUnaryOp( + op = trunc, + opName = "trunc", + inputTensor = Tensor(Seq(3.4742, 0.5466, -0.8008, -0.9079)), + expectedTensor = Tensor(Seq(3.0, 0.0, -0.0, -0.0)) + ) + + testBinaryOp( + op = xlogy, + opName = "xlogy", + inputTensors = ( + Tensor(Seq(0, 0, 0, 0, 0)), + Tensor(Seq(-1.0, 0.0, 1.0, Double.PositiveInfinity, Double.NaN)) + ), + expectedTensor = Tensor(Seq(0.0, 0.0, 0.0, 0.0, Double.NaN)) + ) + +} diff --git a/core/src/test/scala/torch/ops/RandomSamplingOpsSuite.scala b/core/src/test/scala/torch/ops/RandomSamplingOpsSuite.scala new file mode 100644 index 00000000..b5d28954 --- /dev/null +++ b/core/src/test/scala/torch/ops/RandomSamplingOpsSuite.scala @@ -0,0 +1,41 @@ +/* + * Copyright 2022 storch.dev + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package torch + +class RandomSamplingOpsSuite extends TensorCheckSuite { + + testUnaryOp( + op = multinomial(_, 2, true), + opName = "multinomial", + inputTensor = Tensor(Seq(0.0, 0.0, 0.0, 1.0)), + expectedTensor = Tensor(Seq(3L, 3L)) + ) + + test("randn.unit-test") { + val randnTensor = randn(Seq(100000)) + val randnMean = randnTensor.mean + val expectedMean = Tensor(0.0).to(dtype = float32) + val randnVariance = randnTensor.variance + val expectedVariance = Tensor(1.0).to(dtype = float32) + + assert( + allclose(randnMean, expectedMean, atol = 1e-2) && + allclose(randnVariance, expectedVariance, atol = 1e-2) + ) + } + +} diff --git a/core/src/test/scala/torch/ops/ReductionOpsSuite.scala b/core/src/test/scala/torch/ops/ReductionOpsSuite.scala new file mode 100644 index 00000000..50bea4c1 --- /dev/null +++ b/core/src/test/scala/torch/ops/ReductionOpsSuite.scala @@ -0,0 +1,28 @@ +/* + * Copyright 2022 storch.dev + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package torch + +class ReductionOpsSuite extends TensorCheckSuite { + + testUnaryOp( + op = sum(_, Array(), false, None), + opName = "sum", + inputTensor = Tensor(Seq(5.0, 5.0)), + expectedTensor = Tensor(10.0) + ) + +}
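Note: ReductionOpsSuite so far only exercises the all-elements overload of `sum`. A dimension-wise case could be added along the lines of the following minimal sketch, which assumes `sum` accepts `Array(0)` for the `dim` argument with the same parameter order as the call above (illustrative only, not part of this patch):

  test("sum.unit-test.dim") {
    // Summing a 2x2 tensor over dim 0 collapses the rows: (1 + 3, 2 + 4)
    val input = Tensor(Seq(1.0, 2.0, 3.0, 4.0)).view(2, 2)
    val result = sum(input, Array(0), false, None)
    assert(allclose(result, Tensor(Seq(4.0, 6.0))))
  }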