@frozen
public struct Tensor<Scalar> where Scalar : TensorFlowScalar
extension Tensor: Collatable
extension Tensor: CopyableToDevice
extension Tensor: AnyTensor
extension Tensor: ExpressibleByArrayLiteral
extension Tensor: CustomStringConvertible
extension Tensor: CustomPlaygroundDisplayConvertible
extension Tensor: CustomReflectable
extension Tensor: TensorProtocol
extension Tensor: TensorGroup
extension Tensor: ElementaryFunctions where Scalar: TensorFlowFloatingPoint
extension Tensor: VectorProtocol where Scalar: TensorFlowFloatingPoint
extension Tensor: Mergeable where Scalar: TensorFlowFloatingPoint
extension Tensor: Equatable where Scalar: Equatable
extension Tensor: Codable where Scalar: Codable
extension Tensor: AdditiveArithmetic where Scalar: Numeric
extension Tensor: PointwiseMultiplicative where Scalar: Numeric
extension Tensor: Differentiable & EuclideanDifferentiable where Scalar: TensorFlowFloatingPoint
extension Tensor: DifferentiableTensorProtocol
where Scalar: TensorFlowFloatingPoint
A multidimensional array of elements that is a generalization of vectors and matrices to potentially higher dimensions.
The generic parameter Scalar describes the type of scalars in the tensor (such as Int32, Float, etc.).
-
The underlying TensorHandle.
Note
handle is public to allow user defined ops, but should not normally be used.
Declaration
public let handle: TensorHandle<Scalar>
-
Declaration
public init(handle: TensorHandle<Scalar>)
-
Unpacks the given dimension of a rank-R tensor into multiple rank-(R-1) tensors. Unpacks N tensors from this tensor by chipping it along the axis dimension, where N is inferred from this tensor's shape. For example, given a tensor with shape [A, B, C, D]:
- If axis == 0, then the i-th tensor in the returned array is the slice self[i, :, :, :] and each tensor in that array will have shape [B, C, D]. (Note that the dimension unpacked along is gone, unlike Tensor.split(numSplits:alongAxis) or Tensor.split(sizes:alongAxis).)
- If axis == 1, then the i-th tensor in the returned array is the slice value[:, i, :, :] and each tensor in that array will have shape [A, C, D].
- Etc.
This is the opposite of Tensor.init(stacking:alongAxis:).
Precondition
axis must be in the range [-rank, rank), where rank is the rank of the provided tensors.
Declaration
@differentiable public func unstacked(alongAxis axis: Int = 0) -> [Tensor]
Parameters
axis
Dimension along which to unstack. Negative values wrap around.
Return Value
Array containing the unstacked tensors.
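A minimal usage sketch of unstacking, assuming import TensorFlow and a Float scalar type:
import TensorFlow
// Unstack a [3, 2] tensor; the unstacked axis disappears from each piece.
let t = Tensor<Float>(shape: TensorShape([3, 2]), scalars: [1, 2, 3, 4, 5, 6])
let rows = t.unstacked(alongAxis: 0) // three tensors, each with shape [2]
let cols = t.unstacked(alongAxis: 1) // two tensors, each with shape [3]
print(rows[0]) // [1.0, 2.0]
print(cols[0]) // [1.0, 3.0, 5.0]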
-
Splits a tensor into multiple tensors. The tensor is split along dimension axis into count smaller tensors. This requires that count evenly divides shape[axis].
For example:
// 'value' is a tensor with shape [5, 30]
// Split 'value' into 3 tensors along dimension 1:
let parts = value.split(count: 3, alongAxis: 1)
parts[0] // has shape [5, 10]
parts[1] // has shape [5, 10]
parts[2] // has shape [5, 10]
Precondition
count must divide the size of dimension axis evenly.
Precondition
axis must be in the range [-rank, rank), where rank is the rank of the provided tensors.
Declaration
@differentiable public func split(count: Int, alongAxis axis: Int = 0) -> [Tensor]
Parameters
count
Number of splits to create.
axis
The dimension along which to split this tensor. Negative values wrap around.
Return Value
An array containing the tensor parts.
-
Splits a tensor into multiple tensors. The tensor is split into sizes.shape[0] pieces. The i-th piece has the same shape as this tensor except along dimension axis, where the size is sizes[i].
For example:
// 'value' is a tensor with shape [5, 30]
// Split 'value' into 3 tensors with sizes [4, 15, 11] along dimension 1:
let parts = value.split(sizes: Tensor<Int32>([4, 15, 11]), alongAxis: 1)
parts[0] // has shape [5, 4]
parts[1] // has shape [5, 15]
parts[2] // has shape [5, 11]
Precondition
The values in sizes must add up to the size of dimension axis.
Precondition
axis must be in the range [-rank, rank), where rank is the rank of the provided tensors.
Declaration
@differentiable(wrt: self) public func split(sizes: Tensor<Int32>, alongAxis axis: Int = 0) -> [Tensor]
Parameters
sizes
1-D tensor containing the size of each split.
axis
Dimension along which to split this tensor. Negative values wrap around.
Return Value
An array containing the tensor parts.
-
Declaration
@differentiable(wrt: self) public func split(sizes: [Int], alongAxis axis: Int = 0) -> [Tensor]
-
Returns a tiled tensor, constructed by tiling this tensor.
This method creates a new tensor by replicating this tensor multiples times. The constructed tensor's i'th dimension has self.shape[i] * multiples[i] elements, and the values of this tensor are replicated multiples[i] times along the i'th dimension. For example, tiling [a b c d] by [2] produces [a b c d a b c d].
Precondition
The shape of multiples must be [tensor.rank].
Precondition
All scalars in multiples must be non-negative.
Declaration
@differentiable(wrt: self) public func tiled(multiples: [Int]) -> Tensor
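A minimal usage sketch of tiling, assuming import TensorFlow:
import TensorFlow
// Tile a [2] tensor twice along its only dimension, and a [1, 2] tensor along both dimensions.
let v = Tensor<Float>([1, 2])
print(v.tiled(multiples: [2]))            // [1.0, 2.0, 1.0, 2.0]
let m = Tensor<Float>(shape: TensorShape([1, 2]), scalars: [1, 2])
print(m.tiled(multiples: [2, 3]).shape)   // [2, 6]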
-
Returns a tiled tensor, constructed by tiling this tensor.
This method creates a new tensor by replicating this tensor multiples times. The constructed tensor's i'th dimension has self.shape[i] * multiples[i] elements, and the values of this tensor are replicated multiples[i] times along the i'th dimension. For example, tiling [a b c d] by [2] produces [a b c d a b c d].
Precondition
The shape of multiples must be [tensor.rank].
Declaration
@differentiable(wrt: self) public func tiled(multiples: Tensor<Int32>) -> Tensor
-
Reshape to the shape of the specified Tensor.
Precondition
The number of scalars matches the new shape.
Declaration
@differentiable(wrt: self) public func reshaped<T>(like other: Tensor<T>) -> Tensor where T : TensorFlowScalar
-
Reshape to the specified shape.
Precondition
The number of scalars matches the new shape.
Declaration
@differentiable(wrt: self) public func reshaped(to newShape: TensorShape) -> Tensor
-
Reshape to the specified Tensor representing a shape.
Precondition
The number of scalars matches the new shape.
Declaration
@differentiable(wrt: self) public func reshaped(toShape newShape: Tensor<Int32>) -> Tensor
-
Returns a copy of the tensor collapsed into a 1-D Tensor, in row-major order.
Declaration
@differentiable(wrt: self) public func flattened() -> Tensor
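A minimal usage sketch of reshaping and flattening, assuming import TensorFlow:
import TensorFlow
// Reshape six scalars into [2, 3], then collapse back into a 1-D tensor.
let t = Tensor<Float>([1, 2, 3, 4, 5, 6])
let matrix = t.reshaped(to: TensorShape([2, 3]))
print(matrix.shape)                   // [2, 3]
print(matrix.flattened())             // [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
print(t.reshaped(like: matrix).shape) // [2, 3]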
-
Returns a shape-expanded Tensor, with a dimension of 1 inserted at the specified shape indices.
Declaration
@differentiable(wrt: self) public func expandingShape(at axes: Int...) -> Tensor
-
Returns a shape-expanded Tensor, with a dimension of 1 inserted at the specified shape indices.
Declaration
@differentiable(wrt: self) public func expandingShape(at axes: [Int]) -> Tensor
-
Returns a rank-lifted Tensor with a leading dimension of 1.
Declaration
@differentiable(wrt: self) public func rankLifted() -> Tensor
-
Removes the specified dimensions of size 1 from the shape of a tensor. If no dimensions are specified, then all dimensions of size 1 will be removed.
Declaration
@differentiable(wrt: self) public func squeezingShape(at axes: Int...) -> Tensor
-
Removes the specified dimensions of size 1 from the shape of a tensor. If no dimensions are specified, then all dimensions of size 1 will be removed.
Declaration
@differentiable(wrt: self) public func squeezingShape(at axes: [Int]) -> Tensor
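A minimal usage sketch of inserting and removing size-1 dimensions, assuming import TensorFlow:
import TensorFlow
// Insert a size-1 dimension, then squeeze it away again.
let v = Tensor<Float>([1, 2, 3])            // shape [3]
let column = v.expandingShape(at: 1)        // shape [3, 1]
print(column.shape)                          // [3, 1]
print(column.squeezingShape(at: 1).shape)    // [3]
print(v.rankLifted().shape)                  // [1, 3]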
-
Returns a transposed tensor, with dimensions permuted in the specified order.
Declaration
@differentiable(wrt: self) public func transposed(permutation: Tensor<Int32>) -> Tensor
-
Returns a transposed tensor, with dimensions permuted in the specified order.
Declaration
@available(*, deprecated, renamed: "transposed(permutation:)") @differentiable(wrt: self) public func transposed(withPermutations permutations: Tensor<Int32>) -> Tensor
-
Returns a transposed tensor, with dimensions permuted in the specified order.
Declaration
@differentiable(wrt: self) public func transposed(permutation: [Int]) -> Tensor
-
Returns a transposed tensor, with dimensions permuted in the specified order.
Declaration
@available(*, deprecated, renamed: "transposed(permutation:)") @differentiable(wrt: self) public func transposed(withPermutations permutations: [Int]) -> Tensor
-
Returns a transposed tensor, with dimensions permuted in the specified order.
Declaration
@differentiable(wrt: self) public func transposed(permutation: Int...) -> Tensor
-
Returns a transposed tensor, with dimensions permuted in the specified order.
Declaration
@available(*, deprecated, renamed: "transposed(permutation:)") @differentiable(wrt: self) public func transposed(withPermutations permutations: Int...) -> Tensor
-
Returns a transposed tensor, with dimensions permuted in reverse order.
Declaration
@differentiable(wrt: self) public func transposed() -> Tensor
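A minimal usage sketch of transposition, assuming import TensorFlow:
import TensorFlow
// Permute the dimensions of a [2, 3] tensor.
let t = Tensor<Float>(shape: TensorShape([2, 3]), scalars: [1, 2, 3, 4, 5, 6])
print(t.transposed().shape)                    // [3, 2], dimensions reversed
print(t.transposed(permutation: [1, 0]).shape) // [3, 2], explicit permutation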
-
Returns a tensor with specified dimensions reversed.
Precondition
Each value in axes must be in the range -rank..<rank.
Precondition
There must be no duplication in axes.
Declaration
@differentiable(wrt: self) public func reversed(inAxes axes: Tensor<Int32>) -> Tensor
-
Returns a tensor with specified dimensions reversed.
Precondition
Each value in axes must be in the range -rank..<rank.
Precondition
There must be no duplication in axes.
Declaration
@differentiable(wrt: self) public func reversed(inAxes axes: [Int]) -> Tensor
-
Returns a tensor with specified dimensions reversed.
Precondition
Each value in axes must be in the range -rank..<rank.
Precondition
There must be no duplication in axes.
Declaration
@differentiable(wrt: self) public func reversed(inAxes axes: Int...) -> Tensor
-
Returns a concatenated tensor along the specified axis.
Precondition
The tensors must have the same dimensions, except for the specified axis.
Precondition
The axis must be in the range -rank..<rank.
Declaration
@differentiable public func concatenated(with other: Tensor, alongAxis axis: Int = 0) -> Tensor
-
Concatenation operator.
Note
++ is a custom operator that does not exist in Swift, but does in Haskell/Scala. Its addition is not an insignificant language change and may be controversial. The existence/naming of ++ will be discussed during a later API design phase.
Declaration
@differentiable public static func ++ (lhs: Tensor, rhs: Tensor) -> Tensor
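A minimal usage sketch of concatenation, assuming import TensorFlow:
import TensorFlow
// Concatenate two [2, 3] tensors along each axis.
let a = Tensor<Float>(ones: TensorShape([2, 3]))
let b = Tensor<Float>(zeros: TensorShape([2, 3]))
print(a.concatenated(with: b).shape)               // [4, 3]
print(a.concatenated(with: b, alongAxis: 1).shape) // [2, 6]
print((a ++ b).shape)                              // [4, 3], same as concatenating along axis 0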
-
Returns a tensor by gathering slices of the input at indices along the axis dimension.
For 0-D (scalar) indices:
result[p_0, ..., p_{axis-1}, p_{axis + 1}, ..., p_{N-1}] = self[p_0, ..., p_{axis-1}, indices, p_{axis + 1}, ..., p_{N-1}]
For 1-D (vector) indices:
result[p_0, ..., p_{axis-1}, i, p_{axis + 1}, ..., p_{N-1}] = self[p_0, ..., p_{axis-1}, indices[i], p_{axis + 1}, ..., p_{N-1}]
In the general case, produces a resulting tensor where:
result[p_0, ..., p_{axis-1}, i_{batch_dims}, ..., i_{M-1}, p_{axis + 1}, ..., p_{N-1}] = self[p_0, ..., p_{axis-1}, indices[i_0, ..., i_{M-1}], p_{axis + 1}, ..., p_{N-1}]
where N = self.rank and M = indices.rank.
The shape of the resulting tensor is: self.shape[..<axis] + indices.shape + self.shape[(axis + 1)...].
Note
On CPU, if an out-of-range index is found, an error is thrown. On GPU, if an out-of-range index is found, a 0 is stored in the corresponding output values.
Precondition
axis must be in the range [-rank, rank).
Declaration
@differentiable(wrt: self) public func gathering<Index: TensorFlowIndex>( atIndices indices: Tensor<Index>, alongAxis axis: Int = 0 ) -> Tensor
Parameters
indices
Contains the indices to gather at.
axis
Dimension along which to gather. Negative values wrap around.
Return Value
The gathered tensor.
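A minimal usage sketch of gathering, assuming import TensorFlow:
import TensorFlow
// Gather rows 0 and 2, then columns 1 and 0, of a [3, 2] tensor.
let t = Tensor<Float>(shape: TensorShape([3, 2]), scalars: [1, 2, 3, 4, 5, 6])
let rows = t.gathering(atIndices: Tensor<Int32>([0, 2]))               // [[1, 2], [5, 6]]
let cols = t.gathering(atIndices: Tensor<Int32>([1, 0]), alongAxis: 1) // [[2, 1], [4, 3], [6, 5]]
print(rows.shape) // [2, 2]
print(cols.shape) // [3, 2]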
-
Returns slices of this tensor at indices along the axis dimension, while ignoring the first batchDimensionCount dimensions that correspond to batch dimensions. The gather is performed along the first non-batch dimension.
Performs similar functionality to gathering, except that the resulting tensor shape is now shape[..<axis] + indices.shape[batchDimensionCount...] + shape[(axis + 1)...].
Precondition
axis must be in the range -rank..<rank, while also being greater than or equal to batchDimensionCount.
Precondition
batchDimensionCount must be less than indices.rank.
Declaration
@differentiable(wrt: self) public func batchGathering<Index: TensorFlowIndex>( atIndices indices: Tensor<Index>, alongAxis axis: Int = 1, batchDimensionCount: Int = 1 ) -> Tensor
Parameters
indices
Contains the indices to gather.
axis
Dimension along which to gather. Negative values wrap around.
batchDimensionCount
Number of leading batch dimensions to ignore.
Return Value
The gathered tensor.
-
Returns a tensor by gathering the values after applying the provided boolean mask to the input.
For example:
// 1-D example
// tensor is [0, 1, 2, 3]
// mask is [true, false, true, false]
tensor.gathering(where: mask) // is [0, 2]

// 2-D example
// tensor is [[1, 2], [3, 4], [5, 6]]
// mask is [true, false, true]
tensor.gathering(where: mask) // is [[1, 2], [5, 6]]
In general, 0 < mask.rank = K <= tensor.rank, and the mask's shape must match the first K dimensions of the tensor's shape. We then have: tensor.gathering(where: mask)[i, j1, ..., jd] = tensor[i1, ..., iK, j1, ..., jd], where [i1, ..., iK] is the i-th true entry of mask (row-major order).
The axis could be used with mask to indicate the axis to mask from. In that case, axis + mask.rank <= tensor.rank and the mask's shape must match the first axis + mask.rank dimensions of the tensor's shape.
Precondition
The mask cannot be a scalar: mask.rank != 0.
Declaration
@differentiable(wrt: self) public func gathering(where mask: Tensor<Bool>, alongAxis axis: Int = 0) -> Tensor
Parameters
mask
K-D boolean tensor, where K <= self.rank.
axis
0-D integer tensor representing the axis in self to mask from, where K + axis <= self.rank.
Return Value
(self.rank - K + 1)-dimensional tensor populated by entries in this tensor corresponding to true values in mask.
-
Returns the locations of non-zero / true values in this tensor.
The coordinates are returned in a 2-D tensor where the first dimension (rows) represents the number of non-zero elements, and the second dimension (columns) represents the coordinates of the non-zero elements. Keep in mind that the shape of the output tensor can vary depending on how many true values there are in this tensor. Indices are output in row-major order.
For example:
// 'input' is [[true, false], [true, false]]
// 'input' has 2 true values and so the output has 2 rows.
// 'input' has rank of 2, and so the second dimension of the output has size 2.
input.nonZeroIndices() // is [[0, 0], [1, 0]]

// 'input' is [[[ true, false], [ true, false]],
//             [[false,  true], [false,  true]],
//             [[false, false], [false,  true]]]
// 'input' has 5 true values and so the output has 5 rows.
// 'input' has rank 3, and so the second dimension of the output has size 3.
input.nonZeroIndices() // is [[0, 0, 0],
//                            [0, 1, 0],
//                            [1, 0, 1],
//                            [1, 1, 1],
//                            [2, 1, 1]]
Declaration
public func nonZeroIndices() -> Tensor<Int64>
Return Value
A tensor with shape (num_true, rank(condition)).
-
Declaration
@differentiable(wrt: self) public func broadcasted(toShape shape: Tensor<Int32>) -> Tensor
-
Declaration
@differentiable(wrt: self) public func broadcasted(to shape: TensorShape) -> Tensor
-
Broadcast to the same shape as the specified Tensor.
Precondition
The specified shape must be compatible for broadcasting.
Declaration
@differentiable(wrt: self) public func broadcasted<OtherScalar>(like other: Tensor<OtherScalar>) -> Tensor where OtherScalar : TensorFlowScalar
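A minimal usage sketch of broadcasting, assuming import TensorFlow:
import TensorFlow
// Broadcast a rank-1 tensor to a [2, 3] shape, and to the shape of another tensor.
let v = Tensor<Float>([1, 2, 3])                 // shape [3]
let m = v.broadcasted(to: TensorShape([2, 3]))   // shape [2, 3], rows repeated
print(m)
print(v.broadcasted(like: m).shape)              // [2, 3]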
-
Declaration
public static func .= (lhs: inout Tensor, rhs: Tensor)
-
Extracts a slice from the tensor defined by lower and upper bounds for each dimension.
Declaration
@differentiable(wrt: self) public func slice(lowerBounds: [Int], upperBounds: [Int]) -> Tensor
Parameters
lowerBounds
The lower bounds at each dimension.
upperBounds
The upper bounds at each dimension.
-
Declaration
@differentiable(wrt: self) public func slice(lowerBounds: Tensor<Int32>, sizes: Tensor<Int32>) -> Tensor
-
Declaration
@differentiable(wrt: self) public func slice(lowerBounds: [Int], sizes: [Int]) -> Tensor
-
Declaration
@differentiable(wrt: self) public subscript(ranges: TensorRangeExpression...) -> Tensor { get set }
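A minimal usage sketch of slicing with explicit bounds, assuming import TensorFlow:
import TensorFlow
// Take the lower-left 2x2 block of a [3, 4] tensor.
let t = Tensor<Float>(rangeFrom: 0, to: 12, stride: 1).reshaped(to: TensorShape([3, 4]))
let block = t.slice(lowerBounds: [1, 0], upperBounds: [3, 2]) // rows 1..<3, columns 0..<2
print(block.shape)  // [2, 2]
print(block)        // [[4.0, 5.0], [8.0, 9.0]]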
-
Checks that each element of axes denotes an axis of self, and stops the program with a diagnostic otherwise.
Declaration
func ensureValid( axes: Tensor<Int32>, function: StaticString = #function, file: StaticString = #file, line: UInt = #line )
-
Checks that each element of axes denotes an axis of self, and stops the program with a diagnostic otherwise.
Declaration
func ensureValid( axes: [Int], function: StaticString = #function, file: StaticString = #file, line: UInt = #line )
-
Checks that k denotes an axis of self, and stops the program with a diagnostic otherwise.
Declaration
func ensureValid( axis k: Int, function: StaticString = #function, file: StaticString = #file, line: UInt = #line )
-
Declaration
public init<BatchSamples: Collection>(collating samples: BatchSamples) where BatchSamples.Element == Self
-
Creates a tensor with the specified shape and a single, repeated scalar value.
Declaration
@available(*, deprecated, renamed: "init(repeating:shape:)") public init(shape: TensorShape, repeating repeatedValue: Scalar)
Parameters
shape
The dimensions of the tensor.
repeatedValue
The scalar value to repeat.
-
Creates a tensor with the specified shape and a single, repeated scalar value.
Declaration
@differentiable public init( repeating repeatedValue: Scalar, shape: TensorShape, on device: Device = .default )
Parameters
repeatedValue
The scalar value to repeat.
shape
The dimensions of the tensor.
-
Creates a tensor by broadcasting the given scalar to a given rank with all dimensions being 1.
Declaration
public init(broadcasting scalar: Scalar, rank: Int, on device: Device = .default)
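A minimal usage sketch of these initializers, assuming import TensorFlow:
import TensorFlow
// Fill a [2, 3] tensor with a single value, and lift a scalar to rank 3.
let filled = Tensor<Float>(repeating: 0.5, shape: TensorShape([2, 3]))
let lifted = Tensor<Float>(broadcasting: 7, rank: 3)
print(filled.shape)  // [2, 3]
print(lifted.shape)  // [1, 1, 1]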
-
Creates a tensor from an array of tensors (which may themselves be scalars).
Declaration
@differentiable public init(_ elements: [Tensor])
-
Stacks tensors, along the axis dimension, into a new tensor with rank one higher than the current tensor and each tensor in tensors.
Given that tensors all have shape [A, B, C], and tensors.count = N, then:
- if axis == 0 then the resulting tensor will have the shape [N, A, B, C].
- if axis == 1 then the resulting tensor will have the shape [A, N, B, C].
- etc.
For example:
// 'x' is [1, 4]
// 'y' is [2, 5]
// 'z' is [3, 6]
Tensor(stacking: [x, y, z]) // is [[1, 4], [2, 5], [3, 6]]
Tensor(stacking: [x, y, z], alongAxis: 1) // is [[1, 2, 3], [4, 5, 6]]
This is the opposite of Tensor.unstacked(alongAxis:).
Precondition
All tensors must have the same shape.
Precondition
axis must be in the range [-rank, rank), where rank is the rank of the provided tensors.
Declaration
@differentiable public init(stacking tensors: [Tensor], alongAxis axis: Int = 0)
Parameters
tensors
Tensors to stack.
axis
Dimension along which to stack. Negative values wrap around.
Return Value
The stacked tensor.
-
Concatenates tensors along the axis dimension.
Given that tensors[i].shape = [D0, D1, ... Daxis(i), ...Dn], then the concatenated result has shape [D0, D1, ... Raxis, ...Dn], where Raxis = sum(Daxis(i)). That is, the data from the input tensors is joined along the axis dimension.
For example:
// t1 is [[1, 2, 3], [4, 5, 6]]
// t2 is [[7, 8, 9], [10, 11, 12]]
Tensor(concatenating: [t1, t2]) // is [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
Tensor(concatenating: [t1, t2], alongAxis: 1) // is [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]

// t3 has shape [2, 3]
// t4 has shape [2, 3]
Tensor(concatenating: [t3, t4]) // has shape [4, 3]
Tensor(concatenating: [t3, t4], alongAxis: 1) // has shape [2, 6]
Note
If you are concatenating along a new axis consider using Tensor.init(stacking:alongAxis:).
Precondition
All tensors must have the same rank and all dimensions except axis must be equal.
Precondition
axis must be in the range [-rank, rank), where rank is the rank of the provided tensors.
Declaration
@differentiable public init(concatenating tensors: [Tensor], alongAxis axis: Int = 0)
Parameters
tensors
Tensors to concatenate.
axis
Dimension along which to concatenate. Negative values wrap around.
Return Value
The concatenated tensor.
-
Replaces elements of this tensor with other in the lanes where mask is true.
Precondition
self and other must have the same shape. If self and other are scalar, then mask must also be scalar. If self and other have rank greater than or equal to 1, then mask must either have the same shape as self or be a 1-D Tensor such that mask.scalarCount == self.shape[0].
Declaration
@differentiable(wrt: (self, other) ) public func replacing(with other: Tensor, where mask: Tensor<Bool>) -> Tensor
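A minimal usage sketch of masked replacement, assuming import TensorFlow:
import TensorFlow
// Replace negative entries with zeros.
let x = Tensor<Float>([-1, 2, -3, 4])
let mask = x .< 0                                        // [true, false, true, false]
let rectified = x.replacing(with: Tensor<Float>(zeros: TensorShape([4])), where: mask)
print(rectified)                                          // [0.0, 2.0, 0.0, 4.0]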
-
Returns true if the physical scalar type is reduced precision.
Currently, reduced precision physical scalar types include only BFloat16.
Declaration
public var isReducedPrecision: Bool { get }
-
Promotes a scalar to a tensor with the same device and precision as the given tensor.
Declaration
@differentiable public init(_ value: Scalar, deviceAndPrecisionLike tensor: Tensor)
-
Returns a copy of self converted to BFloat16 physical scalar type.
Declaration
public var toReducedPrecision: `Self` { get }
-
Returns a copy of self converted to Scalar physical scalar type.
Declaration
public var toFullPrecision: `Self` { get }
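A minimal usage sketch of the precision-conversion API, assuming import TensorFlow; whether BFloat16 storage is actually used depends on the backend:
import TensorFlow
// Round-trip a tensor through reduced precision.
let x = Tensor<Float>([1, 2, 3])
let reduced = x.toReducedPrecision
print(reduced.isReducedPrecision)   // expected: true on a backend with BFloat16 support
let restored = reduced.toFullPrecision
print(restored.isReducedPrecision)  // false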
-
The number of dimensions of the Tensor.
Declaration
public var rank: Int { get }
-
The shape of the Tensor.
Declaration
public var shape: TensorShape { get }
-
The number of scalars in the Tensor.
Declaration
public var scalarCount: Int { get }
-
The rank of the tensor, represented as a Tensor<Int32>.
Declaration
public var rankTensor: Tensor<Int32> { get }
-
The dimensions of the tensor, represented as a Tensor<Int32>.
Declaration
public var shapeTensor: Tensor<Int32> { get }
-
The number of scalars in the tensor, represented as a Tensor<Int32>.
Declaration
public var scalarCountTensor: Tensor<Int32> { get }
-
Returns true if rank is equal to 0 and false otherwise.
Declaration
public var isScalar: Bool { get }
-
Returns the single scalar element if rank is equal to 0 and nil otherwise.
Declaration
public var scalar: Scalar? { get }
-
Reshape to scalar.
Precondition
The tensor has exactly one scalar.
Declaration
@differentiable public func scalarized() -> Scalar
-
Declaration
public var array: ShapedArray<Scalar> { get }
-
Declaration
@differentiable public var scalars: [Scalar] { get }
-
Creates a 0-D tensor from a scalar value.
Declaration
@differentiable public init(_ value: Scalar, on device: Device = .default)
-
Creates a 1D tensor from scalars.
Declaration
@differentiable public init(_ scalars: [Scalar], on device: Device = .default)
-
Creates a 1D tensor from scalars.
Declaration
public init<C: Collection>( _ vector: C, on device: Device = .default ) where C.Element == Scalar
-
Creates a tensor with the specified shape and contiguous scalars in row-major order.
Precondition
The product of the dimensions of the shape must equal the number of scalars.
Declaration
@differentiable public init(shape: TensorShape, scalars: [Scalar], on device: Device = .default)
Parameters
shape
The shape of the tensor.
scalars
The scalar contents of the tensor.
-
Creates a tensor with the specified shape and contiguous scalars in row-major order.
Precondition
The product of the dimensions of the shape must equal the number of scalars.
Declaration
public init( shape: TensorShape, scalars: UnsafeBufferPointer<Scalar>, on device: Device = .default )
Parameters
shape
The shape of the tensor.
scalars
The scalar contents of the tensor.
-
Creates a tensor with the specified shape and contiguous scalars in row-major order.
Precondition
The product of the dimensions of the shape must equal the number of scalars.
-
Creates a tensor with the specified shape and contiguous scalars in row-major order.
Precondition
The product of the dimensions of the shape must equal the number of scalars.
Declaration
public init<C: Collection>( shape: TensorShape, scalars: C, on device: Device = .default ) where C.Element == Scalar
Parameters
shape
The shape of the tensor.
scalars
The scalar contents of the tensor.
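A minimal usage sketch of this initializer, assuming import TensorFlow:
import TensorFlow
// Build a [2, 3] tensor from six row-major scalars.
let scalars: [Float] = [1, 2, 3, 4, 5, 6]
let matrix = Tensor<Float>(shape: TensorShape([2, 3]), scalars: scalars)
print(matrix.shape)   // [2, 3]
print(matrix)         // [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]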
-
The type of the elements of an array literal.
Declaration
public typealias ArrayLiteralElement = _TensorElementLiteral<Scalar>
-
Creates a tensor initialized with the given elements.
Declaration
public init(arrayLiteral elements: _TensorElementLiteral<Scalar>...)
-
A textual representation of the tensor.
Note
Use fullDescription for a non-pretty-printed description showing all scalars.
Declaration
public var description: String { get }
-
A textual representation of the tensor. Returns a summarized description if summarize is true and the element count exceeds twice the edgeElementCount.
Declaration
public func description( lineWidth: Int = 80, edgeElementCount: Int = 3, summarizing: Bool = false ) -> String
Parameters
lineWidth
The max line width for printing. Used to determine number of scalars to print per line.
edgeElementCount
The maximum number of elements to print before and after summarization via ellipses (...).
summarizing
If true, summarize description if element count exceeds twice edgeElementCount.
-
A full, non-pretty-printed textual representation of the tensor, showing all scalars.
Declaration
public var fullDescription: String { get }
-
Declaration
public var playgroundDescription: Any { get }
-
Declaration
public var customMirror: Mirror { get }
-
The annotations describing this tensor.
Declaration
public var annotations: String { get }
-
An alias for annotations.
Declaration
public var summary: String { get }
-
Declaration
public init(_owning tensorHandles: UnsafePointer<CTensorHandle>?)
-
Declaration
public init<C: RandomAccessCollection>( _handles: C ) where C.Element: _AnyTensorHandle
-
Declaration
public init(_ array: ShapedArray<Scalar>, on device: Device = .default)
-
Declaration
init(_xla: XLATensor)
-
Declaration
init(_xlaHandle: UnsafeMutablePointer<OpaqueXLATensor>)
-
Declaration
var xlaHandle: UnsafeMutablePointer<OpaqueXLATensor> { get }
-
Declaration
var xlaTensor: XLATensor { get }
-
Declaration
@differentiable(wrt: self) public func unbroadcasted(toShape otherShape: Tensor<Int32>) -> Tensor
-
Declaration
@differentiable(wrt: self) public func unbroadcasted<OtherScalar>(like other: Tensor<OtherScalar>) -> Tensor where OtherScalar : TensorFlowScalar
-
Declaration
@differentiable(wrt: self) public func unbroadcasted(to shape: TensorShape) -> Tensor
-
A mode that dictates how a tensor is padded.
Declaration
public enum PaddingMode
-
Returns a tensor padded with constant according to the specified padding sizes.
Declaration
@differentiable(wrt: self) public func padded(forSizes sizes: [(before: Int, after: Int)], with value: Scalar = 0) -> Tensor
-
Returns a padded tensor according to the specified padding sizes and mode.
Declaration
@differentiable(wrt: self) public func padded(forSizes sizes: [(before: Int, after: Int)], mode: PaddingMode) -> Tensor
-
Returns a tensor of Boolean scalars by computing lhs < rhs element-wise.
Declaration
public static func .< (lhs: Tensor, rhs: Tensor) -> Tensor<Bool>
-
Returns a tensor of Boolean scalars by computing lhs <= rhs element-wise.
Declaration
public static func .<= (lhs: Tensor, rhs: Tensor) -> Tensor<Bool>
-
Returns a tensor of Boolean scalars by computing lhs > rhs element-wise.
Declaration
public static func .> (lhs: Tensor, rhs: Tensor) -> Tensor<Bool>
-
Returns a tensor of Boolean scalars by computing lhs >= rhs element-wise.
Declaration
public static func .>= (lhs: Tensor, rhs: Tensor) -> Tensor<Bool>
-
Returns a tensor of Boolean scalars by computing lhs < rhs element-wise.
Note
.< supports broadcasting.
Declaration
public static func .< (lhs: Scalar, rhs: Tensor) -> Tensor<Bool>
-
Returns a tensor of Boolean scalars by computing lhs <= rhs element-wise.
Note
.<= supports broadcasting.
Declaration
public static func .<= (lhs: Scalar, rhs: Tensor) -> Tensor<Bool>
-
Returns a tensor of Boolean scalars by computing lhs > rhs element-wise.
Note
.> supports broadcasting.
Declaration
public static func .> (lhs: Scalar, rhs: Tensor) -> Tensor<Bool>
-
Returns a tensor of Boolean scalars by computing lhs >= rhs element-wise.
Note
.>= supports broadcasting.
Declaration
public static func .>= (lhs: Scalar, rhs: Tensor) -> Tensor<Bool>
-
Returns a tensor of Boolean scalars by computing lhs < rhs element-wise.
Note
.< supports broadcasting.
Declaration
public static func .< (lhs: Tensor, rhs: Scalar) -> Tensor<Bool>
-
Returns a tensor of Boolean scalars by computing lhs <= rhs element-wise.
Note
.<= supports broadcasting.
Declaration
public static func .<= (lhs: Tensor, rhs: Scalar) -> Tensor<Bool>
-
Returns a tensor of Boolean scalars by computing lhs > rhs element-wise.
Note
.> supports broadcasting.
Declaration
public static func .> (lhs: Tensor, rhs: Scalar) -> Tensor<Bool>
-
Returns a tensor of Boolean scalars by computing lhs >= rhs element-wise.
Note
.>= supports broadcasting.
Declaration
public static func .>= (lhs: Tensor, rhs: Scalar) -> Tensor<Bool>
-
Returns a tensor of Boolean scalars by computing lhs == rhs element-wise.
Note
.== supports broadcasting.
Declaration
public static func .== (lhs: Tensor, rhs: Tensor) -> Tensor<Bool>
-
Returns a tensor of Boolean scalars by computing lhs != rhs element-wise.
Note
.!= supports broadcasting.
Declaration
public static func .!= (lhs: Tensor, rhs: Tensor) -> Tensor<Bool>
-
Returns a tensor of Boolean scalars by computing lhs == rhs element-wise.
Note
.== supports broadcasting.
Declaration
public static func .== (lhs: Scalar, rhs: Tensor) -> Tensor<Bool>
-
Returns a tensor of Boolean scalars by computing lhs != rhs element-wise.
Note
.!= supports broadcasting.
Declaration
public static func .!= (lhs: Scalar, rhs: Tensor) -> Tensor<Bool>
-
Returns a tensor of Boolean scalars by computing lhs == rhs element-wise.
Note
.== supports broadcasting.
Declaration
public static func .== (lhs: Tensor, rhs: Scalar) -> Tensor<Bool>
-
Returns a tensor of Boolean scalars by computing lhs != rhs element-wise.
Note
.!= supports broadcasting.
Declaration
public static func .!= (lhs: Tensor, rhs: Scalar) -> Tensor<Bool>
-
Returns a tensor of Boolean values indicating whether the elements of self are approximately equal to those of other.
Precondition
self and other must be of the same shape.
Declaration
public func elementsAlmostEqual( _ other: Tensor, tolerance: Scalar = Scalar.ulpOfOne.squareRoot() ) -> Tensor<Bool>
-
Returns true if all elements of self are approximately equal to those of other.
Precondition
self and other must be of the same shape.
Declaration
public func isAlmostEqual( to other: Tensor, tolerance: Scalar = Scalar.ulpOfOne.squareRoot() ) -> Bool
-
Runs a cross replica sum for this tensor. The same cross replica sum must happen on each of the other devices participating in the sum.
Declaration
public mutating func crossReplicaSum(_ scale: Double)
-
Declaration
@derivative init(repeating: shape)
-
Perform an element-wise type conversion from a Bool tensor.
Declaration
public init(_ other: Tensor<Bool>)
-
Perform an element-wise conversion from another Tensor.
Declaration
@differentiable public init<OtherScalar>(_ other: Tensor<OtherScalar>) where OtherScalar : Numeric, OtherScalar : TensorFlowScalar
-
Declaration
@derivative init(_: <<error type>>)
-
Declaration
@derivative init(stacking: alongAxis)
-
Declaration
@derivative init(concatenating: alongAxis)
-
Creates a tensor with all scalars set to zero.
Declaration
public init(zeros shape: TensorShape, on device: Device = .default)
Parameters
shape
Shape of the tensor.
-
Creates a tensor with all scalars set to one.
Declaration
public init(ones shape: TensorShape, on device: Device = .default)
Parameters
shape
Shape of the tensor.
-
Creates a tensor with all scalars set to zero that has the same shape and type as the provided tensor.
Declaration
public init(zerosLike other: Tensor)
Parameters
other
Tensor whose shape and data type to use.
-
Creates a tensor with all scalars set to one that has the same shape and type as the provided tensor.
Declaration
public init(onesLike other: Tensor)
Parameters
other
Tensor whose shape and data type to use.
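A minimal usage sketch of these factory initializers, assuming import TensorFlow:
import TensorFlow
// Zero- and one-filled tensors, plus shape-matching variants.
let zeros = Tensor<Float>(zeros: TensorShape([2, 2]))
let ones = Tensor<Float>(ones: TensorShape([2, 2]))
print(zeros)                              // [[0.0, 0.0], [0.0, 0.0]]
print(Tensor<Float>(zerosLike: ones))     // same shape as 'ones', filled with zeros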
-
Creates a 1-D tensor representing a sequence from a starting value to, but not including, an end value, stepping by the specified amount.
Declaration
public init( rangeFrom start: Scalar, to end: Scalar, stride: Scalar, on device: Device = .default )
Parameters
start
The starting value to use for the sequence. If the sequence contains any values, the first one is start.
end
An end value to limit the sequence. end is never an element of the resulting sequence.
stride
The amount to step by with each iteration. stride must be positive.
-
Creates a 1-D tensor representing a sequence from a starting value to, but not including, an end value, stepping by the specified amount.
Declaration
public init(rangeFrom start: Tensor<Scalar>, to end: Tensor<Scalar>, stride: Tensor<Scalar>)
Parameters
start
The starting value to use for the sequence. If the sequence contains any values, the first one is start.
end
An end value to limit the sequence. end is never an element of the resulting sequence.
stride
The amount to step by with each iteration. stride must be positive.
-
Creates a one-hot tensor at given indices. The locations represented by indices take value onValue (1 by default), while all other locations take value offValue (0 by default). If the input indices is rank n, the new tensor will have rank n+1. The new axis is created at dimension axis (by default, the new axis is appended at the end).
If indices is a scalar, the new tensor's shape will be a vector of length depth.
If indices is a vector of length features, the output shape will be:
features x depth, if axis == -1
depth x features, if axis == 0
If indices is a matrix (batch) with shape [batch, features], the output shape will be:
batch x features x depth, if axis == -1
batch x depth x features, if axis == 1
depth x batch x features, if axis == 0
Declaration
public init( oneHotAtIndices indices: Tensor<Int32>, depth: Int, onValue: Scalar = 1, offValue: Scalar = 0, axis: Int = -1 )
Parameters
indices
A Tensor of indices.
depth
A scalar defining the depth of the one hot dimension.
onValue
A scalar defining the value at the location referred to by some index in indices.
offValue
A scalar defining the value at a location that is not referred to by any index in indices.
axis
The axis to fill. The default is -1, a new inner-most axis.
-
Creates a 1-D tensor representing a sequence from a starting value, up to and including an end value, spaced evenly to generate the number of values specified.
Declaration
public init( linearSpaceFrom start: Scalar, to end: Scalar, count: Int, on device: Device = .default )
Parameters
start
The starting value to use for the sequence. If the sequence contains any values, the first one is start.
end
An end value to limit the sequence. end is the last element of the resulting sequence.
count
The number of values in the resulting sequence. count must be positive.
-
Creates a 1-D tensor representing a sequence from a starting value, up to and including an end value, spaced evenly to generate the number of values specified.
Precondition
start, to, and count must be Tensors containing a single Scalar value.
Declaration
public init(linearSpaceFrom start: Tensor<Scalar>, to end: Tensor<Scalar>, count: Tensor<Int32>)
Parameters
start
The starting value to use for the sequence. If the sequence contains any values, the first one is start.
end
An end value to limit the sequence. end is the last element of the resulting sequence.
count
The number of values in the resulting sequence. count must be positive.
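A minimal usage sketch of the range and linear-space initializers, assuming import TensorFlow:
import TensorFlow
// A half-open range with a stride, and an inclusive, evenly spaced sequence.
let r = Tensor<Float>(rangeFrom: 0, to: 10, stride: 2)      // [0.0, 2.0, 4.0, 6.0, 8.0]; 10 is excluded
let l = Tensor<Float>(linearSpaceFrom: 0, to: 1, count: 5)  // [0.0, 0.25, 0.5, 0.75, 1.0]; 1 is included
print(r)
print(l)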
-
Creates a tensor with the specified shape, randomly sampling scalar values from a uniform distribution between lowerBound and upperBound.
Declaration
public init( randomUniform shape: TensorShape, lowerBound: Tensor<Scalar>? = nil, upperBound: Tensor<Scalar>? = nil, seed: TensorFlowSeed = Context.local.randomSeed, on device: Device = .default )
Parameters
shape
The dimensions of the tensor.
lowerBound
The lower bound of the distribution.
upperBound
The upper bound of the distribution.
seed
The seed value.
-
Creates a tensor with the specified shape, randomly sampling scalar values from a uniform distribution between lowerBound and upperBound.
Declaration
public init( randomUniform shape: TensorShape, lowerBound: Tensor<Scalar>? = nil, upperBound: Tensor<Scalar>? = nil, seed: TensorFlowSeed = Context.local.randomSeed, on device: Device = .default )
Parameters
shape
The dimensions of the tensor.
lowerBound
The lower bound of the distribution.
upperBound
The upper bound of the distribution.
seed
The seed value.
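A minimal usage sketch of uniform random initialization, assuming import TensorFlow:
import TensorFlow
// Sample a [2, 3] tensor uniformly between explicit bounds.
let u = Tensor<Float>(
  randomUniform: TensorShape([2, 3]),
  lowerBound: Tensor<Float>(0),
  upperBound: Tensor<Float>(1))
print(u.shape)   // [2, 3]; values drawn uniformly between the bounds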
-
Creates a tensor with the specified shape, randomly sampling scalar values from a normal distribution.
Declaration
public init( randomNormal shape: TensorShape, mean: Tensor<Scalar>? = nil, standardDeviation: Tensor<Scalar>? = nil, seed: TensorFlowSeed = Context.local.randomSeed, on device: Device = .default )
Parameters
shape
The dimensions of the tensor.
mean
The mean of the distribution.
standardDeviation
The standard deviation of the distribution.
seed
The seed value.
-
Creates a tensor with the specified shape, randomly sampling scalar values from a truncated Normal distribution.
Declaration
public init( randomTruncatedNormal shape: TensorShape, mean: Tensor<Scalar>? = nil, standardDeviation: Tensor<Scalar>? = nil, seed: TensorFlowSeed = Context.local.randomSeed, on device: Device = .default )
Parameters
shape
The dimensions of the tensor.
mean
The mean of the distribution.
standardDeviation
The standard deviation of the distribution.
seed
The seed value.
-
Creates a tensor by drawing samples from a categorical distribution.
Declaration
public init<T: TensorFlowFloatingPoint>( randomCategorialLogits: Tensor<T>, sampleCount: Int32, seed: TensorFlowSeed = Context.local.randomSeed )
Parameters
randomCategorialLogits
2-D Tensor with shape [batchSize, classCount]. Each slice [i, :] represents the unnormalized log probabilities for all classes.
sampleCount
0-D. Number of independent samples to draw for each row slice.
seed
The seed value.
Return Value
2-D Tensor with shape [batchSize, sampleCount]. Each slice [i, :] contains the drawn class labels with range [0, classCount).
-
Creates a tensor with the specified shape by performing Glorot (Xavier) uniform initialization.
It draws random samples from a uniform distribution between -limit and limit generated by the default random number generator, where limit is sqrt(6 / (fanIn + fanOut)) and fanIn/fanOut represent the number of input and output features multiplied by the receptive field size.
Reference: “Understanding the difficulty of training deep feedforward neural networks”
Declaration
public init( glorotUniform shape: TensorShape, seed: TensorFlowSeed = Context.local.randomSeed, on device: Device = .default )
Parameters
shape
The dimensions of the tensor.
seed
The seed value.
-
Creates a tensor with the specified shape by performing Glorot (Xavier) normal initialization.
It draws random samples from a truncated normal distribution centered on 0 with standard deviation sqrt(2 / (fanIn + fanOut)) generated by the default random number generator, where fanIn/fanOut represent the number of input and output features multiplied by the receptive field size.
Reference: “Understanding the difficulty of training deep feedforward neural networks”
Declaration
public init( glorotNormal shape: TensorShape, seed: TensorFlowSeed = Context.local.randomSeed, on device: Device = .default )
Parameters
shape
The dimensions of the tensor.
seed
The seed value.
-
Creates a tensor with the specified shape by performing He (Kaiming) uniform initialization.
It draws random samples from a uniform distribution between -limit and limit generated by the default random number generator, where limit is sqrt(6 / fanIn) and fanIn represents the number of input features multiplied by the receptive field size.
Reference: “Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification”
Declaration
public init( heUniform shape: TensorShape, seed: TensorFlowSeed = Context.local.randomSeed, on device: Device = .default )
Parameters
shape
The dimensions of the tensor.
seed
The seed value.
-
Creates a tensor with the specified shape by performing He (Kaiming) normal initialization.
It draws random samples from a truncated normal distribution centered on 0 with standard deviation sqrt(2 / fanIn) generated by the default random number generator, where fanIn represents the number of input features multiplied by the receptive field size.
Reference: “Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification”
Declaration
public init( heNormal shape: TensorShape, seed: TensorFlowSeed = Context.local.randomSeed, on device: Device = .default )
Parameters
shape
The dimensions of the tensor.
seed
The seed value.
-
Creates a tensor with the specified shape by performing LeCun uniform initialization.
It draws random samples from a uniform distribution between -limit and limit generated by the default random number generator, where limit is sqrt(3 / fanIn) and fanIn represents the number of input features multiplied by the receptive field size.
Reference: “Efficient BackProp”
Declaration
public init( leCunUniform shape: TensorShape, seed: TensorFlowSeed = Context.local.randomSeed, on device: Device = .default )
Parameters
shape
The dimensions of the tensor.
seed
The seed value.
-
Creates a tensor with the specified shape by performing LeCun normal initialization.
It draws random samples from a truncated normal distribution centered on 0 with standard deviation sqrt(1 / fanIn) generated by the default random number generator, where fanIn represents the number of input features multiplied by the receptive field size.
Reference: “Efficient BackProp”
Declaration
public init( leCunNormal shape: TensorShape, seed: TensorFlowSeed = Context.local.randomSeed, on device: Device = .default )
Parameters
shape
The dimensions of the tensor.
seed
The seed value.
-
Creates an orthogonal matrix or tensor.
If the shape of the tensor to initialize is two-dimensional, it is initialized with an orthogonal matrix obtained from the QR decomposition of a matrix of random numbers drawn from a normal distribution. If the matrix has fewer rows than columns then the output will have orthogonal rows. Otherwise, the output will have orthogonal columns.
If the shape of the tensor to initialize is more than two-dimensional, a matrix of shape [shape[0] * ... * shape[rank - 2], shape[rank - 1]] is initialized. The matrix is subsequently reshaped to give a tensor of the desired shape.
Declaration
public init( orthogonal shape: TensorShape, gain: Tensor<Scalar> = Tensor<Scalar>(1), seed: TensorFlowSeed = Context.local.randomSeed )
Parameters
shape
The shape of the tensor.
gain
A multiplicative factor to apply to the orthogonal tensor.
seed
A tuple of two integers to seed the random number generator.
-
Returns the [batched] diagonal part of a [batched] tensor. For the tensor instance of the shape [..., M, N], the output is a tensor of the shape [..., K], where K equals min(N, M).
For example:
// 't' is [[1, 0, 0, 0]
//         [0, 2, 0, 0]
//         [0, 0, 3, 0]
//         [0, 0, 0, 4]]
t.diagonalPart() // [1, 2, 3, 4]
Declaration
@differentiable public func diagonalPart() -> Tensor
-
Constructs a [batched] diagonal array. For the tensor instance of the shape [..., M], the output is a tensor of the shape [..., M, M].
For example:
// 't' is [1, 2, 3, 4]
t.diagonal() // [[1, 0, 0, 0]
//              [0, 2, 0, 0]
//              [0, 0, 3, 0]
//              [0, 0, 0, 4]]
Declaration
@differentiable public func diagonal() -> Tensor
-
Returns self with new diagonal values, given that self is an optionally batched matrix.
The returned tensor has the same shape and values as self, except for the specified diagonals of the innermost matrices which are overwritten by the values in diagonal.
Parameter diagonal: A tensor with rank rank - 1 representing the new diagonal values.
Declaration
public func withDiagonal(_ diagonal: Tensor<Scalar>) -> Tensor
-
Declaration
@differentiable(wrt: self) public func bandPart(_ subdiagonalCount: Int, _ superdiagonalCount: Int) -> Tensor
-
Returns a copy of the innermost tensor defined by central band boundaries. The output is a tensor of the same shape as the instance [..., :, :].
For example:
// 't' is [[ 0,  1,  2, 3]
//         [-1,  0,  1, 2]
//         [-2, -1,  0, 1]
//         [-3, -2, -1, 0]]
t.bandPart(1, -1)
// [[ 0,  1,  2, 3]
//  [-1,  0,  1, 2]
//  [ 0, -1,  0, 1]
//  [ 0,  0, -1, 0]]
t.bandPart(2, 1)
// [[ 0,  1,  0, 0]
//  [-1,  0,  1, 0]
//  [-2, -1,  0, 1]
//  [ 0, -2, -1, 0]]
Declaration
@differentiable public func bandPart(subdiagonalCount: Int, superdiagonalCount: Int) -> Tensor
Parameters
subdiagonalCount
The number of subdiagonals to keep. If negative, keep entire lower triangle.
superdiagonalCount
The number of superdiagonals to keep. If negative, keep entire upper triangle.
-
Returns the QR decomposition of each inner matrix in the tensor, a tensor with inner orthogonal matrices q and a tensor with inner upper triangular matrices r, such that the tensor is equal to matmul(q, r).
Declaration
.Declaration
public func qrDecomposition(fullMatrices: Bool = false) -> ( q: Tensor<Scalar>, r: Tensor<Scalar> )
Parameters
fullMatrices
If true, compute full-sized q and r. Otherwise compute only the leading min(shape[rank - 1], shape[rank - 2]) columns of q.
-
Returns the singular value decomposition of self, given that self is an optionally batched matrix.
The singular value decomposition (SVD) of the optionally batched matrix self is values s, u, and v, such that:
self[..., :, :] = u[..., :, :] • s[..., :, :].diagonal() • v[..., :, :].transposed()
self must be a tensor with shape [..., M, N]. Let K = min(M, N).
Precondition
self must be a tensor with shape [..., M, N].
Declaration
public func svd(computeUV: Bool = true, fullMatrices: Bool = false) -> ( s: Tensor<Scalar>, u: Tensor<Scalar>?, v: Tensor<Scalar>? )
Parameters
computeUV
If true, the left and right singular vectors are computed and returned as u and v, respectively. If false, nil values are returned as u and v.
fullMatrices
If true, u and v respectively have shapes [..., M, M] and [..., N, N]. If false, u and v respectively have shapes [..., M, K] and [..., K, N]. Ignored when computeUV is false.
Return Value
- s: The singular values, with shape [..., K]. Within each vector, the singular values are sorted in descending order.
- u: The left singular vectors.
- v: The right singular vectors.
-
The square root of x.
For real types, if x is negative the result is .nan. For complex types there is a branch cut on the negative real axis.
Declaration
@differentiable public static func sqrt(_ x: `Self`) -> Tensor<Scalar>
-
The cosine of x, interpreted as an angle in radians.
Declaration
@differentiable public static func cos(_ x: `Self`) -> Tensor<Scalar>
-
The sine of x, interpreted as an angle in radians.
Declaration
@differentiable public static func sin(_ x: `Self`) -> Tensor<Scalar>
-
The tangent of x, interpreted as an angle in radians.
Declaration
@differentiable public static func tan(_ x: `Self`) -> Tensor<Scalar>
-
The inverse cosine of x in radians.
Declaration
@differentiable public static func acos(_ x: `Self`) -> Tensor<Scalar>
-
The inverse sine of x in radians.
Declaration
@differentiable public static func asin(_ x: `Self`) -> Tensor<Scalar>
-
The inverse tangent of x in radians.
Declaration
@differentiable public static func atan(_ x: `Self`) -> Tensor<Scalar>
-
The hyperbolic cosine of x.
Declaration
@differentiable public static func cosh(_ x: `Self`) -> Tensor<Scalar>
-
The hyperbolic sine of x.
Declaration
@differentiable public static func sinh(_ x: `Self`) -> Tensor<Scalar>
-
The hyperbolic tangent of x.
Declaration
@differentiable public static func tanh(_ x: `Self`) -> Tensor<Scalar>
-
The inverse hyperbolic cosine of x.
Declaration
@differentiable public static func acosh(_ x: `Self`) -> Tensor<Scalar>
-
The inverse hyperbolic sine of x.
Declaration
@differentiable public static func asinh(_ x: `Self`) -> Tensor<Scalar>
-
The inverse hyperbolic tangent of x.
Declaration
@differentiable public static func atanh(_ x: `Self`) -> Tensor<Scalar>
-
The exponential function applied to x, or e**x.
Declaration
@differentiable public static func exp(_ x: `Self`) -> Tensor<Scalar>
-
Two raised to the power x.
Declaration
@differentiable public static func exp2(_ x: `Self`) -> Tensor<Scalar>
-
Ten raised to the power x.
Declaration
@differentiable public static func exp10(_ x: `Self`) -> Tensor<Scalar>
-
exp(x) - 1 evaluated so as to preserve accuracy close to zero.
Declaration
@differentiable public static func expm1(_ x: `Self`) -> Tensor<Scalar>
-
The natural logarithm of x.
Declaration
@differentiable public static func log(_ x: `Self`) -> Tensor<Scalar>
-
The base-two logarithm of x.
Declaration
@differentiable public static func log2(_ x: `Self`) -> Tensor<Scalar>
-
The base-ten logarithm of x.
Declaration
@differentiable public static func log10(_ x: `Self`) -> Tensor<Scalar>
-
log(1 + x) evaluated so as to preserve accuracy close to zero.
Declaration
@differentiable public static func log1p(_ x: `Self`) -> Tensor<Scalar>
-
exp(y log(x)) computed without loss of intermediate precision.
For real types, if x is negative the result is NaN, even if y has an integral value. For complex types, there is a branch cut on the negative real axis.
Declaration
@differentiable public static func pow(_ x: `Self`, _ y: `Self`) -> Tensor<Scalar>
-
x raised to the nth power.
The product of n copies of x.
Declaration
@differentiable public static func pow(_ x: `Self`, _ n: Int) -> Tensor<Scalar>
-
The nth root of x.
For real types, if x is negative and n is even, the result is NaN. For complex types, there is a branch cut along the negative real axis.
Declaration
@differentiable public static func root(_ x: `Self`, _ n: Int) -> Tensor<Scalar>
-
Declaration
public typealias VectorSpaceScalar = Float
-
Declaration
public func scaled(by scale: Float) -> Tensor<Scalar>
-
Declaration
public func adding(_ scalar: Float) -> Tensor<Scalar>
-
Declaration
public func subtracting(_ scalar: Float) -> Tensor<Scalar>
-
Adds the scalar to every scalar of the tensor and produces the sum.
Declaration
@differentiable public static func + (lhs: Scalar, rhs: Tensor) -> Tensor
-
Adds the scalar to every scalar of the tensor and produces the sum.
Declaration
@differentiable public static func + (lhs: Tensor, rhs: Scalar) -> Tensor
-
Subtracts the scalar from every scalar of the tensor and produces the difference.
Declaration
@differentiable public static func - (lhs: Scalar, rhs: Tensor) -> Tensor
-
Subtracts the scalar from every scalar of the tensor and produces the difference.
Declaration
@differentiable public static func - (lhs: Tensor, rhs: Scalar) -> Tensor
-
Adds two tensors and stores the result in the left-hand-side variable.
Note
+= supports broadcasting.
Declaration
public static func += (lhs: inout Tensor, rhs: Tensor)
-
Adds the scalar to every scalar of the tensor and stores the result in the left-hand-side variable.
Declaration
public static func += (lhs: inout Tensor, rhs: Scalar)
-
Subtracts the second tensor from the first and stores the result in the left-hand-side variable.
Note
-= supports broadcasting.
Declaration
public static func -= (lhs: inout Tensor, rhs: Tensor)
-
Subtracts the scalar from every scalar of the tensor and stores the result in the left-hand-side variable.
Declaration
public static func -= (lhs: inout Tensor, rhs: Scalar)
-
Returns the tensor produced by multiplying the two tensors.
Note
* supports broadcasting.
Declaration
@differentiable public static func * (lhs: Tensor, rhs: Tensor) -> Tensor
-
Multiplies the scalar with every scalar of the tensor and produces the product.
Declaration
@differentiable public static func * (lhs: Scalar, rhs: Tensor) -> Tensor
-
Multiplies the scalar with every scalar of the tensor and produces the product.
Declaration
@differentiable public static func * (lhs: Tensor, rhs: Scalar) -> Tensor
-
Multiplies two tensors and stores the result in the left-hand-side variable.
Note
*= supports broadcasting.
Declaration
public static func *= (lhs: inout Tensor, rhs: Tensor)
-
Multiplies the tensor with the scalar, broadcasting the scalar, and stores the result in the left-hand-side variable.
Declaration
public static func *= (lhs: inout Tensor, rhs: Scalar)
-
Returns the quotient of dividing the first tensor by the second.
Note
/ supports broadcasting.
Declaration
@differentiable public static func / (lhs: Tensor, rhs: Tensor) -> Tensor
-
Returns the quotient of dividing the scalar by the tensor, broadcasting the scalar.
Declaration
@differentiable public static func / (lhs: Scalar, rhs: Tensor) -> Tensor
-
Returns the quotient of dividing the tensor by the scalar, broadcasting the scalar.
Declaration
@differentiable public static func / (lhs: Tensor, rhs: Scalar) -> Tensor
-
Divides the first tensor by the second and stores the quotient in the left-hand-side variable.
Declaration
public static func /= (lhs: inout Tensor, rhs: Tensor)
-
Divides the tensor by the scalar, broadcasting the scalar, and stores the quotient in the left-hand-side variable.
Declaration
public static func /= (lhs: inout Tensor, rhs: Scalar)
-
Returns the remainder of dividing the first tensor by the second.
Note
% supports broadcasting.
Declaration
public static func % (lhs: Tensor, rhs: Tensor) -> Tensor
-
Returns the remainder of dividing the tensor by the scalar, broadcasting the scalar.
Declaration
public static func % (lhs: Tensor, rhs: Scalar) -> Tensor
-
Returns the remainder of dividing the scalar by the tensor, broadcasting the scalar.
Declaration
public static func % (lhs: Scalar, rhs: Tensor) -> Tensor
-
Divides the first tensor by the second and stores the remainder in the left-hand-side variable.
Declaration
public static func %= (lhs: inout Tensor, rhs: Tensor)
-
Divides the tensor by the scalar and stores the remainder in the left-hand-side variable.
Declaration
public static func %= (lhs: inout Tensor, rhs: Scalar)
-
Returns !self element-wise.
Declaration
public func elementsLogicalNot() -> Tensor
-
Returns self && other element-wise.
Note
&& supports broadcasting.
Declaration
public func elementsLogicalAnd(_ other: Tensor) -> Tensor
-
Returns self && other element-wise, broadcasting other.
Declaration
public func elementsLogicalAnd(_ other: Scalar) -> Tensor
-
Returns self || other element-wise.
Declaration
public func elementsLogicalOr(_ other: Tensor) -> Tensor
-
Returns self || other element-wise, broadcasting other.
Declaration
public func elementsLogicalOr(_ other: Scalar) -> Tensor
-
Returns max(min(self, max), min).
Declaration
@differentiable public func clipped(min: Tensor, max: Tensor) -> Tensor
-
Returns max(min(self, max), min).
Declaration
@differentiable(wrt: (self, min) ) public func clipped(min: Tensor, max: Scalar) -> Tensor
-
Returns max(min(self, max), min).
Declaration
@differentiable(wrt: (self, max) ) public func clipped(min: Scalar, max: Tensor) -> Tensor
-
Returns max(min(self, max), min).
Declaration
@differentiable(wrt: self) public func clipped(min: Scalar, max: Scalar) -> Tensor
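A minimal usage sketch of clipping, assuming import TensorFlow:
import TensorFlow
// Clamp values to the interval [-1, 1].
let x = Tensor<Float>([-2, -0.5, 0.5, 2])
print(x.clipped(min: -1, max: 1))   // [-1.0, -0.5, 0.5, 1.0]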
-
Returns the negation of the specified tensor element-wise.
Declaration
@differentiable public prefix static func - (rhs: Tensor) -> Tensor
-
Declaration
@differentiable(wrt: self) public func squared() -> Tensor
-
Returns a boolean tensor indicating which elements of x are finite.
Declaration
public var isFinite: Tensor<Bool> { get }
-
Returns a boolean tensor indicating which elements of x are infinite.
Declaration
public var isInfinite: Tensor<Bool> { get }
-
Returns a boolean tensor indicating which elements of x are NaN-valued.
Declaration
public var isNaN: Tensor<Bool> { get }
-
Returns true if all scalars are equal to true. Otherwise, returns false.
Declaration
public func all() -> Bool
-
Returns true if any scalars are equal to true. Otherwise, returns false.
Declaration
public func any() -> Bool
-
Performs a logical AND operation along the specified axes. The reduced dimensions are removed.
Precondition
Each value in axes must be in the range -rank..<rank.
Declaration
public func all(squeezingAxes axes: Int...) -> Tensor
Parameters
axes
The dimensions to reduce.
-
Performs a logical OR operation along the specified axes. The reduced dimensions are removed.
Precondition
Each value in axes must be in the range -rank..<rank.
Declaration
public func any(squeezingAxes