public enum _Raw
-
Declaration
public enum _Raw.A
-
Declaration
public enum _Raw.DataFormat
-
Declaration
public enum _Raw.DataFormat1
-
Declaration
public enum _Raw.DataFormat5
-
Declaration
public enum _Raw.DensityUnit
-
Declaration
public enum _Raw.Direction
-
Declaration
public enum _Raw.Errors
-
Declaration
public enum _Raw.FinalOp
-
Declaration
public enum _Raw.Format
-
Declaration
public enum _Raw.InputMode
-
Declaration
public enum _Raw.InputQuantMode
-
Declaration
public enum _Raw.LossType
-
Declaration
public enum _Raw.MergeOp
-
Declaration
public enum _Raw.Method
-
Declaration
public enum _Raw.Method4
-
Declaration
public enum _Raw.Mode
-
Declaration
public enum _Raw.Mode6
-
Declaration
public enum _Raw.OutputEncoding
-
Declaration
public enum _Raw.Padding
-
Declaration
public enum _Raw.Padding2
-
Declaration
public enum _Raw.Reduction
-
Declaration
public enum _Raw.ReductionType
-
Declaration
public enum _Raw.RnnMode
-
Declaration
public enum _Raw.RoundMode
-
Declaration
public enum _Raw.RoundMode7
-
Declaration
public enum _Raw.SplitType
-
Declaration
public enum _Raw.SplitType2
-
Declaration
public enum _Raw.Unit
-
Declaration
public static func a() -> Tensor<Float>
-
Raise an exception to abort the process when called.
If exit_without_error is true, the process will exit normally; otherwise it will exit with a SIGABORT signal.
Returns nothing but an exception.
- Attr error_msg: A string which is the message associated with the exception.
Declaration
public static func abort( errorMsg: String, exitWithoutError: Bool = false )
-
Computes the absolute value of a tensor.
Given a tensor x, this operation returns a tensor containing the absolute value of each element in x. For example, if x is an input element and y is an output element, this operation computes \(y = |x|\).
Declaration
public static func abs<T: TensorFlowNumeric>( _ x: Tensor<T> ) -> Tensor<T>
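As a quick illustration, here is a minimal Swift sketch of invoking this raw op (assuming a Swift for TensorFlow toolchain where the _Raw namespace is exposed by the TensorFlow module):
```swift
import TensorFlow

// Element-wise absolute value via the raw op.
let x = Tensor<Float>([-2.25, 3.25, -1.0])
let y = _Raw.abs(x)
print(y)  // [2.25, 3.25, 1.0]
```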
-
Returns the element-wise sum of a list of tensors.
tf.accumulate_n_v2 performs the same operation as tf.add_n, but does not wait for all of its inputs to be ready before beginning to sum. This can save memory if inputs are ready at different times, since the minimum temporary storage is proportional to the output size rather than the input size.
Unlike the original accumulate_n, accumulate_n_v2 is differentiable.
Returns a Tensor of same shape and type as the elements of inputs.
Attr shape: Shape of elements of inputs.
Declaration
public static func accumulateNV2<T: TensorFlowNumeric>( inputs: [Tensor<T>], shape: TensorShape? ) -> Tensor<T>
Parameters
inputs
A list of Tensor objects, each with same shape and type.
-
Computes acos of x element-wise.
Declaration
public static func acos<T: TensorFlowNumeric>( _ x: Tensor<T> ) -> Tensor<T>
-
Computes inverse hyperbolic cosine of x element-wise.
Given an input tensor, the function computes inverse hyperbolic cosine of every element. Input range is [1, inf]. It returns nan if the input lies outside the range.
x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")]) tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf]
Declaration
public static func acosh<T: FloatingPoint & TensorFlowScalar>( _ x: Tensor<T> ) -> Tensor<T>
-
Returns x + y element-wise.
NOTE: Add supports broadcasting. AddN does not. More about broadcasting here.
Declaration
public static func add<T: TensorFlowNumeric>( _ x: Tensor<T>, _ y: Tensor<T> ) -> Tensor<T>
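A minimal Swift sketch of the call (same assumptions about the _Raw namespace as in the abs example above); since Add broadcasts, the two arguments only need compatible shapes:
```swift
import TensorFlow

// Element-wise addition with broadcasting across rows.
let x = Tensor<Float>([[1, 2], [3, 4]])
let y = Tensor<Float>([10, 20])
print(_Raw.add(x, y))  // [[11.0, 22.0], [13.0, 24.0]]
```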
-
Returns x + y element-wise.
NOTE: Add supports broadcasting. AddN does not. More about broadcasting here.
Declaration
public static func add( _ x: StringTensor, _ y: StringTensor ) -> StringTensor
-
Add an N-minibatch SparseTensor to a SparseTensorsMap, return N handles.
A SparseTensor of rank R is represented by three tensors: sparse_indices, sparse_values, and sparse_shape, where sparse_indices.shape[1] == sparse_shape.shape[0] == R.
An N-minibatch of SparseTensor objects is represented as a SparseTensor having a first sparse_indices column taking values between [0, N), where the minibatch size N == sparse_shape[0].
The input SparseTensor must have rank R greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the SparseTensor must be sorted in increasing order of this first dimension. The stored SparseTensor objects pointed to by each row of the output sparse_handles will have rank R-1.
The SparseTensor values can then be read out as part of a minibatch by passing the given keys as vector elements to TakeManySparseFromTensorsMap. To ensure the correct SparseTensorsMap is accessed, ensure that the same container and shared_name are passed to that Op. If no shared_name is provided here, instead use the name of the Operation created by calling AddManySparseToTensorsMap as the shared_name passed to TakeManySparseFromTensorsMap. Ensure the Operations are colocated.
Attrs:
- container: The container name for the SparseTensorsMap created by this op.
- shared_name: The shared name for the SparseTensorsMap created by this op. If blank, the new Operation’s unique name is used.
Output sparse_handles: 1-D. The handles of the SparseTensor now stored in the SparseTensorsMap. Shape: [N].
Declaration
public static func addManySparseToTensorsMap<T: TensorFlowScalar>( sparseIndices: Tensor<Int64>, sparseValues: Tensor<T>, sparseShape: Tensor<Int64>, container: String, sharedName: String ) -> Tensor<Int64>
Parameters
sparse_indices
2-D. The indices of the minibatch SparseTensor. sparse_indices[:, 0] must be ordered values in [0, N).
sparse_values
1-D. The values of the minibatch SparseTensor.
sparse_shape
1-D. The shape of the minibatch SparseTensor. The minibatch size N == sparse_shape[0].
-
Add all input tensors element-wise.
Inputs must be of same size and shape.
x = [9, 7, 10] tf.math.add_n(x) ==> 26
Declaration
public static func addN<T: TensorFlowNumeric>( inputs: [Tensor<T>] ) -> Tensor<T>
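The Python one-liner above translates to Swift roughly as follows (a sketch, assuming the _Raw namespace is available):
```swift
import TensorFlow

// Element-wise sum of a list of same-shaped tensors.
let x: [Tensor<Int32>] = [Tensor([9]), Tensor([7]), Tensor([10])]
print(_Raw.addN(inputs: x))  // [26]
```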
-
Add a SparseTensor to a SparseTensorsMap, return its handle.
A SparseTensor is represented by three tensors: sparse_indices, sparse_values, and sparse_shape.
This operator takes the given SparseTensor and adds it to a container object (a SparseTensorsMap). A unique key within this container is generated in the form of an int64, and this is the value that is returned.
The SparseTensor can then be read out as part of a minibatch by passing the key as a vector element to TakeManySparseFromTensorsMap. To ensure the correct SparseTensorsMap is accessed, ensure that the same container and shared_name are passed to that Op. If no shared_name is provided here, instead use the name of the Operation created by calling AddSparseToTensorsMap as the shared_name passed to TakeManySparseFromTensorsMap. Ensure the Operations are colocated.
Attrs:
- container: The container name for the SparseTensorsMap created by this op.
- shared_name: The shared name for the SparseTensorsMap created by this op. If blank, the new Operation’s unique name is used.
Output sparse_handle: 0-D. The handle of the SparseTensor now stored in the SparseTensorsMap.
Declaration
public static func addSparseToTensorsMap<T: TensorFlowScalar>( sparseIndices: Tensor<Int64>, sparseValues: Tensor<T>, sparseShape: Tensor<Int64>, container: String, sharedName: String ) -> Tensor<Int64>
Parameters
sparse_indices
2-D. The indices of the SparseTensor.
sparse_values
1-D. The values of the SparseTensor.
sparse_shape
1-D. The shape of the SparseTensor.
-
Returns x + y element-wise.
NOTE: Add supports broadcasting. AddN does not. More about broadcasting here.
Declaration
public static func addV2<T: TensorFlowNumeric>( _ x: Tensor<T>, _ y: Tensor<T> ) -> Tensor<T>
-
Adjust the contrast of one or more images.
images is a tensor of at least 3 dimensions. The last 3 dimensions are interpreted as [height, width, channels]. The other dimensions only represent a collection of images, such as [batch, height, width, channels].
Contrast is adjusted independently for each channel of each image.
For each channel, the Op first computes the mean of the image pixels in the channel and then adjusts each component of each pixel to (x - mean) * contrast_factor + mean.
Output output: The contrast-adjusted image or images.
Declaration
public static func adjustContrastv2<T: FloatingPoint & TensorFlowScalar>( images: Tensor<T>, contrastFactor: Tensor<Float> ) -> Tensor<T>
Parameters
images
Images to adjust. At least 3-D.
contrast_factor
A float multiplier for adjusting contrast.
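A small worked Swift sketch (the shape [1, 2, 2, 1] and values are illustrative only):
```swift
import TensorFlow

// One 2x2 single-channel image; the channel mean is 2.5.
let images = Tensor<Float>(shape: [1, 2, 2, 1], scalars: [1, 2, 3, 4])
// Each pixel becomes (x - mean) * 2.0 + mean.
let out = _Raw.adjustContrastv2(images: images,
                                contrastFactor: Tensor<Float>(2.0))
print(out)  // [[[[-0.5], [1.5]], [[3.5], [5.5]]]]
```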
-
Adjust the hue of one or more images.
images is a tensor of at least 3 dimensions. The last dimension is interpreted as channels, and must be three.
The input image is considered in the RGB colorspace. Conceptually, the RGB colors are first mapped into HSV. A delta is then applied to all the hue values, and the result is mapped back into the RGB colorspace.
Output output: The hue-adjusted image or images.
Declaration
public static func adjustHue<T: FloatingPoint & TensorFlowScalar>( images: Tensor<T>, delta: Tensor<Float> ) -> Tensor<T>
Parameters
images
Images to adjust. At least 3-D.
delta
A float delta to add to the hue.
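For instance, a hue delta of 1/3 rotates the hue by a third of a turn, so pure red becomes pure green. A Swift sketch:
```swift
import TensorFlow

// A single pure-red pixel, shape [1, 1, 3].
let red = Tensor<Float>(shape: [1, 1, 3], scalars: [1, 0, 0])
let shifted = _Raw.adjustHue(images: red, delta: Tensor<Float>(1.0 / 3.0))
print(shifted)  // approximately [0.0, 1.0, 0.0] (green)
```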
-
Adjust the saturation of one or more images.
images is a tensor of at least 3 dimensions. The last dimension is interpreted as channels, and must be three.
The input image is considered in the RGB colorspace. Conceptually, the RGB colors are first mapped into HSV. A scale is then applied to all the saturation values, and the result is mapped back into the RGB colorspace.
Output output: The saturation-adjusted image or images.
Declaration
public static func adjustSaturation<T: FloatingPoint & TensorFlowScalar>( images: Tensor<T>, scale: Tensor<Float> ) -> Tensor<T>
Parameters
images
Images to adjust. At least 3-D.
scale
A float scale to add to the saturation.
-
Computes the “logical and” of elements across dimensions of a tensor.
Reduces input along the dimensions given in axis. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in axis. If keep_dims is true, the reduced dimensions are retained with length 1.
Attr keep_dims: If true, retain reduced dimensions with length 1.
Output output: The reduced tensor.
Declaration
public static func all<Tidx: TensorFlowIndex>( _ input: Tensor<Bool>, reductionIndices: Tensor<Tidx>, keepDims: Bool = false ) -> Tensor<Bool>
Parameters
input
The tensor to reduce.
reduction_indices
The dimensions to reduce. Must be in the range [-rank(input), rank(input)).
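A short Swift sketch of reducing along one axis (assuming the _Raw namespace is exposed):
```swift
import TensorFlow

// Row-wise logical AND over a 2x2 boolean matrix.
let t = Tensor<Bool>(shape: [2, 2], scalars: [true, true, true, false])
let rows = _Raw.all(t, reductionIndices: Tensor<Int32>([1]))
print(rows)  // [true, false]
```
-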
Generates labels for candidate sampling with a learned unigram distribution.
See explanations of candidate sampling and the data formats at go/candidate-sampling.
For each batch, this op picks a single set of sampled candidate labels.
The advantages of sampling candidates per-batch are simplicity and the possibility of efficient dense matrix multiplication. The disadvantage is that the sampled candidates must be chosen independently of the context and of the true labels.
Attrs:
- num_true: Number of true labels per context.
- num_sampled: Number of candidates to produce.
- unique: If unique is true, we sample with rejection, so that all sampled candidates in a batch are unique. This requires some approximation to estimate the post-rejection sampling probabilities.
- seed: If either seed or seed2 is set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
- seed2: A second seed to avoid seed collision.
Outputs:
- sampled_candidates: A vector of length num_sampled, in which each element is the ID of a sampled candidate.
- true_expected_count: A batch_size * num_true matrix, representing the number of times each candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
- sampled_expected_count: A vector of length num_sampled, for each sampled candidate representing the number of times the candidate is expected to occur in a batch of sampled candidates. If unique=true, then this is a probability.
Declaration
Parameters
true_classes
A batch_size * num_true matrix, in which each row contains the IDs of the num_true target_classes in the corresponding original label.
-
An Op to exchange data across TPU replicas.
On each replica, the input is split into split_count blocks along split_dimension and sent to the other replicas given group_assignment. After receiving split_count - 1 blocks from other replicas, we concatenate the blocks along concat_dimension as the output.
For example, suppose there are 2 TPU replicas:
replica 0 receives input: [[A, B]]
replica 1 receives input: [[C, D]]
group_assignment=[[0, 1]]
concat_dimension=0
split_dimension=1
split_count=2
replica 0’s output: [[A], [C]]
replica 1’s output: [[B], [D]]
Attrs:
- T: The type of elements to be exchanged.
- concat_dimension: The dimension number to concatenate.
- split_dimension: The dimension number to split.
- split_count: The number of splits; this number must equal the sub-group size (group_assignment.get_shape()[1]).
Output output: The exchanged result.
Declaration
public static func allToAll<T: TensorFlowScalar>( _ input: Tensor<T>, groupAssignment: Tensor<Int32>, concatDimension: Int64, splitDimension: Int64, splitCount: Int64 ) -> Tensor<T>
Parameters
input
The local input to the sum.
group_assignment
An int32 tensor with shape [num_groups, num_replicas_per_group]. group_assignment[i] represents the replica ids in the ith subgroup.
-
Returns the argument of a complex number.
Given a tensor input of complex numbers, this operation returns a tensor of type float that is the argument of each element in input. All elements in input must be complex numbers of the form \(a + bj\), where a is the real part and b is the imaginary part.
The argument returned by this operation is of the form \(atan2(b, a)\).
For example:
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] tf.angle(input) ==> [2.0132, 1.056]
@compatibility(numpy) Equivalent to np.angle. @end_compatibility
Declaration
public static func angle< T: TensorFlowScalar, Tout: FloatingPoint & TensorFlowScalar >( _ input: Tensor<T> ) -> Tensor<Tout>
-
A container for an iterator resource.
- Output handle: A handle to the iterator that can be passed to a “MakeIterator” or “IteratorGetNext” op. In contrast to Iterator, AnonymousIterator prevents resource sharing by name, and does not keep a reference to the resource container.
Declaration
public static func anonymousIterator( outputTypes: [TensorDataType], outputShapes: [TensorShape?] ) -> ResourceHandle
-
A container for an iterator resource.
- Outputs:
- handle: A handle to the iterator that can be passed to a “MakeIterator” or “IteratorGetNext” op. In contrast to Iterator, AnonymousIterator prevents resource sharing by name, and does not keep a reference to the resource container.
- deleter: A variant deleter that should be passed into the op that deletes the iterator.
Declaration
public static func anonymousIteratorV2( outputTypes: [TensorDataType], outputShapes: [TensorShape?] ) -> (handle: ResourceHandle, deleter: VariantHandle)
-
Declaration
public static func anonymousMemoryCache() -> (handle: ResourceHandle, deleter: VariantHandle)
-
A container for a multi device iterator resource.
- Outputs:
- handle: A handle to a multi device iterator that can be passed to a “MultiDeviceIteratorGetNextFromShard” op. In contrast to MultiDeviceIterator, AnonymousIterator prevents resource sharing by name, and does not keep a reference to the resource container.
- deleter: A variant deleter that should be passed into the op that deletes the iterator.
Declaration
public static func anonymousMultiDeviceIterator( devices: [String], outputTypes: [TensorDataType], outputShapes: [TensorShape?] ) -> (handle: ResourceHandle, deleter: VariantHandle)
-
Declaration
public static func anonymousRandomSeedGenerator( seed: Tensor<Int64>, seed2: Tensor<Int64> ) -> (handle: ResourceHandle, deleter: VariantHandle)
-
Computes the “logical or” of elements across dimensions of a tensor.
Reduces input along the dimensions given in axis. Unless keep_dims is true, the rank of the tensor is reduced by 1 for each entry in axis. If keep_dims is true, the reduced dimensions are retained with length 1.
Attr keep_dims: If true, retain reduced dimensions with length 1.
Output output: The reduced tensor.
Declaration
public static func any<Tidx: TensorFlowIndex>( _ input: Tensor<Bool>, reductionIndices: Tensor<Tidx>, keepDims: Bool = false ) -> Tensor<Bool>
Parameters
input
The tensor to reduce.
reduction_indices
The dimensions to reduce. Must be in the range [-rank(input), rank(input)).
-
Returns the truth value of abs(x-y) < tolerance element-wise.
Declaration
public static func approximateEqual<T: TensorFlowNumeric>( _ x: Tensor<T>, _ y: Tensor<T>, tolerance: Double = 1e-05 ) -> Tensor<Bool>
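A minimal Swift sketch:
```swift
import TensorFlow

// true where |x - y| < tolerance, element-wise.
let x = Tensor<Float>([1.0, 2.0])
let y = Tensor<Float>([1.0001, 2.5])
print(_Raw.approximateEqual(x, y, tolerance: 1e-3))  // [true, false]
```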
-
Returns the index with the largest value across dimensions of a tensor.
Note that in case of ties the identity of the return value is not guaranteed.
Usage:
import tensorflow as tf a = [1, 10, 26.9, 2.8, 166.32, 62.3] b = tf.math.argmax(input = a) c = tf.keras.backend.eval(b) # c = 4 # here a[4] = 166.32 which is the largest element of a across axis 0
Declaration
public static func argMax< T: TensorFlowNumeric, Tidx: TensorFlowIndex, OutputType: TensorFlowIndex >( _ input: Tensor<T>, dimension: Tensor<Tidx> ) -> Tensor<OutputType>
Parameters
dimension
int32 or int64, must be in the range [-rank(input), rank(input)). Describes which dimension of the input Tensor to reduce across. For vectors, use dimension = 0.
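The same usage in Swift might look like this sketch (the OutputType generic is pinned by the result annotation):
```swift
import TensorFlow

let a = Tensor<Float>([1, 10, 26.9, 2.8, 166.32, 62.3])
// Index of the largest element along dimension 0.
let idx: Tensor<Int32> = _Raw.argMax(a, dimension: Tensor<Int32>(0))
print(idx)  // 4, since a[4] = 166.32
```
-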
Returns the index with the smallest value across dimensions of a tensor.
Note that in case of ties the identity of the return value is not guaranteed.
Usage:
import tensorflow as tf a = [1, 10, 26.9, 2.8, 166.32, 62.3] b = tf.math.argmin(input = a) c = tf.keras.backend.eval(b) # c = 0 # here a[0] = 1 which is the smallest element of a across axis 0
Declaration
public static func argMin< T: TensorFlowNumeric, Tidx: TensorFlowIndex, OutputType: TensorFlowIndex >( _ input: Tensor<T>, dimension: Tensor<Tidx> ) -> Tensor<OutputType>
Parameters
dimension
int32 or int64, must be in the range [-rank(input), rank(input)). Describes which dimension of the input Tensor to reduce across. For vectors, use dimension = 0.
-
Converts each entry in the given tensor to strings.
Supports many numeric types and boolean.
For Unicode, see the https://www.tensorflow.org/tutorials/representation/unicode tutorial.
- Attrs:
- precision: The post-decimal precision to use for floating point numbers. Only used if precision > -1.
- scientific: Use scientific notation for floating point numbers.
- shortest: Use shortest representation (either scientific or standard) for floating point numbers.
- width: Pad pre-decimal numbers to this width. Applies to both floating point and integer numbers. Only used if width > -1.
- fill: The value to pad if width > -1. If empty, pads with spaces. Another typical value is ‘0’. String cannot be longer than 1 character.
Declaration
public static func asString<T: TensorFlowScalar>( _ input: Tensor<T>, precision: Int64 = -1, scientific: Bool = false, shortest: Bool = false, width: Int64 = -1, fill: String ) -> StringTensor
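A hedged Swift sketch (note that fill has no default in the declaration above, so it must be passed explicitly):
```swift
import TensorFlow

let x = Tensor<Float>([3.14159, 2.71828])
// Two post-decimal digits, no padding.
let s: StringTensor = _Raw.asString(x, precision: 2, fill: "")
// Produces ["3.14", "2.72"]
```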
-
Computes the trignometric inverse sine of x element-wise.
The tf.math.asin operation returns the inverse of tf.math.sin, such that if y = tf.math.sin(x) then, x = tf.math.asin(y).
Note: The output of tf.math.asin will lie within the invertible range of sine, i.e. [-pi/2, pi/2].
For example:
# Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] x = tf.constant([1.047, 0.785]) y = tf.math.sin(x) # [0.8659266, 0.7068252] tf.math.asin(y) # [1.047, 0.785] = x
Declaration
public static func asin<T: TensorFlowNumeric>( _ x: Tensor<T> ) -> Tensor<T>
-
Computes inverse hyperbolic sine of x element-wise.
Given an input tensor, this function computes inverse hyperbolic sine for every element in the tensor. Both input and output have a range of [-inf, inf].
x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")]) tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf]
Declaration
public static func asinh<T: FloatingPoint & TensorFlowScalar>( _ x: Tensor<T> ) -> Tensor<T>
-
Asserts that the given condition is true.
If condition evaluates to false, print the list of tensors in data. summarize determines how many entries of the tensors to print.
Attr summarize: Print this many entries of each tensor.
Declaration
public static func assert<T: TensorArrayProtocol>( condition: Tensor<Bool>, data: T, summarize: Int64 = 3 )
Parameters
condition
The condition to evaluate.
data
The tensors to print out when condition is false.
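A minimal Swift sketch (assuming, as in Swift for TensorFlow toolchains, that an array of tensors such as [Tensor<Float>] satisfies TensorArrayProtocol):
```swift
import TensorFlow

let x = Tensor<Float>([1, 2, 3])
let condition = Tensor<Bool>(true)  // e.g. a precomputed validity check
// Aborts and prints up to `summarize` entries of `data` if false.
_Raw.assert(condition: condition, data: [x], summarize: 3)
```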
-
A transformation that asserts which transformations happen next.
This transformation checks whether the camel-case names (i.e. “FlatMap”, not “flat_map”) of the transformations following this transformation match the list of names in the transformations argument. If there is a mismatch, the transformation raises an exception.
The check occurs when iterating over the contents of the dataset, which means that the check happens after any static optimizations are applied to the dataset graph.
Declaration
public static func assertNextDataset( inputDataset: VariantHandle, transformations: StringTensor, outputTypes: [TensorDataType], outputShapes: [TensorShape?] ) -> VariantHandle
Parameters
input_dataset
A variant tensor representing the input dataset. AssertNextDataset passes through the outputs of its input dataset.
transformations
A tf.string vector tf.Tensor identifying the transformations that are expected to happen next.
-
Adds a value to the current value of a variable.
Any ReadVariableOp with a control dependency on this op is guaranteed to see the incremented value or a subsequent newer one.
Attr dtype: the dtype of the value.
Declaration
public static func assignAddVariableOp<Dtype: TensorFlowScalar>( resource: ResourceHandle, value: Tensor<Dtype> )
Parameters
resource
handle to the resource in which to store the variable.
value
the value by which the variable will be incremented.
-
Subtracts a value from the current value of a variable.
Any ReadVariableOp with a control dependency on this op is guaranteed to see the decremented value or a subsequent newer one.
Attr dtype: the dtype of the value.
Declaration
public static func assignSubVariableOp<Dtype: TensorFlowScalar>( resource: ResourceHandle, value: Tensor<Dtype> )
Parameters
resource
handle to the resource in which to store the variable.
value
the value by which the variable will be decremented.
-
Assigns a new value to a variable.
Any ReadVariableOp with a control dependency on this op is guaranteed to return this value or a subsequent newer value of the variable.
Attr dtype: the dtype of the value.
Declaration
public static func assignVariableOp<Dtype: TensorFlowScalar>( resource: ResourceHandle, value: Tensor<Dtype> )
Parameters
resource
handle to the resource in which to store the variable.
value
the value to set the variable to.
-
Computes the trignometric inverse tangent of x element-wise.
The tf.math.atan operation returns the inverse of tf.math.tan, such that if y = tf.math.tan(x) then, x = tf.math.atan(y).
Note: The output of tf.math.atan will lie within the invertible range of tan, i.e. (-pi/2, pi/2).
For example:
# Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] x = tf.constant([1.047, 0.785]) y = tf.math.tan(x) # [1.731261, 0.99920404] tf.math.atan(y) # [1.047, 0.785] = x
Declaration
public static func atan<T: TensorFlowNumeric>( _ x: Tensor<T> ) -> Tensor<T>
-
Computes arctangent of y/x element-wise, respecting signs of the arguments.
This is the angle \(\theta \in [-\pi, \pi]\) such that \(x = r \cos(\theta)\) and \(y = r \sin(\theta)\), where \(r = \sqrt{x^2 + y^2}\).
Declaration
public static func atan2<T: FloatingPoint & TensorFlowScalar>( _ y: Tensor<T>, _ x: Tensor<T> ) -> Tensor<T>
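A worked Swift sketch covering all four quadrants:
```swift
import TensorFlow

// One point per quadrant; atan2 recovers the signed angle.
let y = Tensor<Float>([1, 1, -1, -1])
let x = Tensor<Float>([1, -1, 1, -1])
print(_Raw.atan2(y, x))
// approximately [0.7854, 2.3562, -0.7854, -2.3562]
```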
-
Computes inverse hyperbolic tangent of x element-wise.
Given an input tensor, this function computes inverse hyperbolic tangent for every element in the tensor. Input range is [-1,1] and output range is [-inf, inf]. If input is -1, output will be -inf and if the input is 1, output will be inf. Values outside the range will have nan as output.
x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")]) tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 0.54930615 nan nan]
Declaration
public static func atanh<T: FloatingPoint & TensorFlowScalar>( _ x: Tensor<T> ) -> Tensor<T>
-
Declaration
public static func attr( _ a: Int64 )
-
Declaration
public static func attrBool( _ a: Bool )
-
Declaration
public static func attrBoolList( _ a: [Bool] )
-
Declaration
public static func attrDefault( _ a: String = "banana" )
-
Declaration
public static func attrEmptyListDefault( _ a: [Double] )
-
Declaration
public static func attrEnum( _ a: A )
-
Declaration
public static func attrEnumList( _ a: [String] )
-
Declaration
public static func attrFloat( _ a: Double )
-
Declaration
public static func attrListDefault( _ a: [Int32] = [5, 15] )
-
Declaration
public static func attrListMin( _ a: [Int32] )
-
Declaration
public static func attrListTypeDefault<T: TensorFlowScalar>( _ a: [Tensor<T>], _ b: [Tensor<T>] )
-
Declaration
public static func attrMin( _ a: Int64 )
-
Declaration
public static func attrPartialShape( _ a: TensorShape? )
-
Declaration
public static func attrPartialShapeList( _ a: [TensorShape?] )
-
Declaration
public static func attrShape( _ a: TensorShape? )
-
Declaration
public static func attrShapeList( _ a: [TensorShape?] )
-
Declaration
public static func attrTypeDefault<T: TensorFlowScalar>( _ a: Tensor<T> )
-
audioMicrofrontend(audio:sampleRate:windowSize:windowStep:numChannels:upperBandLimit:lowerBandLimit:smoothingBits:evenSmoothing:oddSmoothing:minSignalRemaining:enablePcan:pcanStrength:pcanOffset:gainBits:enableLog:scaleShift:leftContext:rightContext:frameStride:zeroPadding:outScale:)
Audio Microfrontend Op.
This Op converts a sequence of audio data into one or more feature vectors containing filterbanks of the input. The conversion process uses a lightweight library to perform:
- A slicing window function
- Short-time FFTs
- Filterbank calculations
- Noise reduction
- PCAN Auto Gain Control
- Logarithmic scaling
Arguments:
- audio: 1D Tensor, int16 audio data in temporal ordering.
- sample_rate: Integer, the sample rate of the audio in Hz.
- window_size: Integer, length of desired time frames in ms.
- window_step: Integer, length of step size for the next frame in ms.
- num_channels: Integer, the number of filterbank channels to use.
- upper_band_limit: Float, the highest frequency included in the filterbanks.
- lower_band_limit: Float, the lowest frequency included in the filterbanks.
- smoothing_bits: Int, scale up signal by 2^(smoothing_bits) before reduction.
- even_smoothing: Float, smoothing coefficient for even-numbered channels.
- odd_smoothing: Float, smoothing coefficient for odd-numbered channels.
- min_signal_remaining: Float, fraction of signal to preserve in smoothing.
- enable_pcan: Bool, enable PCAN auto gain control.
- pcan_strength: Float, gain normalization exponent.
- pcan_offset: Float, positive value added in the normalization denominator.
- gain_bits: Int, number of fractional bits in the gain.
- enable_log: Bool, enable logarithmic scaling of filterbanks.
- scale_shift: Integer, scale filterbanks by 2^(scale_shift).
- left_context: Integer, number of preceding frames to attach to each frame.
- right_context: Integer, number of following frames to attach to each frame.
- frame_stride: Integer, M frames to skip over, where output[n] = frame[n*M].
- zero_padding: Bool, if left/right context is out-of-bounds, attach frame of zeroes. Otherwise, frame[0] or frame[size-1] will be copied.
- out_scale: Integer, divide all filterbanks by this number.
- out_type: DType, type of the output Tensor, defaults to UINT16.
Returns filterbanks: 2D Tensor, each row is a time frame, each column is a channel.
Declaration
public static func audioMicrofrontend<OutType: TensorFlowNumeric>( audio: Tensor<Int16>, sampleRate: Int64 = 16000, windowSize: Int64 = 25, windowStep: Int64 = 10, numChannels: Int64 = 32, upperBandLimit: Double = 7500, lowerBandLimit: Double = 125, smoothingBits: Int64 = 10, evenSmoothing: Double = 0.025, oddSmoothing: Double = 0.06, minSignalRemaining: Double = 0.05, enablePcan: Bool = false, pcanStrength: Double = 0.95, pcanOffset: Double = 80, gainBits: Int64 = 21, enableLog: Bool = true, scaleShift: Int64 = 6, leftContext: Int64 = 0, rightContext: Int64 = 0, frameStride: Int64 = 1, zeroPadding: Bool = false, outScale: Int64 = 1 ) -> Tensor<OutType>
-
Produces a visualization of audio data over time.
Spectrograms are a standard way of representing audio information as a series of slices of frequency information, one slice for each window of time. By joining these together into a sequence, they form a distinctive fingerprint of the sound over time.
This op expects to receive audio data as an input, stored as floats in the range -1 to 1, together with a window width in samples, and a stride specifying how far to move the window between slices. From this it generates a three dimensional output. The first dimension is for the channels in the input, so a stereo audio input would have two here for example. The second dimension is time, with successive frequency slices. The third dimension has an amplitude value for each frequency during that time slice.
This means the layout when converted and saved as an image is rotated 90 degrees clockwise from a typical spectrogram. Time is descending down the Y axis, and the frequency decreases from left to right.
Each value in the result represents the square root of the sum of the squares of the real and imaginary parts of an FFT on the current window of samples. In this way, the lowest dimension represents the power of each frequency in the current window, and adjacent windows are concatenated in the next dimension.
To get a more intuitive and visual look at what this operation does, you can run tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the resulting spectrogram as a PNG image.
Attrs:
- window_size: How wide the input window is in samples. For the highest efficiency this should be a power of two, but other values are accepted.
- stride: How widely apart the center of adjacent sample windows should be.
- magnitude_squared: Whether to return the squared magnitude or just the magnitude. Using squared magnitude can avoid extra calculations.
Output spectrogram: 3D representation of the audio frequencies as an image.
Declaration
Parameters
input
Float representation of audio data.
-
Outputs a Summary protocol buffer with audio.
The summary has up to max_outputs summary values containing audio. The audio is built from tensor which must be 3-D with shape [batch_size, frames, channels] or 2-D with shape [batch_size, frames]. The values are assumed to be in the range of [-1.0, 1.0] with a sample rate of sample_rate.
The tag argument is a scalar Tensor of type string. It is used to build the tag of the summary values:
- If max_outputs is 1, the summary value tag is ‘tag/audio’.
- If max_outputs is greater than 1, the summary value tags are generated sequentially as ‘tag/audio/0’, ‘tag/audio/1’, etc.
Attrs:
- sample_rate: The sample rate of the signal in hertz.
- max_outputs: Max number of batch elements to generate audio for.
Output summary: Scalar. Serialized Summary protocol buffer.
Declaration
public static func audioSummary( tag: StringTensor, _ tensor: Tensor<Float>, sampleRate: Double, maxOutputs: Int64 = 3 ) -> StringTensor
Parameters
tag
Scalar. Used to build the tag attribute of the summary values.
tensor
2-D of shape [batch_size, frames].
-
Outputs a Summary protocol buffer with audio.
The summary has up to max_outputs summary values containing audio. The audio is built from tensor which must be 3-D with shape [batch_size, frames, channels] or 2-D with shape [batch_size, frames]. The values are assumed to be in the range of [-1.0, 1.0] with a sample rate of sample_rate.
The tag argument is a scalar Tensor of type string. It is used to build the tag of the summary values:
- If max_outputs is 1, the summary value tag is ‘tag/audio’.
- If max_outputs is greater than 1, the summary value tags are generated sequentially as ‘tag/audio/0’, ‘tag/audio/1’, etc.
Attr max_outputs: Max number of batch elements to generate audio for.
Output summary: Scalar. Serialized Summary protocol buffer.
Declaration
public static func audioSummaryV2( tag: StringTensor, _ tensor: Tensor<Float>, sampleRate: Tensor<Float>, maxOutputs: Int64 = 3 ) -> StringTensor
Parameters
tag
Scalar. Used to build the tag attribute of the summary values.
tensor
2-D of shape [batch_size, frames].
sample_rate
The sample rate of the signal in hertz.
-
Creates a dataset that shards the input dataset.
Creates a dataset that shards the input dataset by num_workers, returning a sharded dataset for the index-th worker. This attempts to automatically shard a dataset by examining the Dataset graph and inserting a shard op before the inputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset).
This dataset will throw a NotFound error if we cannot shard the dataset automatically.
Declaration
public static func autoShardDataset( inputDataset: VariantHandle, numWorkers: Tensor<Int64>, index: Tensor<Int64>, autoShardPolicy: Int64 = 0, outputTypes: [TensorDataType], outputShapes: [TensorShape?] ) -> VariantHandle
Parameters
input_dataset
A variant tensor representing the input dataset.
num_workers
A scalar representing the number of workers to distribute this dataset across.
index
A scalar representing the index of the current worker out of num_workers.
-
Performs average pooling on the input.
Each entry in output is the mean of the corresponding size ksize window in value.
.Attrs:
- ksize: The size of the sliding window for each dimension of value.
- strides: The stride of the sliding window for each dimension of value.
- padding: The type of padding algorithm to use.
- data_format: Specify the data format of the input and output data. With the default format “NHWC”, the data is stored in the order of: [batch, in_height, in_width, in_channels]. Alternatively, the format could be “NCHW”, the data storage order of: [batch, in_channels, in_height, in_width].
Output output: The average pooled output tensor.
Declaration
public static func avgPool<T: FloatingPoint & TensorFlowScalar>( value: Tensor<T>, ksize: [Int32], strides: [Int32], padding: Padding, dataFormat: DataFormat = .nhwc ) -> Tensor<T>
Parameters
value
4-D with shape [batch, height, width, channels].
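A Swift sketch of 2x2 mean pooling (the .valid case name is an assumption about the Padding enum listed at the top of this page):
```swift
import TensorFlow

// A 4x4 single-channel image, batch size 1.
let value = Tensor<Float>(shape: [1, 4, 4, 1],
                          scalars: (1...16).map(Float.init))
let pooled = _Raw.avgPool(value: value,
                          ksize: [1, 2, 2, 1],
                          strides: [1, 2, 2, 1],
                          padding: .valid)
print(pooled.shape)  // [1, 2, 2, 1]; each entry is a 2x2 window mean
```
-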
Performs 3D average pooling on the input.
Attrs:
- ksize: 1-D tensor of length 5. The size of the window for each dimension of the input tensor. Must have ksize[0] = ksize[4] = 1.
- strides: 1-D tensor of length 5. The stride of the sliding window for each dimension of input. Must have strides[0] = strides[4] = 1.
- padding: The type of padding algorithm to use.
- data_format: The data format of the input and output data. With the default format “NDHWC”, the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. Alternatively, the format could be “NCDHW”, the data storage order is: [batch, in_channels, in_depth, in_height, in_width].
Output output: The average pooled output tensor.
Declaration
public static func avgPool3D<T: FloatingPoint & TensorFlowScalar>( _ input: Tensor<T>, ksize: [Int32], strides: [Int32], padding: Padding, dataFormat: DataFormat1 = .ndhwc ) -> Tensor<T>
Parameters
input
Shape [batch, depth, rows, cols, channels] tensor to pool over.
-
Computes gradients of average pooling function.
Attrs:
- ksize: 1-D tensor of length 5. The size of the window for each dimension of the input tensor. Must have ksize[0] = ksize[4] = 1.
- strides: 1-D tensor of length 5. The stride of the sliding window for each dimension of input. Must have strides[0] = strides[4] = 1.
- padding: The type of padding algorithm to use.
- data_format: The data format of the input and output data. With the default format “NDHWC”, the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. Alternatively, the format could be “NCDHW”, the data storage order is: [batch, in_channels, in_depth, in_height, in_width].
Output output: The backprop for input.
Declaration
public static func avgPool3DGrad<T: FloatingPoint & TensorFlowScalar>( origInputShape: Tensor<Int32>, grad: Tensor<T>, ksize: [Int32], strides: [Int32], padding: Padding, dataFormat: DataFormat1 = .ndhwc ) -> Tensor<T>
Parameters
orig_input_shape
The original input dimensions.
grad
Output backprop of shape [batch, depth, rows, cols, channels].
-
Computes gradients of the average pooling function.
Attrs:
- ksize: The size of the sliding window for each dimension of the input.
- strides: The stride of the sliding window for each dimension of the input.
- padding: The type of padding algorithm to use.
- data_format: Specify the data format of the input and output data. With the default format “NHWC”, the data is stored in the order of: [batch, in_height, in_width, in_channels]. Alternatively, the format could be “NCHW”, the data storage order of: [batch, in_channels, in_height, in_width].
Output output: 4-D. Gradients w.r.t. the input of avg_pool.
Declaration
public static func avgPoolGrad<T: FloatingPoint & TensorFlowScalar>( origInputShape: Tensor<Int32>, grad: Tensor<T>, ksize: [Int32], strides: [Int32], padding: Padding, dataFormat: DataFormat = .nhwc ) -> Tensor<T>
Parameters
orig_input_shape
1-D. Shape of the original input to avg_pool.
grad
4-D with shape [batch, height, width, channels]. Gradients w.r.t. the output of avg_pool.
-
Declaration
public static func b() -> Tensor<Float>
-
batch(inTensors:numBatchThreads:maxBatchSize:maxEnqueuedBatches:batchTimeoutMicros:allowedBatchSizes:gradTimeoutMicros:container:sharedName:batchingQueue:)
Batches all input tensors nondeterministically.
When many instances of this Op are being run concurrently with the same container/shared_name in the same device, some will output zero-shaped Tensors and others will output Tensors of size up to max_batch_size.
All Tensors in in_tensors are batched together (so, for example, labels and features should be batched with a single instance of this operation).
Each invocation of batch emits an id scalar which will be used to identify this particular invocation when doing unbatch or its gradient.
Each op which emits a non-empty batch will also emit a non-empty batch_index Tensor, which is a [K, 3] matrix where each row contains the invocation’s id, start, and length of elements of each set of Tensors present in batched_tensors.
Batched tensors are concatenated along the first dimension, and all tensors in in_tensors must have the first dimension of the same size.
- in_tensors: The tensors to be batched.
- num_batch_threads: Number of scheduling threads for processing batches of work. Determines the number of batches processed in parallel.
- max_batch_size: Batch sizes will never be bigger than this.
- batch_timeout_micros: Maximum number of microseconds to wait before outputting an incomplete batch.
- allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does nothing. Otherwise, supplies a list of batch sizes, causing the op to pad batches up to one of those sizes. The entries must increase monotonically, and the final entry must equal max_batch_size.
- grad_timeout_micros: The timeout to use for the gradient. See Unbatch.
- batched_tensors: Either empty tensors or a batch of concatenated Tensors.
- batch_index: If out_tensors is non-empty, has information to invert it.
- container: Controls the scope of sharing of this batch.
- id: always contains a scalar with a unique ID for this invocation of Batch.
- shared_name: Concurrently running instances of batch in the same device with the same container and shared_name will batch their elements together. If left empty, the op name will be used as the shared name.
- T: the types of tensors to be batched.
Declaration
public static func batch<T: TensorArrayProtocol>( inTensors: T, numBatchThreads: Int64, maxBatchSize: Int64, maxEnqueuedBatches: Int64 = 10, batchTimeoutMicros: Int64, allowedBatchSizes: [Int32], gradTimeoutMicros: Int64, container: String, sharedName: String, batchingQueue: String ) -> (batchedTensors: T, batchIndex: Tensor<Int64>, id: Tensor<Int64>)
-
Declaration
public static func batchCholesky<T: FloatingPoint & TensorFlowScalar>( _ input: Tensor<T> ) -> Tensor<T>
-
Declaration
public static func batchCholeskyGrad<T: FloatingPoint & TensorFlowScalar>( l: Tensor<T>, grad: Tensor<T> ) -> Tensor<T>
-
Creates a dataset that batches batch_size elements from input_dataset.
Declaration
public static func batchDataset( inputDataset: VariantHandle, batchSize: Tensor<Int64>, outputTypes: [TensorDataType], outputShapes: [TensorShape?] ) -> VariantHandle
Parameters
batch_size
A scalar representing the number of elements to accumulate in a batch.
-
Creates a dataset that batches batch_size elements from input_dataset.
Declaration
public static func batchDatasetV2( inputDataset: VariantHandle, batchSize: Tensor<Int64>, dropRemainder: Tensor<Bool>, parallelCopy: Bool = false, outputTypes: [TensorDataType], outputShapes: [TensorShape?] ) -> VariantHandle
Parameters
batch_size
A scalar representing the number of elements to accumulate in a batch.
drop_remainder
A scalar representing whether the last batch should be dropped in case its size is smaller than desired.
-
batchFunction(inTensors:capturedTensors:f:numBatchThreads:maxBatchSize:batchTimeoutMicros:maxEnqueuedBatches:allowedBatchSizes:container:sharedName:batchingQueue:)
Batches all the inputs tensors to the computation done by the function.
So, for example, in the following code
# This input will be captured.
y = tf.placeholder_with_default(1.0, shape=[])

@tf.Defun(tf.float32)
def computation(a):
  return tf.matmul(a, a) + y

b = gen_batch_ops.batch_function(
    f=computation,
    in_tensors=[a],
    captured_tensors=computation.captured_inputs,
    Tout=[o.type for o in computation.definition.signature.output_arg],
    num_batch_threads=1,
    max_batch_size=10,
    batch_timeout_micros=100000,  # 100ms
    allowed_batch_sizes=[3, 10],
    batching_queue="")

If more than one session.run call is simultaneously trying to compute b, the values of a will be gathered, non-deterministically concatenated along the first axis, and only one thread will run the computation.
Assumes that all arguments of the function are Tensors which will be batched along their first dimension. Arguments that are captured are not batched. The session.run call which does the concatenation will use the values of the captured tensors available to it. Therefore, typical uses of captured tensors should involve values which remain unchanged across session.run calls. Inference is a good example of this.
SparseTensor is not supported. The return value of the decorated function must be a Tensor or a list/tuple of Tensors.
- Parameters:
  - in_tensors: The tensors to be batched.
  - captured_tensors: The tensors which are captured in the function, and don't need to be batched.
- Attrs:
  - num_batch_threads: Number of scheduling threads for processing batches of work. Determines the number of batches processed in parallel.
  - max_batch_size: Batch sizes will never be bigger than this.
  - batch_timeout_micros: Maximum number of microseconds to wait before outputting an incomplete batch.
  - max_enqueued_batches: Maximum number of batches enqueued. Default: 10.
  - allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does nothing. Otherwise, supplies a list of batch sizes, causing the op to pad batches up to one of those sizes. The entries must increase monotonically, and the final entry must equal max_batch_size.
  - container: Controls the scope of sharing of this batch.
  - shared_name: Concurrently running instances of batch in the same device with the same container and shared_name will batch their elements together. If left empty, the op name will be used as the shared name.
  - Tin: the types of tensors to be batched.
  - Tcaptured: the types of the captured tensors.
  - Tout: the types of the output tensors.
- Output out_tensors: The output tensors.
Declaration
public static func batchFunction< FIn: TensorGroup, FOut: TensorGroup, Tin: TensorArrayProtocol, Tcaptured: TensorArrayProtocol, Tout: TensorGroup >( inTensors: Tin, capturedTensors: Tcaptured, f: (FIn) -> FOut, numBatchThreads: Int64, maxBatchSize: Int64, batchTimeoutMicros: Int64, maxEnqueuedBatches: Int64 = 10, allowedBatchSizes: [Int32], container: String, sharedName: String, batchingQueue: String ) -> Tout
-
Multiplies slices of two tensors in batches.
Multiplies all slices of Tensor x and y (each slice can be viewed as an element of a batch), and arranges the individual results in a single output tensor of the same batch size. Each of the individual slices can optionally be adjointed (to adjoint a matrix means to transpose and conjugate it) before multiplication by setting the adj_x or adj_y flag to True, which are by default False.
The input tensors x and y are 2-D or higher with shape [..., r_x, c_x] and [..., r_y, c_y].
The output tensor is 2-D or higher with shape [..., r_o, c_o], where:
r_o = c_x if adj_x else r_x
c_o = r_y if adj_y else c_y
It is computed as:
output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
Attrs:
- adj_x: If True, adjoint the slices of x. Defaults to False.
- adj_y: If True, adjoint the slices of y. Defaults to False.
Output output: 3-D or higher with shape [..., r_o, c_o]
Declaration
public static func batchMatMul<T: TensorFlowNumeric>( _ x: Tensor<T>, _ y: Tensor<T>, adjX: Bool = false, adjY: Bool = false ) -> Tensor<T>
Parameters
x
2-D or higher with shape [..., r_x, c_x].
y
2-D or higher with shape [..., r_y, c_y].
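A small Swift sketch of a batched product (illustrative values only):
```swift
import TensorFlow

// Batch of two 2x2 matrices times a batch of two identity matrices.
let x = Tensor<Float>(shape: [2, 2, 2], scalars: [1, 2, 3, 4, 5, 6, 7, 8])
let i = Tensor<Float>(shape: [2, 2, 2], scalars: [1, 0, 0, 1, 1, 0, 0, 1])
let z = _Raw.batchMatMul(x, i)
print(z.shape)  // [2, 2, 2]; z equals x slice-by-slice
```
-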
Multiplies slices of two tensors in batches.
Multiplies all slices of Tensor x and y (each slice can be viewed as an element of a batch), and arranges the individual results in a single output tensor of the same batch size. Each of the individual slices can optionally be adjointed (to adjoint a matrix means to transpose and conjugate it) before multiplication by setting the adj_x or adj_y flag to True, which are by default False.
The input tensors x and y are 2-D or higher with shape [..., r_x, c_x] and [..., r_y, c_y].
The output tensor is 2-D or higher with shape [..., r_o, c_o], where:
r_o = c_x if adj_x else r_x
c_o = r_y if adj_y else c_y
It is computed as:
output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
NOTE: BatchMatMulV2 supports broadcasting in the batch dimensions. More about broadcasting here.
Attrs:
- adj_x: If True, adjoint the slices of x. Defaults to False.
- adj_y: If True, adjoint the slices of y. Defaults to False.
Output output: 3-D or higher with shape [..., r_o, c_o]
Declaration
public static func batchMatMulV2<T: TensorFlowNumeric>( _ x: Tensor<T>, _ y: Tensor<T>, adjX: Bool = false, adjY: Bool = false ) -> Tensor<T>
Parameters
x
2-D or higher with shape [..., r_x, c_x].
y
2-D or higher with shape [..., r_y, c_y].
-
Declaration
public static func batchMatrixBandPart<T: TensorFlowScalar>( _ input: Tensor<T>, numLower: Tensor<Int64>, numUpper: Tensor<Int64> ) -> Tensor<T>
-
Declaration
public static func batchMatrixDeterminant<T: FloatingPoint & TensorFlowScalar>( _ input: Tensor<T> ) -> Tensor<T>
-
Declaration
public static func batchMatrixDiag<T: TensorFlowScalar>( diagonal: Tensor<T> ) -> Tensor<T>
-
Declaration
public static func batchMatrixDiagPart<T: TensorFlowScalar>( _ input: Tensor<T> ) -> Tensor<T>
-
Declaration
public static func batchMatrixInverse<T: FloatingPoint & TensorFlowScalar>( _ input: Tensor<T>, adjoint: Bool = false ) -> Tensor<T>
-
Declaration
public static func batchMatrixSetDiag<T: TensorFlowScalar>( _ input: Tensor<T>, diagonal: Tensor<T> ) -> Tensor<T>
-
Declaration
public static func batchMatrixSolve<T: FloatingPoint & TensorFlowScalar>( matrix: Tensor<T>, rhs: Tensor<T>, adjoint: Bool = false ) -> Tensor<T>
-
Declaration
public static func batchMatrixSolveLs<T: FloatingPoint & TensorFlowScalar>( matrix: Tensor<T>, rhs: Tensor<T>, l2Regularizer: Tensor<Double>, fast: Bool = true ) -> Tensor<T>
-
Declaration
public static func batchMatrixTriangularSolve<T: FloatingPoint & TensorFlowScalar>( matrix: Tensor<T>, rhs: Tensor<T>, lower: Bool = true, adjoint: Bool = false ) -> Tensor<T>
-
Batch normalization.
This op is deprecated. Prefer tf.nn.batch_normalization.
Attrs:
- variance_epsilon: A small float number to avoid dividing by 0.
- scale_after_normalization: A bool indicating whether the resulted tensor needs to be multiplied with gamma.
Declaration
Parameters
t
A 4D input Tensor.
m
A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof.
v
A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof.
beta
A 1D beta Tensor with size matching the last dimension of t. An offset to be added to the normalized tensor.
gamma
A 1D gamma Tensor with size matching the last dimension of t. If “scale_after_normalization” is true, this tensor will be multiplied with the normalized tensor.
-
Gradients for batch normalization.
This op is deprecated. See tf.nn.batch_normalization.
Attrs:
- variance_epsilon: A small float number to avoid dividing by 0.
- scale_after_normalization: A bool indicating whether the resulted tensor needs to be multiplied with gamma.
Outputs:
- dx: 4D backprop tensor for input.
- dm: 1D backprop tensor for mean.
- dv: 1D backprop tensor for variance.
- db: 1D backprop tensor for beta.
- dg: 1D backprop tensor for gamma.
Declaration
Parameters
t
A 4D input Tensor.
m
A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof.
v
A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof.
gamma
A 1D gamma Tensor with size matching the last dimension of t. If “scale_after_normalization” is true, this Tensor will be multiplied with the normalized Tensor.
backprop
4D backprop Tensor.
-
Declaration
public static func batchSelfAdjointEig<T: FloatingPoint & TensorFlowScalar>( _ input: Tensor<T> ) -> Tensor<T>
-
Declaration
public static func batchSelfAdjointEigV2<T: FloatingPoint & TensorFlowScalar>( _ input: Tensor<T>, computeV: Bool = true ) -> (e: Tensor<T>, v: Tensor<T>)
-
Declaration
public static func batchSvd<T: FloatingPoint & TensorFlowScalar>( _ input: Tensor<T>, computeUv: Bool = true, fullMatrices: Bool = false ) -> (s: Tensor<T>, u: Tensor<T>, v: Tensor<T>)
-
BatchToSpace for 4-D tensors of type T.
This is a legacy version of the more general BatchToSpaceND.
Rearranges (permutes) data from batch into blocks of spatial data, followed by cropping. This is the reverse transformation of SpaceToBatch. More specifically, this op outputs a copy of the input tensor where values from the batch dimension are moved in spatial blocks to the height and width dimensions, followed by cropping along the height and width dimensions.
Output output: 4-D with shape [batch, height, width, depth], where:
height = height_pad - crop_top - crop_bottom
width = width_pad - crop_left - crop_right
The attr block_size must be greater than one. It indicates the block size.
Some examples:
(1) For the following input of shape [4, 1, 1, 1] and block_size of 2:
[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
The output tensor has shape [1, 2, 2, 1] and value:
x = [[[[1], [2]], [[3], [4]]]]
(2) For the following input of shape [4, 1, 1, 3] and block_size of 2:
[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
The output tensor has shape [1, 2, 2, 3] and value:
x = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
(3) For the following input of shape [4, 2, 2, 1] and block_size of 2:
x = [[[[1], [3]], [[9], [11]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]]
The output tensor has shape [1, 4, 4, 1] and value:
x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
(4) For the following input of shape [8, 1, 2, 1] and block_size of 2:
x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
The output tensor has shape [2, 2, 4, 1] and value:
x = [[[[1], [3]], [[5], [7]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]]
Declaration
public static func batchToSpace< T: TensorFlowScalar, Tidx: TensorFlowIndex >( _ input: Tensor<T>, crops: Tensor<Tidx>, blockSize: Int64 ) -> Tensor<T>
Parameters
input
4-D tensor with shape [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]. Note that the batch size of the input tensor must be divisible by block_size * block_size.
crops
2-D tensor of non-negative integers with shape [2, 2]. It specifies how many elements to crop from the intermediate result across the spatial dimensions as follows:
crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
-
BatchToSpace for N-D tensors of type T.
This operation reshapes the “batch” dimension 0 into M + 1 dimensions of shape block_shape + [batch], interleaves these blocks back into the grid defined by the spatial dimensions [1, ..., M], to obtain a result with the same rank as the input. The spatial dimensions of this intermediate result are then optionally cropped according to crops to produce the output. This is the reverse of SpaceToBatch. See below for a precise description.
Declaration
public static func batchToSpaceND< T: TensorFlowScalar, TblockShape: TensorFlowIndex, Tcrops: TensorFlowIndex >( _ input: Tensor<T>, blockShape: Tensor<TblockShape>, crops: Tensor<Tcrops> ) -> Tensor<T>
Parameters
input
N-D with shape input_shape = [batch] + spatial_shape + remaining_shape, where spatial_shape has M dimensions.
block_shape
1-D with shape [M], all values must be >= 1.
crops
2-D with shape [M, 2], all values must be >= 0. crops[i] = [crop_start, crop_end] specifies the amount to crop from input dimension i + 1, which corresponds to spatial dimension i. It is required that crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1].
This operation is equivalent to the following steps:
-
Computes the Bessel i0e function of x element-wise.
Exponentially scaled modified Bessel function of order 0, defined as bessel_i0e(x) = exp(-abs(x)) bessel_i0(x).
This function is faster and numerically more stable than bessel_i0(x).
Declaration
public static func besselI0e<T: FloatingPoint & TensorFlowScalar>( _ x: Tensor<T> ) -> Tensor<T>
-
Computes the Bessel i1e function of x element-wise.
Exponentially scaled modified Bessel function of order 1, defined as bessel_i1e(x) = exp(-abs(x)) bessel_i1(x).
This function is faster and numerically more stable than bessel_i1(x).
Declaration
public static func besselI1e<T: FloatingPoint & TensorFlowScalar>( _ x: Tensor<T> ) -> Tensor<T>
-
Compute the regularized incomplete beta integral \(I_x(a, b)\).
The regularized incomplete beta integral is defined as:
\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\)
where
\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\)
is the incomplete beta function and \(B(a, b)\) is the complete beta function.
Declaration
public static func betainc<T: FloatingPoint & TensorFlowScalar>( _ a: Tensor<T>, _ b: Tensor<T>, _ x: Tensor<T> ) -> Tensor<T>
-
Adds bias to value.
This is a special case of tf.add where bias is restricted to be 1-D. Broadcasting is supported, so value may have any number of dimensions.
Attr data_format: Specify the data format of the input and output data. With the default format “NHWC”, the bias tensor will be added to the last dimension of the value tensor. Alternatively, the format could be “NCHW”, the data storage order of: [batch, in_channels, in_height, in_width]. The tensor will be added to “in_channels”, the third-to-the-last dimension.
Output output: Broadcasted sum of value and bias.
Declaration
public static func biasAdd<T: TensorFlowNumeric>( value: Tensor<T>, bias: Tensor<T>, dataFormat: DataFormat = .nhwc ) -> Tensor<T>
Parameters
value
Any number of dimensions.
bias
1-D with size the last dimension of value.
-
The backward operation for “BiasAdd” on the “bias” tensor.
It accumulates all the values from out_backprop into the feature dimension. For NHWC data format, the feature dimension is the last. For NCHW data format, the feature dimension is the third-to-last.
Attr data_format: Specify the data format of the input and output data. With the default format “NHWC”, the bias tensor will be added to the last dimension of the value tensor. Alternatively, the format could be “NCHW”, with data storage order [batch, in_channels, in_height, in_width]; the tensor will then be added to “in_channels”, the third-to-last dimension.
Output output: 1-D with size the feature dimension of out_backprop.
Declaration
public static func biasAddGrad<T: TensorFlowNumeric>( outBackprop: Tensor<T>, dataFormat: DataFormat = .nhwc ) -> Tensor<T>
Parameters
out_backprop
Any number of dimensions.
-
Adds bias to value.
This is a deprecated version of BiasAdd and will soon be removed.
This is a special case of tf.add where bias is restricted to be 1-D. Broadcasting is supported, so value may have any number of dimensions.
Output output: Broadcasted sum of value and bias.
Declaration
public static func biasAddV1<T: TensorFlowNumeric>( value: Tensor<T>, bias: Tensor<T> ) -> Tensor<T>
Parameters
value
Any number of dimensions.
bias
1-D with size the last dimension of value.
-
Declaration
public static func binary<T: TensorFlowScalar>( _ a: Tensor<T>, _ b: Tensor<T> ) -> Tensor<T>
-
Counts the number of occurrences of each value in an integer array.
Outputs a vector with length size and the same dtype as weights. If weights are empty, then index i stores the number of times the value i is counted in arr. If weights are non-empty, then index i stores the sum of the value in weights at each index where the corresponding value in arr is i.
Values in arr outside of the range [0, size) are ignored.
Output bins: 1-D Tensor with length equal to size. The counts or summed weights for each value in the range [0, size).
Declaration
public static func bincount<T: TensorFlowNumeric>( arr: Tensor<Int32>, size: Tensor<Int32>, weights: Tensor<T> ) -> Tensor<T>
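A minimal Swift sketch (values chosen for illustration; an empty weights tensor selects plain counting):
let arr = Tensor<Int32>([1, 1, 2, 5])
let size = Tensor<Int32>(6)
// With empty weights, each bin holds a plain count (as Float here):
// counts == [0, 2, 1, 0, 0, 1].
let counts = _Raw.bincount(arr: arr, size: size, weights: Tensor<Float>([]))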
-
Bitcasts a tensor from one type to another without copying data.
Given a tensor input, this operation returns a tensor that has the same buffer data as input with datatype type.
If the input datatype T is larger than the output datatype type, then the shape changes from […] to […, sizeof(T)/sizeof(type)].
If T is smaller than type, the operator requires that the rightmost dimension be equal to sizeof(type)/sizeof(T). The shape then goes from […, sizeof(type)/sizeof(T)] to […].
tf.bitcast() and tf.cast() work differently when a real dtype is cast to a complex dtype (e.g. tf.complex64 or tf.complex128): tf.cast() makes the imaginary part 0, while tf.bitcast() raises an error (see Example 1 below). For example,
Example 1:
a = [1., 2., 3.]
equality_bitcast = tf.bitcast(a, tf.complex128)
Traceback (most recent call last):
…
InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast]
equality_cast = tf.cast(a, tf.complex128)
print(equality_cast)
tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128)
Example 2:
tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8)
Example 3:
x = [1., 2., 3.]
y = [0., 2., 3.]
equality = tf.equal(x, y)
equality_cast = tf.cast(equality, tf.float32)
equality_bitcast = tf.bitcast(equality_cast, tf.uint8)
print(equality)
tf.Tensor([False True True], shape=(3,), dtype=bool)
print(equality_cast)
tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32)
print(equality_bitcast)
tf.Tensor(
[[ 0 0 0 0]
 [ 0 0 128 63]
 [ 0 0 128 63]], shape=(3, 4), dtype=uint8)
NOTE: Bitcast is implemented as a low-level cast, so machines with different endian orderings will give different results.
Declaration
public static func bitcast< T: TensorFlowNumeric, Type: TensorFlowNumeric >( _ input: Tensor<T> ) -> Tensor<Type>
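A minimal Swift sketch (illustrative; the byte values are endianness-dependent, as the NOTE above warns):
let x = Tensor<Float>([1.0])
// Float32 is 4 bytes, so the shape grows from [1] to [1, 4].
// On a little-endian machine this yields [[0, 0, 128, 63]].
let bytes: Tensor<UInt8> = _Raw.bitcast(x)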
-
Elementwise computes the bitwise AND of x and y.
The result will have those bits set that are set in both x and y. The computation is performed on the underlying representations of x and y.
For example:
import tensorflow as tf
from tensorflow.python.ops import bitwise_ops
dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
              tf.uint8, tf.uint16, tf.uint32, tf.uint64]
for dtype in dtype_list:
    lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
    rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
    exp = tf.constant([0, 0, 3, 10], dtype=tf.float32)
    res = bitwise_ops.bitwise_and(lhs, rhs)
    tf.assert_equal(tf.cast(res, tf.float32), exp)  # TRUE
Declaration
public static func bitwiseAnd<T: TensorFlowInteger>( _ x: Tensor<T>, _ y: Tensor<T> ) -> Tensor<T>
-
Elementwise computes the bitwise OR of x and y.
The result will have those bits set that are set in x, y, or both. The computation is performed on the underlying representations of x and y.
For example:
import tensorflow as tf
from tensorflow.python.ops import bitwise_ops
dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
              tf.uint8, tf.uint16, tf.uint32, tf.uint64]
for dtype in dtype_list:
    lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
    rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
    exp = tf.constant([5, 5, 7, 15], dtype=tf.float32)
    res = bitwise_ops.bitwise_or(lhs, rhs)
    tf.assert_equal(tf.cast(res, tf.float32), exp)  # TRUE
Declaration
public static func bitwiseOr<T: TensorFlowInteger>( _ x: Tensor<T>, _ y: Tensor<T> ) -> Tensor<T>
-
Elementwise computes the bitwise XOR of x and y.
The result will have those bits set that are different in x and y. The computation is performed on the underlying representations of x and y.
For example:
import tensorflow as tf
from tensorflow.python.ops import bitwise_ops
dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
              tf.uint8, tf.uint16, tf.uint32, tf.uint64]
for dtype in dtype_list:
    lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
    rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
    exp = tf.constant([5, 5, 4, 5], dtype=tf.float32)
    res = bitwise_ops.bitwise_xor(lhs, rhs)
    tf.assert_equal(tf.cast(res, tf.float32), exp)  # TRUE
Declaration
public static func bitwiseXor<T: TensorFlowInteger>( _ x: Tensor<T>, _ y: Tensor<T> ) -> Tensor<T>
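The same values via the Swift raw ops (a minimal sketch mirroring the Python snippets above):
let x = Tensor<Int32>([0, 5, 3, 14])
let y = Tensor<Int32>([5, 0, 7, 11])
let andResult = _Raw.bitwiseAnd(x, y)  // [0, 0, 3, 10]
let orResult = _Raw.bitwiseOr(x, y)    // [5, 5, 7, 15]
let xorResult = _Raw.bitwiseXor(x, y)  // [5, 5, 4, 5]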
-
Computes the LSTM cell forward propagation for all the time steps.
This is equivalent to applying LSTMBlockCell in a loop, like so:
for x1 in unpack(x):
    i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
        x1, cs_prev, h_prev, w, wci, wcf, wco, b)
    cs_prev = cs1
    h_prev = h1
    i.append(i1)
    cs.append(cs1)
    f.append(f1)
    o.append(o1)
    ci.append(ci1)
    co.append(co1)
    h.append(h1)
return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)
Attrs:
- forget_bias: The forget gate bias.
- cell_clip: Value to clip the ‘cs’ value to.
- use_peephole: Whether to use peephole weights.
Outputs:
- i: The input gate over the whole time sequence.
- cs: The cell state before the tanh over the whole time sequence.
- f: The forget gate over the whole time sequence.
- o: The output gate over the whole time sequence.
- ci: The cell input over the whole time sequence.
- co: The cell after the tanh over the whole time sequence.
- h: The output h vector over the whole time sequence.
Declaration
public static func blockLSTM<T: FloatingPoint & TensorFlowScalar>( seqLenMax: Tensor<Int64>, _ x: Tensor<T>, csPrev: Tensor<T>, hPrev: Tensor<T>, w: Tensor<T>, wci: Tensor<T>, wcf: Tensor<T>, wco: Tensor<T>, _ b: Tensor<T>, forgetBias: Double = 1, cellClip: Double = 3, usePeephole: Bool = false ) -> ( i: Tensor<T>, cs: Tensor<T>, f: Tensor<T>, o: Tensor<T>, ci: Tensor<T>, co: Tensor<T>, h: Tensor<T> )
Parameters
seq_len_max
Maximum time length actually used by this input. Outputs are padded with zeros beyond this length.
x
The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
cs_prev
Value of the initial cell state.
h_prev
Initial output of cell (to be used for peephole).
w
The weight matrix.
wci
The weight matrix for input gate peephole connection.
wcf
The weight matrix for forget gate peephole connection.
wco
The weight matrix for output gate peephole connection.
b
The bias vector.
-
Computes the LSTM cell backward propagation for the entire time sequence.
This implementation is to be used in conjunction with BlockLSTM.
Attr use_peephole: Whether to use peephole weights.
Outputs:
- x_grad: The gradient of x to be back-propped.
- cs_prev_grad: The gradient of cs_prev to be back-propped.
- h_prev_grad: The gradient of h_prev to be back-propped.
- w_grad: The gradient for w to be back-propped.
- wci_grad: The gradient for wci to be back-propped.
- wcf_grad: The gradient for wcf to be back-propped.
- wco_grad: The gradient for wco to be back-propped.
- b_grad: The gradient for b to be back-propped.
Declaration
public static func blockLSTMGrad<T: FloatingPoint & TensorFlowScalar>( seqLenMax: Tensor<Int64>, _ x: Tensor<T>, csPrev: Tensor<T>, hPrev: Tensor<T>, w: Tensor<T>, wci: Tensor<T>, wcf: Tensor<T>, wco: Tensor<T>, _ b: Tensor<T>, i: Tensor<T>, cs: Tensor<T>, f: Tensor<T>, o: Tensor<T>, ci: Tensor<T>, co: Tensor<T>, h: Tensor<T>, csGrad: Tensor<T>, hGrad: Tensor<T>, usePeephole: Bool ) -> ( xGrad: Tensor<T>, csPrevGrad: Tensor<T>, hPrevGrad: Tensor<T>, wGrad: Tensor<T>, wciGrad: Tensor<T>, wcfGrad: Tensor<T>, wcoGrad: Tensor<T>, bGrad: Tensor<T> )
Parameters
seq_len_max
Maximum time length actually used by this input. Outputs are padded with zeros beyond this length.
x
The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
cs_prev
Value of the initial cell state.
h_prev
Initial output of cell (to be used for peephole).
w
The weight matrix.
wci
The weight matrix for input gate peephole connection.
wcf
The weight matrix for forget gate peephole connection.
wco
The weight matrix for output gate peephole connection.
b
The bias vector.
i
The input gate over the whole time sequence.
cs
The cell state before the tanh over the whole time sequence.
f
The forget gate over the whole time sequence.
o
The output gate over the whole time sequence.
ci
The cell input over the whole time sequence.
co
The cell after the tanh over the whole time sequence.
h
The output h vector over the whole time sequence.
cs_grad
The current gradient of cs.
h_grad
The gradient of h vector.
-
blockLSTMGradV2(seqLenMax:_:csPrev:hPrev:w:wci:wcf:wco:_:i:cs:f:o:ci:co:h:csGrad:hGrad:usePeephole:)
Computes the LSTM cell backward propagation for the entire time sequence.
This implementation is to be used in conjunction with BlockLSTMV2.
Attr use_peephole: Whether to use peephole weights.
Outputs:
- x_grad: The gradient of x to be back-propped.
- cs_prev_grad: The gradient of cs_prev to be back-propped.
- h_prev_grad: The gradient of h_prev to be back-propped.
- w_grad: The gradient for w to be back-propped.
- wci_grad: The gradient for wci to be back-propped.
- wcf_grad: The gradient for wcf to be back-propped.
- wco_grad: The gradient for wco to be back-propped.
- b_grad: The gradient for b to be back-propped.
Declaration
public static func blockLSTMGradV2<T: FloatingPoint & TensorFlowScalar>( seqLenMax: Tensor<Int64>, _ x: Tensor<T>, csPrev: Tensor<T>, hPrev: Tensor<T>, w: Tensor<T>, wci: Tensor<T>, wcf: Tensor<T>, wco: Tensor<T>, _ b: Tensor<T>, i: Tensor<T>, cs: Tensor<T>, f: Tensor<T>, o: Tensor<T>, ci: Tensor<T>, co: Tensor<T>, h: Tensor<T>, csGrad: Tensor<T>, hGrad: Tensor<T>, usePeephole: Bool ) -> ( xGrad: Tensor<T>, csPrevGrad: Tensor<T>, hPrevGrad: Tensor<T>, wGrad: Tensor<T>, wciGrad: Tensor<T>, wcfGrad: Tensor<T>, wcoGrad: Tensor<T>, bGrad: Tensor<T> )
Parameters
seq_len_max
Maximum time length actually used by this input. Outputs are padded with zeros beyond this length.
x
The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
cs_prev
Value of the initial cell state.
h_prev
Initial output of cell (to be used for peephole).
w
The weight matrix.
wci
The weight matrix for input gate peephole connection.
wcf
The weight matrix for forget gate peephole connection.
wco
The weight matrix for output gate peephole connection.
b
The bias vector.
i
The input gate over the whole time sequence.
cs
The cell state before the tanh over the whole time sequence.
f
The forget gate over the whole time sequence.
o
The output gate over the whole time sequence.
ci
The cell input over the whole time sequence.
co
The cell after the tanh over the whole time sequence.
h
The output h vector over the whole time sequence.
cs_grad
The current gradient of cs.
h_grad
The gradient of h vector.
-
Computes the LSTM cell forward propagation for all the time steps.
This is equivalent to applying LSTMBlockCell in a loop, like so:
for x1 in unpack(x):
    i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
        x1, cs_prev, h_prev, w, wci, wcf, wco, b)
    cs_prev = cs1
    h_prev = h1
    i.append(i1)
    cs.append(cs1)
    f.append(f1)
    o.append(o1)
    ci.append(ci1)
    co.append(co1)
    h.append(h1)
return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)
Note that unlike LSTMBlockCell (and BlockLSTM), which use the ICFO gate layout, this op uses IFCO. So, for the snippet above to be equivalent, all gate-related outputs should be reordered.
Attrs:
- cell_clip: Value to clip the ‘cs’ value to.
- use_peephole: Whether to use peephole weights.
Outputs:
- i: The input gate over the whole time sequence.
- cs: The cell state before the tanh over the whole time sequence.
- f: The forget gate over the whole time sequence.
- o: The output gate over the whole time sequence.
- ci: The cell input over the whole time sequence.
- co: The cell after the tanh over the whole time sequence.
- h: The output h vector over the whole time sequence.
Declaration
public static func blockLSTMV2<T: FloatingPoint & TensorFlowScalar>( seqLenMax: Tensor<Int64>, _ x: Tensor<T>, csPrev: Tensor<T>, hPrev: Tensor<T>, w: Tensor<T>, wci: Tensor<T>, wcf: Tensor<T>, wco: Tensor<T>, _ b: Tensor<T>, cellClip: Double = 0, usePeephole: Bool = false ) -> ( i: Tensor<T>, cs: Tensor<T>, f: Tensor<T>, o: Tensor<T>, ci: Tensor<T>, co: Tensor<T>, h: Tensor<T> )
Parameters
seq_len_max
Maximum time length actually used by this input. Outputs are padded with zeros beyond this length.
x
The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
cs_prev
Value of the initial cell state.
h_prev
Initial output of cell (to be used for peephole).
w
The weight matrix.
wci
The weight matrix for input gate peephole connection.
wcf
The weight matrix for forget gate peephole connection.
wco
The weight matrix for output gate peephole connection.
b
The bias vector.
-
Aggregates the summary of accumulated stats for the batch.
The summary stats contain gradients and hessians accumulated for each node, feature dimension id, and bucket.
Attrs:
- max_splits: int; the maximum number of splits possible in the whole tree.
- num_buckets: int; equals to the maximum possible value of bucketized feature.
Output stats_summary: output Rank 4 Tensor (shape=[splits, feature_dimension, buckets, logits_dimension + hessian_dimension]) containing accumulated stats for each node, feature dimension and bucket.
Declaration
Parameters
node_ids
int32; Rank 1 Tensor containing node ids for each example, shape [batch_size].
gradients
float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example.
hessians
float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example.
feature
int32; Rank 2 feature Tensors (shape=[batch_size, feature_dimension]).
-
Bucketize each feature based on bucket boundaries.
An op that returns a list of float tensors, where each tensor represents the bucketized values for a single feature.
Attr num_features: inferred int; number of features.
Output buckets: int; List of Rank 1 Tensors each containing the bucketized values for a single feature.
Declaration
Parameters
float_values
float; List of Rank 1 Tensor each containing float values for a single feature.
bucket_boundaries
float; List of Rank 1 Tensors each containing the bucket boundaries for a single feature.
-
boostedTreesCalculateBestFeatureSplit(nodeIdRange:statsSummary:l1:l2:treeComplexity:minNodeWeight:logitsDimension:splitType:)
Calculates gains for each feature and returns the best possible split information for the feature.
The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return node_ids_list for each feature, containing the list of nodes that this feature can be used to split.
In this manner, the output is the best split per feature and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
The output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature.
Attrs:
- logits_dimension: The dimension of the logits, i.e., the number of classes.
- split_type: A string indicating if this Op should perform inequality split or equality split.
Outputs:
- node_ids: A Rank 1 tensor indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has a different size as each feature provides different possible nodes. See above for details like shapes and sizes.
- gains: A Rank 1 tensor indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.
- feature_dimensions: A Rank 1 tensor indicating the best feature dimension for each feature to split for certain nodes if the feature is multi-dimensional. See above for details like shapes and sizes.
- thresholds: A Rank 1 tensor indicating the bucket id to compare with (as a threshold) for the split in each node. See above for details like shapes and sizes.
- left_node_contribs: A Rank 2 tensor indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. The second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.
- right_node_contribs: A Rank 2 tensor, with the same shape/conditions as left_node_contribs_list, but the value is for the right node.
- split_with_default_directions: A Rank 1 tensor indicating which direction to go if data is missing. See above for details like shapes and sizes. Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.
Declaration
public static func boostedTreesCalculateBestFeatureSplit( nodeIdRange: Tensor<Int32>, statsSummary: Tensor<Float>, l1: Tensor<Float>, l2: Tensor<Float>, treeComplexity: Tensor<Float>, minNodeWeight: Tensor<Float>, logitsDimension: Int64, splitType: SplitType = .inequality ) -> ( nodeIds: Tensor<Int32>, gains: Tensor<Float>, featureDimensions: Tensor<Int32>, thresholds: Tensor<Int32>, leftNodeContribs: Tensor<Float>, rightNodeContribs: Tensor<Float>, splitWithDefaultDirections: StringTensor )
Parameters
node_id_range
A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within stats_summary_list. The nodes are iterated between the two nodes specified by the tensor, as in for node_id in range(node_id_range[0], node_id_range[1]) (note that the last index, node_id_range[1], is exclusive).
stats_summary
A Rank 4 tensor (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per buckets for each feature. The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.
l1
l1 regularization factor on leaf weights, per instance based.
l2
l2 regularization factor on leaf weights, per instance based.
tree_complexity
adjustment to the gain, per leaf based.
min_node_weight
minimum average of hessians in a node required before the node is considered for splitting.
-
boostedTreesCalculateBestGainsPerFeature(nodeIdRange:statsSummaryList:l1:l2:treeComplexity:minNodeWeight:maxSplits:)
Calculates gains for each feature and returns the best possible split information for the feature.
The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return node_ids_list for each feature, containing the list of nodes that this feature can be used to split.
In this manner, the output is the best split per feature and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
The output lists all have length num_features. The output shapes are compatible in a way that the first dimension of all tensors of all lists is the same and equal to the number of possible split nodes for each feature.
Attrs:
- max_splits: the number of nodes that can be split in the whole tree. Used as a dimension of output tensors.
- num_features: inferred from the size of stats_summary_list; the number of total features.
Outputs:
- node_ids_list: An output list of Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes.
- gains_list: An output list of Rank 1 tensors indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.
- thresholds_list: An output list of Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.
- left_node_contribs_list: A list of Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.
- right_node_contribs_list: A list of Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.
Declaration
public static func boostedTreesCalculateBestGainsPerFeature( nodeIdRange: Tensor<Int32>, statsSummaryList: [Tensor<Float>], l1: Tensor<Float>, l2: Tensor<Float>, treeComplexity: Tensor<Float>, minNodeWeight: Tensor<Float>, maxSplits: Int64 ) -> ( nodeIdsList: [Tensor<Int32>], gainsList: [Tensor<Float>], thresholdsList: [Tensor<Int32>], leftNodeContribsList: [Tensor<Float>], rightNodeContribsList: [Tensor<Float>] )
Parameters
node_id_range
A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within stats_summary_list. The nodes are iterated between the two nodes specified by the tensor, as in for node_id in range(node_id_range[0], node_id_range[1]) (note that the last index, node_id_range[1], is exclusive).
stats_summary_list
A list of Rank 3 tensor (#shape=[max_splits, bucket, 2]) for accumulated stats summary (gradient/hessian) per node per buckets for each feature. The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.
l1
l1 regularization factor on leaf weights, per instance based.
l2
l2 regularization factor on leaf weights, per instance based.
tree_complexity
adjustment to the gain, per leaf based.
min_node_weight
minimum average of hessians in a node required before the node is considered for splitting.
-
Calculates the prior from the training data (the bias) and fills in the first node with the logits’ prior. Returns a boolean indicating whether to continue centering.
Output continue_centering: Bool, whether to continue bias centering.
Declaration
Parameters
tree_ensemble_handle
Handle to the tree ensemble.
mean_gradients
A tensor with shape=[logits_dimension] with mean of gradients for a first node.
mean_hessians
A tensor with shape=[logits_dimension] mean of hessians for a first node.
l1
l1 regularization factor on leaf weights, per instance based.
l2
l2 regularization factor on leaf weights, per instance based.
-
Creates a tree ensemble model and returns a handle to it.
Declaration
public static func boostedTreesCreateEnsemble( treeEnsembleHandle: ResourceHandle, stampToken: Tensor<Int64>, treeEnsembleSerialized: StringTensor )
Parameters
tree_ensemble_handle
Handle to the tree ensemble resource to be created.
stamp_token
Token to use as the initial value of the resource stamp.
tree_ensemble_serialized
Serialized proto of the tree ensemble.
-
boostedTreesCreateQuantileStreamResource(quantileStreamResourceHandle:epsilon:numStreams:maxElements:)
Create the Resource for Quantile Streams.
Attr max_elements: int; The maximum number of data points that can be fed to the stream.
Declaration
public static func boostedTreesCreateQuantileStreamResource( quantileStreamResourceHandle: ResourceHandle, epsilon: Tensor<Float>, numStreams: Tensor<Int64>, maxElements: Int64 = 1_099_511_627_776 )
Parameters
quantile_stream_resource_handle
resource; Handle to quantile stream resource.
epsilon
float; The required approximation error of the stream resource.
num_streams
int; The number of streams managed by the resource that shares the same epsilon.
-
Deserializes a serialized tree ensemble config and replaces the current tree ensemble.
Declaration
public static func boostedTreesDeserializeEnsemble( treeEnsembleHandle: ResourceHandle, stampToken: Tensor<Int64>, treeEnsembleSerialized: StringTensor )
Parameters
tree_ensemble_handle
Handle to the tree ensemble.
stamp_token
Token to use as the new value of the resource stamp.
tree_ensemble_serialized
Serialized proto of the ensemble.
-
Creates a handle to a BoostedTreesEnsembleResource.
Declaration
public static func boostedTreesEnsembleResourceHandleOp( container: String, sharedName: String ) -> ResourceHandle
-
Debugging/model interpretability outputs for each example.
It traverses all the trees and computes debug metrics for individual examples, such as getting split feature ids and logits after each split along the decision path used to compute directional feature contributions.
Attrs:
- num_bucketized_features: Inferred.
- logits_dimension: scalar, dimension of the logits, to be used for constructing the protos in examples_debug_outputs_serialized.
Output examples_debug_outputs_serialized: Output rank 1 Tensor containing a proto serialized as a string for each example.
Declaration
public static func boostedTreesExampleDebugOutputs( treeEnsembleHandle: ResourceHandle, bucketizedFeatures: [Tensor<Int32>], logitsDimension: Int64 ) -> StringTensor
Parameters
bucketized_features
A list of rank 1 Tensors containing bucket id for each feature.
-
Flush the quantile summaries from each quantile stream resource.
An op that outputs a list of quantile summaries of a quantile stream resource. Each summary Tensor is rank 2, containing summaries (value, weight, min_rank, max_rank) for a single feature.
Declaration
public static func boostedTreesFlushQuantileSummaries( quantileStreamResourceHandle: ResourceHandle, numFeatures: Int64 ) -> [Tensor<Float>]
Parameters
quantile_stream_resource_handle
resource handle referring to a QuantileStreamResource.
-
Retrieves the tree ensemble resource stamp token, number of trees and growing statistics.
Outputs:
- stamp_token: Stamp token of the tree ensemble resource.
- num_trees: The number of trees in the tree ensemble resource.
- num_finalized_trees: The number of trees that were finished successfully.
- num_attempted_layers: The number of layers we attempted to build (but not necessarily succeeded).
- last_layer_nodes_range: Rank 1 tensor of size 2 that contains the start and end ids of the nodes in the latest layer.
Declaration
Parameters
tree_ensemble_handle
Handle to the tree ensemble.
-
Makes the summary of quantiles for the batch.
An op that takes a list of tensors (one tensor per feature) and outputs the quantile summaries for each tensor.
Attr num_features: int; Inferred from the size of float_values. The number of float features.
Output summaries: float; List of Rank 2 Tensors each containing the quantile summary (value, weight, min_rank, max_rank) of a single feature.
Declaration
Parameters
float_values
float; List of Rank 1 Tensors each containing values for a single feature.
example_weights
float; Rank 1 Tensor with weights per instance.
epsilon
float; The required maximum approximation error.
-
boostedTreesMakeStatsSummary(nodeIds:gradients:hessians:bucketizedFeaturesList:maxSplits:numBuckets:)
Makes the summary of accumulated stats for the batch.
The summary stats contain gradients and hessians accumulated into the corresponding node and bucket for each example.
Attrs:
- max_splits: int; the maximum number of splits possible in the whole tree.
- num_buckets: int; equals to the maximum possible value of bucketized feature.
- num_features: int; inferred from the size of bucketized_features_list; the number of features.
Output stats_summary: output Rank 4 Tensor (shape=[#features, #splits, #buckets, 2]) containing accumulated stats put into the corresponding node and bucket. The first index of 4th dimension refers to gradients, and the second to hessians.
Declaration
Parameters
node_ids
int32 Rank 1 Tensor containing node ids, which each example falls into for the requested layer.
gradients
float32; Rank 2 Tensor (shape=[#examples, 1]) for gradients.
hessians
float32; Rank 2 Tensor (shape=[#examples, 1]) for hessians.
bucketized_features_list
int32 list of Rank 1 Tensors, each containing the bucketized feature (for each feature column).
-
Runs multiple additive regression ensemble predictors on input instances and computes the logits. It is designed to be used during prediction. It traverses all the trees and calculates the final score for each instance.
Attrs:
- num_bucketized_features: Inferred.
- logits_dimension: scalar, dimension of the logits, to be used for partial logits shape.
Output logits: Output rank 2 Tensor containing logits for each example.
Declaration
public static func boostedTreesPredict( treeEnsembleHandle: ResourceHandle, bucketizedFeatures: [Tensor<Int32>], logitsDimension: Int64 ) -> Tensor<Float>
Parameters
bucketized_features
A list of rank 1 Tensors containing bucket id for each feature.
-
Add the quantile summaries to each quantile stream resource.
An op that adds a list of quantile summaries to a quantile stream resource. Each summary Tensor is rank 2, containing summaries (value, weight, min_rank, max_rank) for a single feature.
Declaration
public static func boostedTreesQuantileStreamResourceAddSummaries( quantileStreamResourceHandle: ResourceHandle, summaries: [Tensor<Float>] )
Parameters
quantile_stream_resource_handle
resource handle referring to a QuantileStreamResource.
summaries
float; List of Rank 2 Tensors, each containing the summaries for a single feature.
-
Deserialize bucket boundaries and ready flag into current QuantileAccumulator.
An op that deserializes bucket boundaries and the boundaries-ready flag into the current QuantileAccumulator.
Attr num_streams: inferred int; number of features to get bucket boundaries for.
Declaration
public static func boostedTreesQuantileStreamResourceDeserialize( quantileStreamResourceHandle: ResourceHandle, bucketBoundaries: [Tensor<Float>] )
Parameters
quantile_stream_resource_handle
resource handle referring to a QuantileStreamResource.
bucket_boundaries
float; List of Rank 1 Tensors each containing the bucket boundaries for a feature.
-
Flush the summaries for a quantile stream resource.
An op that flushes the summaries for a quantile stream resource.
Attr generate_quantiles: bool; If True, the output will be the num_quantiles for each stream, where the ith entry is the ith quantile of the input with an approximation error of epsilon. Duplicate values may be present. If False, the output will be the points in the histogram that we got, which roughly translates to 1/epsilon boundaries, without any duplicates. Defaults to False.
Declaration
public static func boostedTreesQuantileStreamResourceFlush( quantileStreamResourceHandle: ResourceHandle, numBuckets: Tensor<Int64>, generateQuantiles: Bool = false )
Parameters
quantile_stream_resource_handle
resource handle referring to a QuantileStreamResource.
num_buckets
int; approximate number of buckets unless using generate_quantiles.
-
Generate the bucket boundaries for each feature based on accumulated summaries.
An op that returns a list of float tensors for a quantile stream resource. Each tensor is Rank 1 containing bucket boundaries for a single feature.
Attr num_features: inferred int; number of features to get bucket boundaries for.
Output bucket_boundaries: float; List of Rank 1 Tensors each containing the bucket boundaries for a feature.
Declaration
public static func boostedTreesQuantileStreamResourceGetBucketBoundaries( quantileStreamResourceHandle: ResourceHandle, numFeatures: Int64 ) -> [Tensor<Float>]
Parameters
quantile_stream_resource_handle
resource handle referring to a QuantileStreamResource.
-
Creates a handle to a BoostedTreesQuantileStreamResource.
Declaration
public static func boostedTreesQuantileStreamResourceHandleOp( container: String, sharedName: String ) -> ResourceHandle
-
Serializes the tree ensemble to a proto.
Outputs:
- stamp_token: Stamp token of the tree ensemble resource.
- tree_ensemble_serialized: Serialized proto of the ensemble.
Declaration
public static func boostedTreesSerializeEnsemble( treeEnsembleHandle: ResourceHandle ) -> (stampToken: Tensor<Int64>, treeEnsembleSerialized: StringTensor)
Parameters
tree_ensemble_handle
Handle to the tree ensemble.
-
boostedTreesSparseAggregateStats(nodeIds:gradients:hessians:featureIndices:featureValues:featureShape:maxSplits:numBuckets:)
Aggregates the summary of accumulated stats for the batch.
The summary stats contain gradients and hessians accumulated for each node, bucket, and dimension id.
Attrs:
- max_splits: int; the maximum number of splits possible in the whole tree.
- num_buckets: int; equals to the maximum possible value of bucketized feature + 1.
Outputs:
- stats_summary_indices: int32; Rank 2 indices of summary sparse Tensors (shape=[number of non zero statistics, 4]). The second axis has size 4, comprising node id, feature dimension, bucket id, and statistics_dimension, where statistics_dimension = logits_dimension + hessian_dimension.
- stats_summary_values: output Rank 1 Tensor (shape=[number of non zero statistics])
- stats_summary_shape: output Rank 1 Tensor (shape=[4]) The tensor has following 4 values: [max_splits, feature_dimension, num_buckets, statistics_dimension], where statistics_dimension = gradient_dimension + hessian_dimension. gradient_dimension is the same as label_dimension, i.e., the output space. hessian_dimension can be the same as logits dimension when diagonal hessian is used, or label_dimension^2 when full hessian is used.
Declaration
public static func boostedTreesSparseAggregateStats( nodeIds: Tensor<Int32>, gradients: Tensor<Float>, hessians: Tensor<Float>, featureIndices: Tensor<Int32>, featureValues: Tensor<Int32>, featureShape: Tensor<Int32>, maxSplits: Int64, numBuckets: Int64 ) -> ( statsSummaryIndices: Tensor<Int32>, statsSummaryValues: Tensor<Float>, statsSummaryShape: Tensor<Int32> )
Parameters
node_ids
int32; Rank 1 Tensor containing node ids for each example, shape [batch_size].
gradients
float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example.
hessians
float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example.
feature_indices
int32; Rank 2 indices of feature sparse Tensors (shape=[number of sparse entries, 2]). Number of sparse entries across all instances from the batch. The first value is the index of the instance, the second is dimension of the feature. The second axis can only have 2 values, i.e., the input dense version of Tensor can only be matrix.
feature_values
int32; Rank 1 values of feature sparse Tensors (shape=[number of sparse entries]). Number of sparse entries across all instances from the batch. The first value is the index of the instance, the second is dimension of the feature.
feature_shape
int32; Rank 1 dense shape of feature sparse Tensors (shape=[2]). The first axis can only have 2 values, [batch_size, feature_dimension].
-
boostedTreesSparseCalculateBestFeatureSplit(nodeIdRange:statsSummaryIndices:statsSummaryValues:statsSummaryShape:l1:l2:treeComplexity:minNodeWeight:logitsDimension:splitType:)
Calculates gains for each feature and returns the best possible split information for the feature.
The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return node_ids_list for each feature, containing the list of nodes that this feature can be used to split.
In this manner, the output is the best split per feature and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
The output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature.
Attrs:
- logits_dimension: The dimension of the logits, i.e., the number of classes.
- split_type: A string indicating if this Op should perform inequality split or equality split.
Outputs:
- node_ids: A Rank 1 tensor indicating possible node ids that can be split.
- gains: A Rank 1 tensor indicating the best gains to split each node.
- feature_dimensions: A Rank 1 tensor indicating the best feature dimension for each feature to split for each node.
- thresholds: A Rank 1 tensor indicating the bucket id to compare with (as a threshold) for split in each node.
- left_node_contribs: A Rank 2 tensor indicating the contribution of the left nodes when branching from parent nodes to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is logits dimension.
- right_node_contribs: A Rank 2 tensor, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.
- split_with_default_directions: A Rank 1 tensor indicating which direction to go if data is missing. Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.
Declaration
public static func boostedTreesSparseCalculateBestFeatureSplit( nodeIdRange: Tensor<Int32>, statsSummaryIndices: Tensor<Int32>, statsSummaryValues: Tensor<Float>, statsSummaryShape: Tensor<Int32>, l1: Tensor<Float>, l2: Tensor<Float>, treeComplexity: Tensor<Float>, minNodeWeight: Tensor<Float>, logitsDimension: Int64, splitType: SplitType2 = .inequality ) -> ( nodeIds: Tensor<Int32>, gains: Tensor<Float>, featureDimensions: Tensor<Int32>, thresholds: Tensor<Int32>, leftNodeContribs: Tensor<Float>, rightNodeContribs: Tensor<Float>, splitWithDefaultDirections: StringTensor )
Parameters
node_id_range
A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within stats_summary_list. The nodes are iterated between the two nodes specified by the tensor, as in for node_id in range(node_id_range[0], node_id_range[1]) (note that the last index, node_id_range[1], is exclusive).
stats_summary_indices
A Rank 2 int64 tensor of dense shape [N, 4] for accumulated stats summary (gradient/hessian) per node per bucket for each feature. The second dimension contains node id, feature dimension, bucket id, and stats dim. stats dim is the sum of the logits dimension and the hessian dimension; the hessian dimension can either be the logits dimension if a diagonal hessian is used, or the logits dimension squared if a full hessian is used.
stats_summary_values
A Rank 1 float tensor of dense shape [N], which supplies the values for each element in summary_indices.
stats_summary_shape
A Rank 1 float tensor of dense shape [4], which specifies the dense shape of the sparse tensor, which is [num tree nodes, feature dimensions, num buckets, stats dim].
l1
l1 regularization factor on leaf weights, per instance based.
l2
l2 regularization factor on leaf weights, per instance based.
tree_complexity
adjustment to the gain, per leaf based.
min_node_weight
minimum average of hessians in a node required before the node is considered for splitting.
-
boostedTreesTrainingPredict(treeEnsembleHandle:cachedTreeIds:cachedNodeIds:bucketizedFeatures:logitsDimension:)
Runs multiple additive regression ensemble predictors on input instances and computes the update to cached logits. It is designed to be used during training. It traverses the trees starting from the cached tree id and cached node id and calculates the updates to be pushed to the cache.
Attrs:
- num_bucketized_features: Inferred.
- logits_dimension: scalar, dimension of the logits, to be used for partial logits shape.
Outputs:
- partial_logits: Rank 2 Tensor containing logits update (with respect to cached values stored) for each example.
- tree_ids: Rank 1 Tensor containing new tree ids for each example.
- node_ids: Rank 1 Tensor containing new node ids in the new tree_ids.
Declaration
Parameters
cached_tree_ids
Rank 1 Tensor containing cached tree ids which is the starting tree of prediction.
cached_node_ids
Rank 1 Tensor containing cached node id which is the starting node of prediction.
bucketized_features
A list of rank 1 Tensors containing bucket id for each feature.
-
boostedTreesUpdateEnsemble(treeEnsembleHandle:featureIds:nodeIds:gains:thresholds:leftNodeContribs:rightNodeContribs:maxDepth:learningRate:pruningMode:)
Updates the tree ensemble by either adding a layer to the last tree being grown or by starting a new tree.
Attrs:
- pruning_mode: 0-No pruning, 1-Pre-pruning, 2-Post-pruning.
- num_features: Number of features that have best splits returned. INFERRED.
Declaration
public static func boostedTreesUpdateEnsemble( treeEnsembleHandle: ResourceHandle, featureIds: Tensor<Int32>, nodeIds: [Tensor<Int32>], gains: [Tensor<Float>], thresholds: [Tensor<Int32>], leftNodeContribs: [Tensor<Float>], rightNodeContribs: [Tensor<Float>], maxDepth: Tensor<Int32>, learningRate: Tensor<Float>, pruningMode: Int64 )
Parameters
tree_ensemble_handle
Handle to the ensemble variable.
feature_ids
Rank 1 tensor with ids for each feature. This is the real id of the feature that will be used in the split.
node_ids
List of rank 1 tensors representing the nodes for which this feature has a split.
gains
List of rank 1 tensors representing the gains for each of the feature’s split.
thresholds
List of rank 1 tensors representing the thresholds for each of the feature’s splits.
left_node_contribs
List of rank 2 tensors with left leaf contribs for each of the feature’s splits. Will be added to the previous node values to constitute the values of the left nodes.
right_node_contribs
List of rank 2 tensors with right leaf contribs for each of the feature’s splits. Will be added to the previous node values to constitute the values of the right nodes.
max_depth
Max depth of the tree to build.
learning_rate
shrinkage constant for each new tree.
-
boostedTreesUpdateEnsembleV2(treeEnsembleHandle:featureIds:dimensionIds:nodeIds:gains:thresholds:leftNodeContribs:rightNodeContribs:splitTypes:maxDepth:learningRate:pruningMode:logitsDimension:)
Updates the tree ensemble by either adding a layer to the last tree being grown or by starting a new tree.
Attrs:
- num_features: Number of features that have best splits returned. INFERRED.
- logits_dimension: scalar, dimension of the logits
Declaration
public static func boostedTreesUpdateEnsembleV2( treeEnsembleHandle: ResourceHandle, featureIds: Tensor<Int32>, dimensionIds: [Tensor<Int32>], nodeIds: [Tensor<Int32>], gains: [Tensor<Float>], thresholds: [Tensor<Int32>], leftNodeContribs: [Tensor<Float>], rightNodeContribs: [Tensor<Float>], splitTypes: [StringTensor], maxDepth: Tensor<Int32>, learningRate: Tensor<Float>, pruningMode: Tensor<Int32>, logitsDimension: Int64 = 1 )
Parameters
tree_ensemble_handle
Handle to the ensemble variable.
feature_ids
Rank 1 tensor with ids for each feature. This is the real id of the feature that will be used in the split.
dimension_ids
List of rank 1 tensors representing the dimension in each feature.
node_ids
List of rank 1 tensors representing the nodes for which this feature has a split.
gains
List of rank 1 tensors representing the gains for each of the feature’s split.
thresholds
List of rank 1 tensors representing the thresholds for each of the feature’s splits.
left_node_contribs
List of rank 2 tensors with left leaf contribs for each of the feature’s splits. Will be added to the previous node values to constitute the values of the left nodes.
right_node_contribs
List of rank 2 tensors with right leaf contribs for each of the feature’s splits. Will be added to the previous node values to constitute the values of the right nodes.
split_types
List of rank 1 tensors representing the split type for each feature.
max_depth
Max depth of the tree to build.
learning_rate
shrinkage constant for each new tree.
pruning_mode
0-No pruning, 1-Pre-pruning, 2-Post-pruning.
-
Return the shape of s0 op s1 with broadcast.
Given s0 and s1, tensors that represent shapes, compute r0, the broadcasted shape. s0, s1 and r0 are all integer vectors.
Declaration
public static func broadcastArgs<T: TensorFlowIndex>( s0: Tensor<T>, s1: Tensor<T> ) -> Tensor<T>
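A minimal Swift sketch (shapes chosen for illustration):
let s0 = Tensor<Int32>([2, 1, 3])
let s1 = Tensor<Int32>([5, 3])
// Broadcasting [2, 1, 3] with [5, 3] yields [2, 5, 3].
let r0 = _Raw.broadcastArgs(s0: s0, s1: s1)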
-
Return the reduction indices for computing gradients of s0 op s1 with broadcast.
This is typically used by gradient computations for a broadcasting operation.
Declaration
public static func broadcastGradientArgs<T: TensorFlowIndex>( s0: Tensor<T>, s1: Tensor<T> ) -> (r0: Tensor<T>, r1: Tensor<T>)
-
Broadcast an array for a compatible shape.
Broadcasting is the process of making arrays have compatible shapes for arithmetic operations. Two shapes are compatible if, for each dimension pair, they are either equal or one of them is one. When trying to broadcast a Tensor to a shape, it starts with the trailing dimensions and works its way forward.
For example,
x = tf.constant([1, 2, 3])
y = tf.broadcast_to(x, [3, 3])
print(y)
tf.Tensor(
[[1 2 3]
 [1 2 3]
 [1 2 3]], shape=(3, 3), dtype=int32)
In the above example, the input Tensor of shape [3] (aligned as [1, 3]) is broadcast to an output Tensor of shape [3, 3].
Output output: A Tensor.
Declaration
public static func broadcastTo< T: TensorFlowScalar, Tidx: TensorFlowIndex >( _ input: Tensor<T>, shape: Tensor<Tidx> ) -> Tensor<T>
Parameters
input
A Tensor to broadcast.
shape
A 1-D int Tensor. The shape of the desired output.
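The same example via the Swift raw op (a minimal sketch):
let x = Tensor<Int32>([1, 2, 3])
// y == [[1, 2, 3], [1, 2, 3], [1, 2, 3]] with shape [3, 3].
let y = _Raw.broadcastTo(x, shape: Tensor<Int32>([3, 3]))
-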
Bucketizes ‘input’ based on ‘boundaries’.
For example, if the inputs are
boundaries = [0, 10, 100]
input = [[-5, 10000] [150, 10] [5, 100]]
then the output will be
output = [[0, 3] [3, 2] [1, 3]]
Attr boundaries: A sorted list of floats gives the boundary of the buckets.
Output output: Same shape with ‘input’, each value of input replaced with bucket index.
NumPy compatibility: equivalent to np.digitize.
Declaration
public static func bucketize<T: TensorFlowNumeric>( _ input: Tensor<T>, boundaries: [Double] ) -> Tensor<Int32>
Parameters
input
A Tensor of any shape, containing int or float values.
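The example above, as a minimal Swift sketch:
let input = Tensor<Float>(shape: [3, 2], scalars: [-5, 10000, 150, 10, 5, 100])
// output == [[0, 3], [3, 2], [1, 3]]
let output = _Raw.bucketize(input, boundaries: [0, 10, 100])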
-
Records the bytes size of each element of input_dataset in a StatsAggregator.
Declaration
public static func bytesProducedStatsDataset( inputDataset: VariantHandle, tag: StringTensor, outputTypes: [TensorDataType], outputShapes: [TensorShape?] ) -> VariantHandle
-
Reads out the CSR components at batch index.
This op is meant only for debugging / testing, and its interface is not expected to be stable.
Outputs:
- row_ptrs: An array containing CSR matrix row pointers.
- col_inds: An array containing CSR matrix column indices.
- values: An array containing CSR matrix nonzero values.
Declaration
public static func cSRSparseMatrixComponents<Type: FloatingPoint & TensorFlowScalar>( csrSparseMatrix: VariantHandle, index: Tensor<Int32> ) -> (rowPtrs: Tensor<Int32>, colInds: Tensor<Int32>, values: Tensor<Type>)
Parameters
csr_sparse_matrix
A batched CSRSparseMatrix.
index
The index in csr_sparse_matrix's batch.
-
Convert a (possibly batched) CSRSparseMatrix to dense.
Output dense_output: A dense tensor.
Declaration
public static func cSRSparseMatrixToDense<Type: FloatingPoint & TensorFlowScalar>( sparseInput: VariantHandle ) -> Tensor<Type>
Parameters
sparse_input
A batched CSRSparseMatrix.
-
Converts a (possibly batched) CSRSparseMatrix to a SparseTensor.
Outputs:
- indices: SparseTensor indices.
- values: SparseTensor values.
- dense_shape: SparseTensor dense shape.
Declaration
public static func cSRSparseMatrixToSparseTensor<Type: FloatingPoint & TensorFlowScalar>( sparseMatrix: VariantHandle ) -> (indices: Tensor<Int64>, values: Tensor<Type>, denseShape: Tensor<Int64>)
Parameters
sparse_matrix
A (possibly batched) CSRSparseMatrix.
-
cSVDataset(filenames:compressionType:bufferSize:header:fieldDelim:useQuoteDelim:naValue:selectCols:recordDefaults:outputShapes:)
Declaration
public static func cSVDataset<OutputTypes: TensorArrayProtocol>( filenames: StringTensor, compressionType: StringTensor, bufferSize: Tensor<Int64>, header: Tensor<Bool>, fieldDelim: StringTensor, useQuoteDelim: Tensor<Bool>, naValue: StringTensor, selectCols: Tensor<Int64>, recordDefaults: OutputTypes, outputShapes: [TensorShape?] ) -> VariantHandle
-
Performs beam search decoding on the logits given in input.
A note about the attribute merge_repeated: For the beam search decoder, this means that if consecutive entries in a beam are the same, only the first of these is emitted. That is, when the top path is “A B B B B”, “A B” is returned if merge_repeated = True but “A B B B B” is returned if merge_repeated = False.
Attrs:
- beam_width: A scalar >= 0 (beam search beam width).
- top_paths: A scalar >= 0, <= beam_width (controls output size).
- merge_repeated: If true, merge repeated classes in output.
Outputs:
- decoded_indices: A list (length: top_paths) of indices matrices. Matrix j, size (total_decoded_outputs[j] x 2), has indices of a SparseTensor<int64, 2>. The rows store: [batch, time].
- decoded_values: A list (length: top_paths) of values vectors. Vector j, size (length total_decoded_outputs[j]), has the values of a SparseTensor<int64, 2>. The vector stores the decoded classes for beam j.
- decoded_shape: A list (length: top_paths) of shape vectors. Vector j, size (2), stores the shape of the decoded SparseTensor[j]. Its values are: [batch_size, max_decoded_length[j]].
- log_probability: A matrix, shaped: (batch_size x top_paths). The sequence log-probabilities.
Declaration
public static func cTCBeamSearchDecoder<T: FloatingPoint & TensorFlowScalar>( inputs: Tensor<T>, sequenceLength: Tensor<Int32>, beamWidth: Int64, topPaths: Int64, mergeRepeated: Bool = true ) -> ( decodedIndices: [Tensor<Int64>], decodedValues: [Tensor<Int64>], decodedShape: [Tensor<Int64>], logProbability: Tensor<T> )
Parameters
inputs
3-D, shape: (max_time x batch_size x num_classes), the logits.
sequence_length
A vector containing sequence lengths, size (batch).
-
Performs greedy decoding on the logits given in inputs.
A note about the attribute merge_repeated: if enabled, when consecutive logits’ maximum indices are the same, only the first of these is emitted. Labeling the blank ‘*’, the sequence “A B B * B B” becomes “A B B” if merge_repeated = True and “A B B B B” if merge_repeated = False.
Regardless of the value of merge_repeated, if the maximum index of a given time and batch corresponds to the blank, index (num_classes - 1), no new element is emitted.
Attr merge_repeated: If True, merge repeated classes in output.
Outputs:
- decoded_indices: Indices matrix, size (total_decoded_outputs x 2), of a SparseTensor<int64, 2>. The rows store: [batch, time].
- decoded_values: Values vector, size: (total_decoded_outputs), of a SparseTensor<int64, 2>. The vector stores the decoded classes.
- decoded_shape: Shape vector, size (2), of the decoded SparseTensor. Values are: [batch_size, max_decoded_length].
- log_probability: Matrix, size (batch_size x 1), containing sequence log-probabilities.
Declaration
Parameters
inputs
3-D, shape: (max_time x batch_size x num_classes), the logits.
sequence_length
A vector containing sequence lengths, size (batch_size).
-
cTCLoss(inputs:labelsIndices:labelsValues:sequenceLength:preprocessCollapseRepeated:ctcMergeRepeated:ignoreLongerOutputsThanInputs:)
Calculates the CTC Loss (log probability) for each batch entry. Also calculates the gradient. This class performs the softmax operation for you, so inputs should be, e.g., linear projections of outputs by an LSTM.
Attrs:
- preprocess_collapse_repeated: Scalar, if true then repeated labels are collapsed prior to the CTC calculation.
- ctc_merge_repeated: Scalar. If set to false, during CTC calculation repeated non-blank labels will not be merged and are interpreted as individual labels. This is a simplified version of CTC.
- ignore_longer_outputs_than_inputs: Scalar. If set to true, during CTC calculation, items that have longer output sequences than input sequences are skipped: they don’t contribute to the loss term and have zero-gradient.
Outputs:
- loss: A vector (batch) containing log-probabilities.
- gradient: The gradient of loss. 3-D, shape: (max_time x batch_size x num_classes).
Declaration
public static func cTCLoss<T: FloatingPoint & TensorFlowScalar>( inputs: Tensor<T>, labelsIndices: Tensor<Int64>, labelsValues: Tensor<Int32>, sequenceLength: Tensor<Int32>, preprocessCollapseRepeated: Bool = false, ctcMergeRepeated: Bool = true, ignoreLongerOutputsThanInputs: Bool = false ) -> (loss: Tensor<T>, gradient: Tensor<T>)
Parameters
inputs
3-D, shape: (max_time x batch_size x num_classes), the logits.
labels_indices
The indices of a SparseTensor<int32, 2>. labels_indices(i, :) == [b, t] means labels_values(i) stores the id for (batch b, time t).
labels_values
The values (labels) associated with the given batch and time.
sequence_length
A vector containing sequence lengths (batch).
-
Creates a dataset that caches elements from input_dataset.
A CacheDataset will iterate over the input_dataset and store tensors. If the cache already exists, the cache will be used. If the cache is inappropriate (e.g. cannot be opened, or contains tensors of the wrong shape / size), an error will be returned when used.
Declaration
public static func cacheDataset( inputDataset: VariantHandle, filename: StringTensor, outputTypes: [TensorDataType], outputShapes: [TensorShape?] ) -> VariantHandle
Parameters
filename
A path on the filesystem where we should cache the dataset. Note: this will be a directory.
-
Declaration
public static func cacheDatasetV2( inputDataset: VariantHandle, filename: StringTensor, cache: ResourceHandle, outputTypes: [TensorDataType], outputShapes: [TensorShape?] ) -> VariantHandle
-
Cast x of type SrcT to y of DstT.
Declaration
public static func cast< Srct: TensorFlowScalar, Dstt: TensorFlowScalar >( _ x: Tensor<Srct>, truncate: Bool = false ) -> Tensor<Dstt>
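A minimal Swift sketch (the destination type is chosen via the return type annotation; example values chosen here):
let x = Tensor<Float>([1.8, -2.7])
// Casting float to int drops the fractional part: y == [1, -2].
let y: Tensor<Int32> = _Raw.cast(x)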
-
Returns element-wise smallest integer not less than x.
Declaration
public static func ceil<T: FloatingPoint & TensorFlowScalar>( _ x: Tensor<T> ) -> Tensor<T>
-
Checks a tensor for NaN and Inf values.
When run, reports an InvalidArgument error if tensor has any values that are not a number (NaN) or infinity (Inf). Otherwise, passes tensor as-is.
- Attr message: Prefix of the error message.
Declaration
public static func checkNumerics<T: FloatingPoint & TensorFlowScalar>( _ tensor: Tensor<T>, message: String ) -> Tensor<T>
-
Computes the Cholesky decomposition of one or more square matrices.
The input is a tensor of shape [..., M, M] whose inner-most 2 dimensions form square matrices.
The input has to be symmetric and positive definite. Only the lower-triangular part of the input will be used for this operation. The upper-triangular part will not be read.
The output is a tensor of the same shape as the input containing the Cholesky decompositions for all input submatrices [..., :, :].
Note: The gradient computation on GPU is faster for large matrices but not for large batch dimensions when the submatrices are small. In this case it might be faster to use the CPU.
Output output: Shape is [..., M, M].
Declaration
public static func cholesky<T: FloatingPoint & TensorFlowScalar>( _ input: Tensor<T> ) -> Tensor<T>
Parameters
input
Shape is [..., M, M].
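For example (a minimal sketch; the 2x2 matrix below is symmetric positive definite, so the factor is easy to verify by hand):
// A symmetric positive-definite matrix: [[4, 2], [2, 3]].
let a = Tensor<Float>(shape: [2, 2], scalars: [4, 2, 2, 3])
let l = _Raw.cholesky(a)
// l is lower triangular and satisfies a == l * lᵀ:
// [[2.0, 0.0],
//  [1.0, 1.4142135]]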
-
Computes the reverse mode backpropagated gradient of the Cholesky algorithm.
For an explanation see “Differentiation of the Cholesky algorithm” by Iain Murray http://arxiv.org/abs/1602.07527.
Output output: Symmetrized version of df/dA. Shape is [..., M, M].
Declaration
public static func choleskyGrad<T: FloatingPoint & TensorFlowScalar>( l: Tensor<T>, grad: Tensor<T> ) -> Tensor<T>
Parameters
l
Output of batch Cholesky algorithm l = cholesky(A). Shape is [..., M, M]. Algorithm depends only on lower triangular part of the innermost matrices of this tensor.
grad
df/dl where f is some scalar function. Shape is [..., M, M]. Algorithm depends only on lower triangular part of the innermost matrices of this tensor.
-
Declaration
public static func chooseFastestDataset( inputDatasets: [VariantHandle], numExperiments: Int64, outputTypes: [TensorDataType], outputShapes: [TensorShape?] ) -> VariantHandle
-
Clips tensor values to a specified min and max.
Given a tensor t, this operation returns a tensor of the same type and shape as t with its values clipped to clip_value_min and clip_value_max. Any values less than clip_value_min are set to clip_value_min. Any values greater than clip_value_max are set to clip_value_max.
Output output: A clipped Tensor with the same shape as input ‘t’.
Declaration
public static func clipByValue<T: TensorFlowNumeric>( t: Tensor<T>, clipValueMin: Tensor<T>, clipValueMax: Tensor<T> ) -> Tensor<T>
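For example (a minimal sketch; 0-D min/max tensors broadcast against t):
let t = Tensor<Float>([0, 3, 9])
let clipped = _Raw.clipByValue(
  t: t,
  clipValueMin: Tensor<Float>(2),
  clipValueMax: Tensor<Float>(5))
// clipped == [2.0, 3.0, 5.0]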
-
Declaration
public static func closeSummaryWriter( writer: ResourceHandle )
-
Receives a tensor value broadcast from another device.
Declaration
public static func collectiveBcastRecv<T: TensorFlowNumeric>( groupSize: Int64, groupKey: Int64, instanceKey: Int64, shape: TensorShape?, communicationHint: String = "auto" ) -> Tensor<T>
-
Broadcasts a tensor value to one or more other devices.
Declaration
public static func collectiveBcastSend<T: TensorFlowNumeric>( _ input: Tensor<T>, groupSize: Int64, groupKey: Int64, instanceKey: Int64, shape: TensorShape?, communicationHint: String = "auto" ) -> Tensor<T>
-
Mutually accumulates multiple tensors of identical type and shape.
Declaration
public static func collectiveGather<T: TensorFlowNumeric>( _ input: Tensor<T>, groupSize: Int64, groupKey: Int64, instanceKey: Int64, shape: TensorShape?, communicationHint: String = "auto" ) -> Tensor<T>
-
An Op to permute tensors across replicated TPU instances.
Each instance supplies its own input.
For example, suppose there are 4 TPU instances: [A, B, C, D]. Passing source_target_pairs=[[0,1],[1,2],[2,3],[3,0]] gets the outputs: [D, A, B, C].
Attr T: The type of elements to be exchanged.
Output output: The permuted input.
Declaration
public static func collectivePermute<T: TensorFlowNumeric>( _ input: Tensor<T>, sourceTargetPairs: Tensor<Int32> ) -> Tensor<T>
Parameters
input
The local input to be permuted. Currently only supports float and bfloat16.
source_target_pairs
A tensor with shape [num_pairs, 2].
-
collectiveReduce(_:groupSize:groupKey:instanceKey:mergeOp:finalOp:subdivOffsets:waitFor:communicationHint:)
Mutually reduces multiple tensors of identical type and shape.
Declaration
public static func collectiveReduce<T: TensorFlowNumeric>( _ input: Tensor<T>, groupSize: Int64, groupKey: Int64, instanceKey: Int64, mergeOp: MergeOp, finalOp: FinalOp, subdivOffsets: [Int32], waitFor: [Int32], communicationHint: String = "auto" ) -> Tensor<T>
-
combinedNonMaxSuppression(boxes:scores:maxOutputSizePerClass:maxTotalSize:iouThreshold:scoreThreshold:padPerClass:clipBoxes:)
Greedily selects a subset of bounding boxes in descending order of score.
This operation performs non_max_suppression on the inputs per batch, across all classes. Prunes away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system. Also note that this algorithm is invariant to orthogonal transformations and translations of the coordinate system; thus translations or reflections of the coordinate system result in the same boxes being selected by the algorithm. The output of this operation is the final boxes, scores and classes tensor returned after performing non_max_suppression.
Attrs:
- pad_per_class: If false, the output nmsed boxes, scores and classes are padded/clipped to max_total_size. If true, the output nmsed boxes, scores and classes are padded to be of length max_size_per_class * num_classes, unless it exceeds max_total_size in which case it is clipped to max_total_size. Defaults to false.
- clip_boxes: If true, assume the box coordinates are between [0, 1] and clip the output boxes if they fall beyond [0, 1]. If false, do not do clipping and output the box coordinates as they are.
Outputs:
- nmsed_boxes: A [batch_size, max_detections, 4] float32 tensor containing the non-max suppressed boxes.
- nmsed_scores: A [batch_size, max_detections] float32 tensor containing the scores for the boxes.
- nmsed_classes: A [batch_size, max_detections] float32 tensor containing the classes for the boxes.
- valid_detections: A [batch_size] int32 tensor indicating the number of valid detections per batch item. Only the top num_detections[i] entries in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the entries are zero paddings.
Declaration
public static func combinedNonMaxSuppression( boxes: Tensor<Float>, scores: Tensor<Float>, maxOutputSizePerClass: Tensor<Int32>, maxTotalSize: Tensor<Int32>, iouThreshold: Tensor<Float>, scoreThreshold: Tensor<Float>, padPerClass: Bool = false, clipBoxes: Bool = true ) -> ( nmsedBoxes: Tensor<Float>, nmsedScores: Tensor<Float>, nmsedClasses: Tensor<Float>, validDetections: Tensor<Int32> )
Parameters
boxes
A 4-D float tensor of shape [batch_size, num_boxes, q, 4]. If q is 1 then the same boxes are used for all classes; otherwise, if q is equal to the number of classes, class-specific boxes are used.
scores
A 3-D float tensor of shape [batch_size, num_boxes, num_classes] representing a single score corresponding to each box (each row of boxes).
max_output_size_per_class
A scalar integer tensor representing the maximum number of boxes to be selected by non max suppression per class.
max_total_size
A scalar representing maximum number of boxes retained over all classes.
iou_threshold
A 0-D float tensor representing the threshold for deciding whether boxes overlap too much with respect to IOU.
score_threshold
A 0-D float tensor representing the threshold for deciding when to remove boxes based on score.
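A minimal shape sketch (one image, three candidate boxes, a single class, so q == 1):
let boxes = Tensor<Float>(shape: [1, 3, 1, 4], scalars: [
  0.0, 0.0, 1.0, 1.0,
  0.0, 0.1, 1.0, 1.1,  // overlaps the first box heavily
  0.0, 0.5, 0.5, 1.0,
])
let scores = Tensor<Float>(shape: [1, 3, 1], scalars: [0.9, 0.8, 0.7])
let result = _Raw.combinedNonMaxSuppression(
  boxes: boxes,
  scores: scores,
  maxOutputSizePerClass: Tensor<Int32>(2),
  maxTotalSize: Tensor<Int32>(2),
  iouThreshold: Tensor<Float>(0.5),
  scoreThreshold: Tensor<Float>(0))
// result.nmsedBoxes has shape [1, 2, 4]; result.validDetections reports
// how many of those padded slots hold real detections.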
-
Compare values of input to threshold and pack resulting bits into a uint8.
Each comparison returns a boolean true (if input_value > threshold) or false otherwise.
This operation is useful for Locality-Sensitive-Hashing (LSH) and other algorithms that use hashing approximations of cosine and L2 distances; codes can be generated from an input via:
codebook_size = 50
codebook_bits = codebook_size * 32
codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits],
                           dtype=x.dtype,
                           initializer=tf.orthogonal_initializer())
codes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.)
codes = tf.bitcast(codes, tf.int32)  # go from uint8 to int32
# now codes has shape x.shape[:-1] + [codebook_size]
NOTE: Currently, the innermost dimension of the tensor must be divisible by 8.
Given an input shaped [s0, s1, ..., s_n], the output is a uint8 tensor shaped [s0, s1, ..., s_n / 8].
Attr T: The type of the input and threshold.
Output output: The bitpacked comparisons.
Declaration
public static func compareAndBitpack<T: TensorFlowScalar>( _ input: Tensor<T>, threshold: Tensor<T> ) -> Tensor<UInt8>
Parameters
input
Values to compare against threshold and bitpack.
threshold
Threshold to compare against.
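A minimal sketch, assuming the first element maps to the most significant bit of each packed byte:
let input = Tensor<Float>([1, 2, 3, 4, 5, 6, 7, 8])
let packed = _Raw.compareAndBitpack(input, threshold: Tensor<Float>(4))
// Comparisons are [0, 0, 0, 0, 1, 1, 1, 1], packed into a single uint8:
// packed == [15] (0b00001111).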
-
Converts two real numbers to a complex number.
Given a tensor real representing the real part of a complex number, and a tensor imag representing the imaginary part of a complex number, this operation returns complex numbers elementwise of the form \(a + bj\), where a represents the real part and b represents the imag part.
The input tensors real and imag must have the same shape.
For example:
# tensor 'real' is [2.25, 3.25]
# tensor `imag` is [4.75, 5.75]
tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
Declaration
public static func complex< T: FloatingPoint & TensorFlowScalar, Tout: TensorFlowScalar >( real: Tensor<T>, imag: Tensor<T> ) -> Tensor<Tout>
-
Computes the complex absolute value of a tensor.
Given a tensor x of complex numbers, this operation returns a tensor of type float or double that is the absolute value of each element in x. All elements in x must be complex numbers of the form \(a + bj\). The absolute value is computed as \(\sqrt{a^2 + b^2}\).
Declaration
public static func complexAbs< T: TensorFlowScalar, Tout: FloatingPoint & TensorFlowScalar >( _ x: Tensor<T> ) -> Tensor<Tout>
-
Declaration
public static func complexStruct<TC: TensorGroup>( nA: Int64, nB: Int64 ) -> (a: [Tensor<Int32>], b: [Tensor<Int64>], c: TC)
-
Computes the ids of the positions in sampled_candidates that match true_labels.
When doing log-odds NCE, the result of this op should be passed through a SparseToDense op, then added to the logits of the sampled candidates. This has the effect of ‘removing’ the sampled labels that match the true labels by making the classifier sure that they are sampled labels.
Attrs:
- num_true: Number of true labels per context.
- seed: If either seed or seed2 are set to be non-zero, the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
- seed2: A second seed to avoid seed collision.
Outputs:
- indices: A vector of indices corresponding to rows of true_candidates.
- ids: A vector of IDs of positions in sampled_candidates that match a true_label for the row with the corresponding index in indices.
- weights: A vector of the same length as indices and ids, in which each element is -FLOAT_MAX.
Declaration
Parameters
true_classes
The true_classes output of UnpackSparseLabels.
sampled_candidates
The sampled_candidates output of CandidateSampler.
-
Concatenates tensors along one dimension.
Output output: A Tensor with the concatenation of values stacked along the concat_dim dimension. This tensor’s shape matches that of values except in concat_dim where it has the sum of the sizes.
Declaration
public static func concat<T: TensorFlowScalar>( concatDim: Tensor<Int32>, _ values: [Tensor<T>] ) -> Tensor<T>
Parameters
concat_dim
0-D. The dimension along which to concatenate. Must be in the range [0, rank(values)).
values
The N Tensors to concatenate. Their ranks and types must match, and their sizes must match in all dimensions except concat_dim.
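For example (a minimal sketch; the inputs agree in every dimension except concat_dim):
let a = Tensor<Float>(shape: [1, 2], scalars: [1, 2])
let b = Tensor<Float>(shape: [2, 2], scalars: [3, 4, 5, 6])
let c = _Raw.concat(concatDim: Tensor<Int32>(0), [a, b])
// c has shape [3, 2]: [[1, 2], [3, 4], [5, 6]]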
-
Computes offsets of concat inputs within its output.
For example:
# 'x' is [2, 2, 7]
# 'y' is [2, 3, 7]
# 'z' is [2, 5, 7]
concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
This is typically used by gradient computations for a concat operation.
Output offset: The N int32 vectors representing the starting offset of input tensors within the concatenated output.
Declaration
Parameters
concat_dim
The dimension along which to concatenate.
shape
The N int32 vectors representing shape of tensors being concatenated.
-
Concatenates tensors along one dimension.
Output output: A Tensor with the concatenation of values stacked along the concat_dim dimension. This tensor’s shape matches that of values except in concat_dim where it has the sum of the sizes.
Declaration
public static func concatV2< T: TensorFlowScalar, Tidx: TensorFlowIndex >( _ values: [Tensor<T>], axis: Tensor<Tidx> ) -> Tensor<T>
Parameters
values
List of N Tensors to concatenate. Their ranks and types must match, and their sizes must match in all dimensions except concat_dim.
axis
0-D. The dimension along which to concatenate. Must be in the range [-rank(values), rank(values)).
-
Creates a dataset that concatenates input_dataset with another_dataset.
Declaration
public static func concatenateDataset( inputDataset: VariantHandle, anotherDataset: VariantHandle, outputTypes: [TensorDataType], outputShapes: [TensorShape?] ) -> VariantHandle
-
configureDistributedTPU(embeddingConfig:tpuEmbeddingConfig:isGlobalInit:enableWholeMeshCompilations:compilationFailureClosesChips:)
Sets up the centralized structures for a distributed TPU system.
Attrs:
- embedding_config: Reserved. Do not use.
- tpu_embedding_config: Serialized tensorflow.tpu.TPUEmbeddingConfiguration that describes the embedding lookups of the program.
- is_global_init: Reserved. Do not use.
Output topology: A serialized tensorflow.tpu.TopologyProto that describes the TPU topology.
Declaration
public static func configureDistributedTPU( embeddingConfig: String, tpuEmbeddingConfig: String, isGlobalInit: Bool = false, enableWholeMeshCompilations: Bool = false, compilationFailureClosesChips: Bool = true ) -> StringTensor
-
Sets up TPUEmbedding in a distributed TPU system.
- Attr config: Serialized tensorflow.tpu.TPUEmbeddingConfiguration that describes the embedding lookups of the program.
Declaration
public static func configureTPUEmbedding( config: String )
-
Returns the complex conjugate of a complex number.
Given a tensor input of complex numbers, this operation returns a tensor of complex numbers that are the complex conjugate of each element in input. The complex numbers in input must be of the form \(a + bj\), where a is the real part and b is the imaginary part.
The complex conjugate returned by this operation is of the form \(a - bj\).
For example:
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
Declaration
public static func conj<T: TensorFlowScalar>( _ input: Tensor<T> ) -> Tensor<T>
-
Shuffle dimensions of x according to a permutation and conjugate the result.
The output y has the same rank as x. The shapes of x and y satisfy:
y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]
y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])
Declaration
public static func conjugateTranspose< T: TensorFlowScalar, Tperm: TensorFlowIndex >( _ x: Tensor<T>, perm: Tensor<Tperm> ) -> Tensor<T>
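For a real-valued tensor the conjugation is a no-op, so this reduces to a plain transpose (a minimal sketch):
let x = Tensor<Float>(shape: [2, 3], scalars: [1, 2, 3, 4, 5, 6])
let y = _Raw.conjugateTranspose(x, perm: Tensor<Int32>([1, 0]))
// y has shape [3, 2], with y[i, j] == x[j, i]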
-
Declaration
public static func constructionFails()
-
This op consumes a lock created by MutexLock.
This op exists to consume a tensor created by MutexLock (other than direct control dependencies). It should be the only op that consumes the tensor, and will raise an error if it is not. Its only purpose is to keep the mutex lock tensor alive until it is consumed by this op.
NOTE: This operation must run on the same device as its input. This may be enforced via the colocate_with mechanism.
Declaration
public static func consumeMutexLock( mutexLock: VariantHandle )
Parameters
mutex_lock
A tensor returned by MutexLock.
-
Does nothing. Serves as a control trigger for scheduling.
Only useful as a placeholder for control edges.
Declaration
public static func controlTrigger()
-
Computes a 2-D convolution given 4-D input and filter tensors.
Given an input tensor of shape [batch, in_height, in_width, in_channels] and a filter / kernel tensor of shape [filter_height, filter_width, in_channels, out_channels], this op performs the following:
- Flattens the filter to a 2-D matrix with shape [filter_height * filter_width * in_channels, output_channels].
- Extracts image patches from the input tensor to form a virtual tensor of shape [batch, out_height, out_width, filter_height * filter_width * in_channels].
- For each patch, right-multiplies the filter matrix and the image patch vector.
In detail, with the default NHWC format,
output[b, i, j, k] = sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * filter[di, dj, q, k]
Must have strides[0] = strides[3] = 1. For the most common case of the same horizontal and vertical strides, strides = [1, stride, stride, 1].
Attrs:
- strides: 1-D tensor of length 4. The stride of the sliding window for each dimension of input. The dimension order is determined by the value of data_format, see below for details.
- padding: The type of padding algorithm to use.
- explicit_paddings: If padding is "EXPLICIT", the list of explicit padding amounts. For the ith dimension, the amount of padding inserted before and after the dimension is explicit_paddings[2 * i] and explicit_paddings[2 * i + 1], respectively. If padding is not "EXPLICIT", explicit_paddings must be empty.
- data_format: Specify the data format of the input and output data. With the default format “NHWC”, the data is stored in the order of: [batch, height, width, channels]. Alternatively, the format could be “NCHW”, the data storage order of: [batch, channels, height, width].
- dilations: 1-D tensor of length 4. The dilation factor for each dimension of input. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of data_format, see above for details. Dilations in the batch and depth dimensions must be 1.
Output output: A 4-D tensor. The dimension order is determined by the value of data_format, see below for details.
Declaration
public static func conv2D<T: TensorFlowNumeric>( _ input: Tensor<T>, filter: Tensor<T>, strides: [Int32], useCudnnOnGpu: Bool = true, padding: Padding2, explicitPaddings: [Int32], dataFormat: DataFormat = .nhwc, dilations: [Int32] = [1, 1, 1, 1] ) -> Tensor<T>
Parameters
input
A 4-D tensor. The dimension order is interpreted according to the value of data_format, see below for details.
filter
A 4-D tensor of shape [filter_height, filter_width, in_channels, out_channels]
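A minimal sketch, assuming Padding2 exposes a .same case (explicitPaddings stays empty for non-explicit padding):
let image = Tensor<Float>(shape: [1, 4, 4, 1],
                          scalars: (0..<16).map(Float.init))
let kernel = Tensor<Float>(shape: [3, 3, 1, 1],
                           scalars: Array(repeating: 1.0 / 9.0, count: 9))
let output = _Raw.conv2D(
  image,
  filter: kernel,
  strides: [1, 1, 1, 1],
  padding: .same,
  explicitPaddings: [])
// output has shape [1, 4, 4, 1]: each pixel is a 3x3 local average.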
-
conv2DBackpropFilter(_:filterSizes:outBackprop:strides:useCudnnOnGpu:padding:explicitPaddings:dataFormat:dilations:)
Computes the gradients of convolution with respect to the filter.
Attrs:
- strides: The stride of the sliding window for each dimension of the input of the convolution. Must be in the same order as the dimension specified with format.
- padding: The type of padding algorithm to use.
- explicit_paddings: If padding is "EXPLICIT", the list of explicit padding amounts. For the ith dimension, the amount of padding inserted before and after the dimension is explicit_paddings[2 * i] and explicit_paddings[2 * i + 1], respectively. If padding is not "EXPLICIT", explicit_paddings must be empty.
- data_format: Specify the data format of the input and output data. With the default format “NHWC”, the data is stored in the order of: [batch, in_height, in_width, in_channels]. Alternatively, the format could be “NCHW”, the data storage order of: [batch, in_channels, in_height, in_width].
- dilations: 1-D tensor of length 4. The dilation factor for each dimension of input. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of data_format, see above for details. Dilations in the batch and depth dimensions must be 1.
Output output: 4-D with shape [filter_height, filter_width, in_channels, out_channels]. Gradient w.r.t. the filter input of the convolution.
Declaration
public static func conv2DBackpropFilter<T: FloatingPoint & TensorFlowScalar>( _ input: Tensor<T>, filterSizes: Tensor<Int32>, outBackprop: Tensor<T>, strides: [Int32], useCudnnOnGpu: Bool = true, padding: Padding2, explicitPaddings: [Int32], dataFormat: DataFormat = .nhwc, dilations: [Int32] = [1, 1, 1, 1] ) -> Tensor<T>
Parameters
input
4-D with shape [batch, in_height, in_width, in_channels].
filter_sizes
An integer vector representing the tensor shape of filter, where filter is a 4-D [filter_height, filter_width, in_channels, out_channels] tensor.
out_backprop
4-D with shape [batch, out_height, out_width, out_channels]. Gradients w.r.t. the output of the convolution.
conv2DBackpropInput(inputSizes:filter:outBackprop:strides:useCudnnOnGpu:padding:explicitPaddings:dataFormat:dilations:)
Computes the gradients of convolution with respect to the input.
Attrs:
- strides: The stride of the sliding window for each dimension of the input of the convolution. Must be in the same order as the dimension specified with format.
- padding: The type of padding algorithm to use.
- explicit_paddings: If padding is "EXPLICIT", the list of explicit padding amounts. For the ith dimension, the amount of padding inserted before and after the dimension is explicit_paddings[2 * i] and explicit_paddings[2 * i + 1], respectively. If padding is not "EXPLICIT", explicit_paddings must be empty.
- data_format: Specify the data format of the input and output data. With the default format “NHWC”, the data is stored in the order of: [batch, in_height, in_width, in_channels]. Alternatively, the format could be “NCHW”, the data storage order of: [batch, in_channels, in_height, in_width].
- dilations: 1-D tensor of length 4. The dilation factor for each dimension of input. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of data_format, see above for details. Dilations in the batch and depth dimensions must be 1.
Output output: 4-D with shape [batch, in_height, in_width, in_channels]. Gradient w.r.t. the input of the convolution.
Declaration
public static func conv2DBackpropInput<T: TensorFlowNumeric>( inputSizes: Tensor<Int32>, filter: Tensor<T>, outBackprop: Tensor<T>, strides: [Int32], useCudnnOnGpu: Bool = true, padding: Padding2, explicitPaddings: [Int32], dataFormat: DataFormat = .nhwc, dilations: [Int32] = [1, 1, 1, 1] ) -> Tensor<T>
Parameters
input_sizes
An integer vector representing the shape of input, where input is a 4-D [batch, height, width, channels] tensor.
filter
4-D with shape [filter_height, filter_width, in_channels, out_channels].
out_backprop
4-D with shape [batch, out_height, out_width, out_channels]. Gradients w.r.t. the output of the convolution.
Computes a 3-D convolution given 5-D input and filter tensors.
In signal processing, cross-correlation is a measure of similarity of two waveforms as a function of a time-lag applied to one of them. This is also known as a sliding dot product or sliding inner-product.
Our Conv3D implements a form of cross-correlation.
Attrs:
- strides: 1-D tensor of length 5. The stride of the sliding window for each dimension of input. Must have strides[0] = strides[4] = 1.
- padding: The type of padding algorithm to use.
- data_format: The data format of the input and output data. With the default format “NDHWC”, the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. Alternatively, the format could be “NCDHW”, the data storage order is: [batch, in_channels, in_depth, in_height, in_width].
- dilations: 1-D tensor of length 5. The dilation factor for each dimension of input. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of data_format, see above for details. Dilations in the batch and depth dimensions must be 1.
Declaration
public static func conv3D<T: FloatingPoint & TensorFlowScalar>( _ input: Tensor<T>, filter: Tensor<T>, strides: [Int32], padding: Padding, dataFormat: DataFormat1 = .ndhwc, dilations: [Int32] = [1, 1, 1, 1, 1] ) -> Tensor<T>
Parameters
input
Shape [batch, in_depth, in_height, in_width, in_channels].
filter
Shape [filter_depth, filter_height, filter_width, in_channels, out_channels]. in_channels must match between input and filter.
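A minimal sketch, assuming Padding exposes a .valid case:
let volume = Tensor<Float>(shape: [1, 4, 4, 4, 1],
                           scalars: (0..<64).map(Float.init))
let kernel = Tensor<Float>(shape: [2, 2, 2, 1, 1],
                           scalars: Array(repeating: 1, count: 8))
let out = _Raw.conv3D(
  volume,
  filter: kernel,
  strides: [1, 1, 1, 1, 1],
  padding: .valid)
// out has shape [1, 3, 3, 3, 1]: each output sums a 2x2x2 neighborhood.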
-
Computes the gradients of 3-D convolution with respect to the filter.
Attrs:
- strides: 1-D tensor of length 5. The stride of the sliding window for each dimension of input. Must have strides[0] = strides[4] = 1.
- padding: The type of padding algorithm to use.
Declaration
Parameters
input
Shape [batch, depth, rows, cols, in_channels].
filter
Shape [depth, rows, cols, in_channels, out_channels]. in_channels must match between input and filter.
out_backprop
Backprop signal of shape [batch, out_depth, out_rows, out_cols, out_channels].
-
Computes the gradients of 3-D convolution with respect to the filter.
Attrs:
- strides: 1-D tensor of length 5. The stride of the sliding window for each dimension of input. Must have strides[0] = strides[4] = 1.
- padding: The type of padding algorithm to use.
- data_format: The data format of the input and output data. With the default format “NDHWC”, the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. Alternatively, the format could be “NCDHW”, the data storage order is: [batch, in_channels, in_depth, in_height, in_width].
- dilations: 1-D tensor of length 5. The dilation factor for each dimension of input. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of data_format, see above for details. Dilations in the batch and depth dimensions must be 1.
Declaration
public static func conv3DBackpropFilterV2<T: FloatingPoint & TensorFlowScalar>( _ input: Tensor<T>, filterSizes: Tensor<Int32>, outBackprop: Tensor<T>, strides: [Int32], padding: Padding, dataFormat: DataFormat1 = .ndhwc, dilations: [Int32] = [1, 1, 1, 1, 1] ) -> Tensor<T>
Parameters
input
Shape [batch, depth, rows, cols, in_channels].
filter_sizes
An integer vector representing the tensor shape of filter, where filter is a 5-D [filter_depth, filter_height, filter_width, in_channels, out_channels] tensor.
out_backprop
Backprop signal of shape [batch, out_depth, out_rows, out_cols, out_channels].
-
Computes the gradients of 3-D convolution with respect to the input.
Attrs:
- strides: 1-D tensor of length 5. The stride of the sliding window for each dimension of input. Must have strides[0] = strides[4] = 1.
- padding: The type of padding algorithm to use.
Declaration
Parameters
input
Shape [batch, depth, rows, cols, in_channels].
filter
Shape [depth, rows, cols, in_channels, out_channels]. in_channels must match between input and filter.
out_backprop
Backprop signal of shape [batch, out_depth, out_rows, out_cols, out_channels].
-
Computes the gradients of 3-D convolution with respect to the input.
Attrs:
- strides: 1-D tensor of length 5. The stride of the sliding window for each dimension of input. Must have strides[0] = strides[4] = 1.
- padding: The type of padding algorithm to use.
- data_format: The data format of the input and output data. With the default format “NDHWC”, the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. Alternatively, the format could be “NCDHW”, the data storage order is: [batch, in_channels, in_depth, in_height, in_width].
- dilations: 1-D tensor of length 5. The dilation factor for each dimension of input. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of data_format, see above for details. Dilations in the batch and depth dimensions must be 1.
Declaration
public static func conv3DBackpropInputV2< T: FloatingPoint & TensorFlowScalar, Tshape: TensorFlowIndex >( inputSizes: Tensor<Tshape>, filter: Tensor<T>, outBackprop: Tensor<T>, strides: [Int32], padding: Padding, dataFormat: DataFormat1 = .ndhwc, dilations: [Int32] = [1, 1, 1, 1, 1] ) -> Tensor<T>
Parameters
input_sizes
An integer vector representing the tensor shape of input, where input is a 5-D [batch, depth, rows, cols, in_channels] tensor.
filter
Shape [depth, rows, cols, in_channels, out_channels]. in_channels must match between input and filter.
out_backprop
Backprop signal of shape [batch, out_depth, out_rows, out_cols, out_channels].
-
Copy a tensor from CPU-to-CPU or GPU-to-GPU.
Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the device on which the tensor is allocated. N.B.: If all downstream attached debug ops are disabled given the current gRPC gating status, the output will simply forward the input tensor without deep-copying. See the documentation of Debug* ops for more details.
Unlike the CopyHost Op, this op does not have HostMemory constraint on its input or output.
Attrs:
- tensor_name: The name of the input tensor.
- debug_ops_spec: A list of debug op spec (op, url, gated_grpc) for attached debug ops. Each element of the list has the format <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented as 0/1. E.g., “DebugIdentity;grpc://foo:3333;1”, “DebugIdentity;file:///tmp/tfdbg_1;0”.
Declaration
public static func copy<T: TensorFlowScalar>( _ input: Tensor<T>, tensorName: String, debugOpsSpec: [String] ) -> Tensor<T>
Parameters
input
Input tensor.
-
Copy a tensor to host.
Performs CPU-to-CPU deep-copying of tensor. N.B.: If all downstream attached debug ops are disabled given the current gRPC gating status, the output will simply forward the input tensor without deep-copying. See the documentation of Debug* ops for more details.
Unlike the Copy Op, this op has HostMemory constraint on its input or output.
Attrs:
- tensor_name: The name of the input tensor.
- debug_ops_spec: A list of debug op spec (op, url, gated_grpc) for attached debug ops. Each element of the list has the format <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented as 0/1. E.g., “DebugIdentity;grpc://foo:3333;1”, “DebugIdentity;file:///tmp/tfdbg_1;0”.
Declaration
public static func copyHost<T: TensorFlowScalar>( _ input: Tensor<T>, tensorName: String, debugOpsSpec: [String] ) -> Tensor<T>
Parameters
input
Input tensor.
-
Declaration
public static func copyOp<T: TensorFlowScalar>( _ a: Tensor<T> ) -> Tensor<T>
-
Computes cos of x element-wise.
Given an input tensor, this function computes cosine of every element in the tensor. Input range is (-inf, inf) and output range is [-1, 1]. If input lies outside the boundary, nan is returned.
x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")])
tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan]
Declaration
public static func cos<T: FloatingPoint & TensorFlowScalar>( _ x: Tensor<T> ) -> Tensor<T>
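The same computation through the raw op in Swift (a minimal sketch):
let x = Tensor<Float>([0, Float.pi / 2, Float.pi])
let y = _Raw.cos(x)
// y ≈ [1.0, 0.0, -1.0]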
-
Computes hyperbolic cosine of x element-wise.
Given an input tensor, this function computes hyperbolic cosine of every element in the tensor. Input range is [-inf, inf] and output range is [1, inf].
x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")])
tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf]
Declaration
public static func cosh<T: FloatingPoint & TensorFlowScalar>( _ x: Tensor<T> ) -> Tensor<T>
-
Declaration
public static func createSummaryDbWriter( writer: ResourceHandle, dbUri: StringTensor, experimentName: StringTensor, runName: StringTensor, userName: StringTensor )