The following functions are available globally.

  • Returns x like an identity function. When used in a context where x is being differentiated with respect to, this function will not produce any derivative at x.

    Declaration

    @_semantics("autodiff.nonvarying")
    public func withoutDerivative<T>(at x: T) -> T
  • Applies the given closure body to x. When used in a context where x is being differentiated with respect to, this function will not produce any derivative at x.

    Declaration

    @_semantics("autodiff.nonvarying")
    public func withoutDerivative<T, R>(at x: T, in body: (T) -> R) -> R
  • Creates a differentiable function from a vector-Jacobian products (VJP) function.

    Declaration

    public func differentiableFunction<T : Differentiable, R : Differentiable>(
      from vjp: @escaping (T)
               -> (value: R, pullback: (R.TangentVector) -> T.TangentVector)
    ) -> @differentiable (T) -> R
  • Creates a differentiable function from a vector-Jacobian products (VJP) function.

    Declaration

    public func differentiableFunction<T, U, R>(
      from vjp: @escaping (T, U)
               -> (value: R, pullback: (R.TangentVector)
                 -> (T.TangentVector, U.TangentVector))
    ) -> @differentiable (T, U) -> R
      where T : Differentiable, U : Differentiable, R : Differentiable
  • Makes a function recompute its forward computation in its pullback, a technique known as checkpointing in traditional automatic differentiation.

    Declaration

    public func withRecomputationInPullbacks<T, U>(
      _ body: @escaping @differentiable (T) -> U
    ) -> @differentiable (T) -> U where T : Differentiable, U : Differentiable
  • Declaration

    public func valueWithDifferential<T, R>(
      at x: T, in f: @differentiable (T) -> R
    ) -> (value: R, differential: (T.TangentVector) -> R.TangentVector)
  • Declaration

    public func valueWithDifferential<T, U, R>(
      at x: T, _ y: U, in f: @differentiable (T, U) -> R
    ) -> (value: R,
          differential: (T.TangentVector, U.TangentVector) -> R.TangentVector)
  • Declaration

    public func valueWithDifferential<T, U, V, R>(
      at x: T, _ y: U, _ z: V, in f: @differentiable (T, U, V) -> R
    ) -> (value: R,
          differential: (T.TangentVector, U.TangentVector, V.TangentVector)
            -> (R.TangentVector))
  • Declaration

    public func valueWithPullback<T, R>(
      at x: T, in f: @differentiable (T) -> R
    ) -> (value: R, pullback: (R.TangentVector) -> T.TangentVector)
  • Declaration

    public func valueWithPullback<T, U, R>(
      at x: T, _ y: U, in f: @differentiable (T, U) -> R
    ) -> (value: R,
          pullback: (R.TangentVector) -> (T.TangentVector, U.TangentVector))
  • Declaration

    public func valueWithPullback<T, U, V, R>(
      at x: T, _ y: U, _ z: V, in f: @differentiable (T, U, V) -> R
    ) -> (value: R,
          pullback: (R.TangentVector)
            -> (T.TangentVector, U.TangentVector, V.TangentVector))
  • Declaration

    public func differential<T, R>(
      at x: T, in f: @differentiable (T) -> R
    ) -> (T.TangentVector) -> R.TangentVector
  • Declaration

    public func differential<T, U, R>(
      at x: T, _ y: U, in f: @differentiable (T, U) -> R
    ) -> (T.TangentVector, U.TangentVector) -> R.TangentVector
  • Declaration

    public func differential<T, U, V, R>(
      at x: T, _ y: U, _ z: V, in f: @differentiable (T, U, V) -> R
    ) -> (T.TangentVector, U.TangentVector, V.TangentVector) -> (R.TangentVector)
  • Declaration

    public func pullback<T, R>(
      at x: T, in f: @differentiable (T) -> R
    ) -> (R.TangentVector) -> T.TangentVector
  • Declaration

    public func pullback<T, U, R>(
      at x: T, _ y: U, in f: @differentiable (T, U) -> R
    ) -> (R.TangentVector) -> (T.TangentVector, U.TangentVector)
  • Declaration

    public func pullback<T, U, V, R>(
      at x: T, _ y: U, _ z: V, in f: @differentiable (T, U, V) -> R
    ) -> (R.TangentVector)
        -> (T.TangentVector, U.TangentVector, V.TangentVector)
  • Declaration

    public func derivative<T: FloatingPoint, R>(
      at x: T, in f: @differentiable (T) -> R
    ) -> R.TangentVector
      where T.TangentVector == T
  • Declaration

    public func derivative<T: FloatingPoint, U: FloatingPoint, R>(
      at x: T, _ y: U, in f: @differentiable (T, U) -> R
    ) -> R.TangentVector
      where T.TangentVector == T,
            U.TangentVector == U
  • Declaration

    public func derivative<T: FloatingPoint, U: FloatingPoint, V: FloatingPoint, R>(
      at x: T, _ y: U, _ z: V, in f: @differentiable (T, U, V) -> R
    ) -> R.TangentVector
      where T.TangentVector == T,
            U.TangentVector == U,
            V.TangentVector == V
  • Declaration

    public func gradient<T, R>(
      at x: T, in f: @differentiable (T) -> R
    ) -> T.TangentVector
      where R : FloatingPoint, R.TangentVector == R
  • Declaration

    public func gradient<T, U, R>(
      at x: T, _ y: U, in f: @differentiable (T, U) -> R
    ) -> (T.TangentVector, U.TangentVector)
      where R : FloatingPoint, R.TangentVector == R
  • Declaration

    public func gradient<T, U, V, R>(
      at x: T, _ y: U, _ z: V, in f: @differentiable (T, U, V) -> R
    ) -> (T.TangentVector, U.TangentVector, V.TangentVector)
      where R : FloatingPoint, R.TangentVector == R
  • Declaration

    public func valueWithDerivative<T: FloatingPoint, R>(
      at x: T, in f: @escaping @differentiable (T) -> R
    ) -> (value: R, derivative: R.TangentVector)
      where T.TangentVector == T
  • Declaration

    public func valueWithDerivative<T: FloatingPoint, U: FloatingPoint, R>(
      at x: T, _ y: U, in f: @escaping @differentiable (T, U) -> R
    ) -> (value: R, derivative: R.TangentVector)
      where T.TangentVector == T,
            U.TangentVector == U
  • Declaration

    public func valueWithDerivative<
      T: FloatingPoint, U: FloatingPoint, V: FloatingPoint, R>(
      at x: T, _ y: U, _ z: V, in f: @escaping @differentiable (T, U, V) -> R
    ) -> (value: R, derivative: R.TangentVector)
      where T.TangentVector == T,
            U.TangentVector == U,
            V.TangentVector == V
  • Declaration

    public func valueWithGradient<T, R>(
      at x: T, in f: @differentiable (T) -> R
    ) -> (value: R, gradient: T.TangentVector)
      where R : FloatingPoint, R.TangentVector == R
  • Declaration

    public func valueWithGradient<T, U, R>(
      at x: T, _ y: U, in f: @differentiable (T, U) -> R
    ) -> (value: R, gradient: (T.TangentVector, U.TangentVector))
      where R : FloatingPoint, R.TangentVector == R
  • Declaration

    public func valueWithGradient<T, U, V, R>(
      at x: T, _ y: U, _ z: V, in f: @differentiable (T, U, V) -> R
    ) -> (value: R,
          gradient: (T.TangentVector, U.TangentVector, V.TangentVector))
      where R : FloatingPoint, R.TangentVector == R
  • Declaration

    public func derivative<T: FloatingPoint, R>(
      of f: @escaping @differentiable (T) -> R
    ) -> (T) -> R.TangentVector
      where T.TangentVector == T
  • Declaration

    public func derivative<T: FloatingPoint, U: FloatingPoint, R>(
      of f: @escaping @differentiable (T, U) -> R
    ) -> (T, U) -> R.TangentVector
      where T.TangentVector == T,
            U.TangentVector == U
  • Declaration

    public func derivative<T: FloatingPoint, U: FloatingPoint, V: FloatingPoint, R>(
      of f: @escaping @differentiable (T, U, V) -> R
    ) -> (T, U, V) -> R.TangentVector
      where T.TangentVector == T,
            U.TangentVector == U,
            V.TangentVector == V
  • Declaration

    public func gradient<T, R>(
      of f: @escaping @differentiable (T) -> R
    ) -> (T) -> T.TangentVector
      where R : FloatingPoint, R.TangentVector == R
  • Declaration

    public func gradient<T, U, R>(
      of f: @escaping @differentiable (T, U) -> R
    ) -> (T, U) -> (T.TangentVector, U.TangentVector)
      where R : FloatingPoint, R.TangentVector == R
  • Declaration

    public func gradient<T, U, V, R>(
      of f: @escaping @differentiable (T, U, V) -> R
    ) -> (T, U, V) -> (T.TangentVector, U.TangentVector, V.TangentVector)
      where R : FloatingPoint, R.TangentVector == R
  • Declaration

    public func valueWithDerivative<T: FloatingPoint, R>(
      of f: @escaping @differentiable (T) -> R
    ) -> (T) -> (value: R, derivative: R.TangentVector)
      where T.TangentVector == T
  • Declaration

    public func valueWithDerivative<T: FloatingPoint, U: FloatingPoint, R>(
      of f: @escaping @differentiable (T, U) -> R
    ) -> (T, U) -> (value: R, derivative: R.TangentVector)
      where T.TangentVector == T,
            U.TangentVector == U
  • Declaration

    public func valueWithDerivative<
      T: FloatingPoint, U: FloatingPoint, V: FloatingPoint, R>(
      of f: @escaping @differentiable (T, U, V) -> R
    ) -> (T, U, V) -> (value: R, derivative: R.TangentVector)
      where T.TangentVector == T,
            U.TangentVector == U,
            V.TangentVector == V
  • Declaration

    public func valueWithGradient<T, R>(
      of f: @escaping @differentiable (T) -> R
    ) -> (T) -> (value: R, gradient: T.TangentVector)
      where R : FloatingPoint, R.TangentVector == R
  • Declaration

    public func valueWithGradient<T, U, R>(
      of f: @escaping @differentiable (T, U) -> R
    ) -> (T, U) -> (value: R, gradient: (T.TangentVector, U.TangentVector))
      where R : FloatingPoint, R.TangentVector == R
  • Declaration

    public func valueWithGradient<T, U, V, R>(
      of f: @escaping @differentiable (T, U, V) -> R
    ) -> (T, U, V)
      -> (value: R,
          gradient: (T.TangentVector, U.TangentVector, V.TangentVector))
      where R : FloatingPoint, R.TangentVector == R
  • Returns the L1 loss between predictions and expectations.

    Declaration

    @differentiable
    public func l1Loss<Scalar: TensorFlowFloatingPoint>(
        predicted: Tensor<Scalar>,
        expected: Tensor<Scalar>
    ) -> Tensor<Scalar>

    Parameters

    predicted

    Predicted outputs from a neural network.

    expected

    Expected values, i.e. targets, that correspond to the correct output.

  • Returns the L2 loss between predictions and expectations.

    Declaration

    @differentiable
    public func l2Loss<Scalar: TensorFlowFloatingPoint>(
        predicted: Tensor<Scalar>,
        expected: Tensor<Scalar>
    ) -> Tensor<Scalar>

    Parameters

    predicted

    Predicted outputs from a neural network.

    expected

    Expected values, i.e. targets, that correspond to the correct output.

  • Returns the hinge loss between predictions and expectations.

    Declaration

    @differentiable
    public func hingeLoss<Scalar: TensorFlowFloatingPoint>(
        predicted: Tensor<Scalar>,
        expected: Tensor<Scalar>
    ) -> Tensor<Scalar>

    Parameters

    predicted

    Predicted outputs from a neural network.

    expected

    Expected values, i.e. targets, that correspond to the correct output.

  • Returns the squared hinge loss between predictions and expectations.

    Declaration

    @differentiable
    public func squaredHingeLoss<Scalar: TensorFlowFloatingPoint>(
        predicted: Tensor<Scalar>,
        expected: Tensor<Scalar>
    ) -> Tensor<Scalar>

    Parameters

    predicted

    Predicted outputs from a neural network.

    expected

    Expected values, i.e. targets, that correspond to the correct output.

  • Returns the categorical hinge loss between predictions and expectations.

    Declaration

    @differentiable
    public func categoricalHingeLoss<Scalar: TensorFlowFloatingPoint>(
        predicted: Tensor<Scalar>,
        expected: Tensor<Scalar>
    ) -> Tensor<Scalar>

    Parameters

    predicted

    Predicted outputs from a neural network.

    expected

    Expected values, i.e. targets, that correspond to the correct output.

  • Returns the logarithm of the hyperbolic cosine of the error between predictions and expectations.

    Declaration

    @differentiable
    public func logCoshLoss<Scalar: TensorFlowFloatingPoint>(
        predicted: Tensor<Scalar>,
        expected: Tensor<Scalar>
    ) -> Tensor<Scalar>

    Parameters

    predicted

    Predicted outputs from a neural network.

    expected

    Expected values, i.e. targets, that correspond to the correct output.

  • Returns the Poisson loss between predictions and expectations.

    Declaration

    @differentiable
    public func poissonLoss<Scalar: TensorFlowFloatingPoint>(
        predicted: Tensor<Scalar>,
        expected: Tensor<Scalar>
    ) -> Tensor<Scalar>

    Parameters

    predicted

    Predicted outputs from a neural network.

    expected

    Expected values, i.e. targets, that correspond to the correct output.

  • Returns the Kullback-Leibler divergence (KL divergence) between expectations and predictions. Given two distributions p and q, KL divergence computes p * log(p / q).

    Declaration

    @differentiable
    public func kullbackLeiblerDivergence<Scalar: TensorFlowFloatingPoint>(
        predicted: Tensor<Scalar>,
        expected: Tensor<Scalar>
    ) -> Tensor<Scalar>

    Parameters

    predicted

    Predicted outputs from a neural network.

    expected

    Expected values, i.e. targets, that correspond to the correct output.

  • Returns the softmax cross entropy (categorical cross entropy) between logits and probabilities.

    Declaration

    @differentiable
    public func softmaxCrossEntropy<Scalar: TensorFlowFloatingPoint>(
        logits: Tensor<Scalar>,
        probabilities: Tensor<Scalar>
    ) -> Tensor<Scalar>

    Parameters

    logits

    Unscaled log probabilities from a neural network.

    probabilities

    Probability values that correspond to the correct output. Each row must be a valid probability distribution.

  • Returns the sigmoid cross entropy (binary cross entropy) between logits and labels.

    Declaration

    @differentiable
    public func sigmoidCrossEntropy<Scalar: TensorFlowFloatingPoint>(
        logits: Tensor<Scalar>,
        labels: Tensor<Scalar>
    ) -> Tensor<Scalar>

    Parameters

    logits

    The unscaled output of a neural network.

    labels

    Integer values that correspond to the correct output.

  • Returns a tensor with the same shape and scalars as the specified tensor.

    Declaration

    @differentiable
    public func identity<Scalar>(_ x: Tensor<Scalar>) -> Tensor<Scalar> where Scalar : TensorFlowScalar
  • Calls the given closure within a context that has everything identical to the current context except for the given learning phase.

    Declaration

    public func withContext<R>(_ context: Context, _ body: () throws -> R) rethrows -> R

    Parameters

    context

    A context that will be set before the closure gets called and restored after the closure returns.

    body

    A nullary closure. If the closure has a return value, that value is also used as the return value of the withContext(_:_:) function.

    Return Value

    The return value, if any, of the body closure.

  • Calls the given closure within a context that has everything identical to the current context except for the given learning phase.

    Declaration

    public func withLearningPhase<R>(
        _ learningPhase: LearningPhase,
        _ body: () throws -> R
    ) rethrows -> R

    Parameters

    learningPhase

    A learning phase that will be set before the closure gets called and restored after the closure returns.

    body

    A nullary closure. If the closure has a return value, that value is also used as the return value of the withLearningPhase(_:_:) function.

    Return Value

    The return value, if any, of the body closure.

  • Calls the given closure within a context that has everything identical to the current context except for the given random seed.

    Declaration

    public func withRandomSeedForTensorFlow<R>(
        _ randomSeed: TensorFlowSeed,
        _ body: () throws -> R
    ) rethrows -> R

    Parameters

    randomSeed

    A random seed that will be set before the closure gets called and restored after the closure returns.

    body

    A nullary closure. If the closure has a return value, that value is also used as the return value of the withRandomSeedForTensorFlow(_:_:) function.

    Return Value

    The return value, if any, of the body closure.

  • Calls the given closure within a context that has everything identical to the current context except for the given random number generator.

    Declaration

    public func withRandomNumberGeneratorForTensorFlow<G: RandomNumberGenerator, R>(
        _ randomNumberGenerator: inout G,
        _ body: () throws -> R
    ) rethrows -> R

    Parameters

    randomNumberGenerator

    A random number generator that will be set before the closure gets called and restored after the closure returns.

    body

    A nullary closure. If the closure has a return value, that value is also used as the return value of the withRandomNumberGeneratorForTensorFlow(_:_:) function.

    Return Value

    The return value, if any, of the body closure.

  • Executes a closure, making TensorFlow operations run on a specific kind of device.

    Declaration

    public func withDevice<R>(
        _ kind: DeviceKind,
        _ index: UInt = 0,
        perform body: () throws -> R
    ) rethrows -> R

    Parameters

    kind

    A kind of device to run TensorFlow operations on.

    index

    The device to run the ops on.

    body

    A closure whose TensorFlow operations are to be executed on the specified kind of device.

  • Executes a closure, making TensorFlow operations run on a device with a specific name.

    Some examples of device names:

    • /device:CPU:0: The CPU of your machine.
    • /GPU:0: Short-hand notation for the first GPU of your machine that is visible to TensorFlow.
    • /job:localhost/replica:0/task:0/device:GPU:1: Fully qualified name of the second GPU of your machine that is visible to TensorFlow.

    Declaration

    public func withDevice<R>(named name: String, perform body: () throws -> R) rethrows -> R

    Parameters

    name

    Device name.

    body

    A closure whose TensorFlow operations are to be executed on the specified kind of device.

  • Executes a closure, allowing TensorFlow to place TensorFlow operations on any device. This should restore the default placement behavior.

    Declaration

    public func withDefaultDevice<R>(perform body: () throws -> R) rethrows -> R

    Parameters

    body

    A closure whose TensorFlow operations are to be executed on the specified kind of device.

  • Returns a function that creates a tensor by initializing all its values to zeros.

    Declaration

    public func zeros<Scalar>() -> ParameterInitializer<Scalar> where Scalar : TensorFlowFloatingPoint, Scalar : TensorFlowScalar
  • Returns a function that creates a tensor by performing Glorot uniform initialization for the specified shape, randomly sampling scalar values from a uniform distribution between -limit and limit, generated by the default random number generator, where limit is sqrt(6 / (fanIn + fanOut)), and fanIn/fanOut represent the number of input and output features multiplied by the receptive field, if present.

    Declaration

    public func glorotUniform<Scalar: TensorFlowFloatingPoint>(
        seed: TensorFlowSeed = Context.local.randomSeed
    ) -> ParameterInitializer<Scalar>
  • Returns the L1 loss between predictions and expectations.

    Declaration

    @differentiable
    public func l1Loss<Scalar: TensorFlowFloatingPoint>(
        predicted: Tensor<Scalar>,
        expected: Tensor<Scalar>,
        reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = { $0.sum() }
    ) -> Tensor<Scalar>

    Parameters

    predicted

    Predicted outputs from a neural network.

    expected

    Expected values, i.e. targets, that correspond to the correct output.

    reduction

    Reduction to apply on the computed element-wise loss values.

  • Returns the L2 loss between predictions and expectations.

    Declaration

    @differentiable
    public func l2Loss<Scalar: TensorFlowFloatingPoint>(
        predicted: Tensor<Scalar>,
        expected: Tensor<Scalar>,
        reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = { $0.sum() }
    ) -> Tensor<Scalar>

    Parameters

    predicted

    Predicted outputs from a neural network.

    expected

    Expected values, i.e. targets, that correspond to the correct output.

    reduction

    Reduction to apply on the computed element-wise loss values.

  • Returns the mean absolute error between predictions and expectations.

    Declaration

    @differentiable
    public func meanAbsoluteError<Scalar: TensorFlowFloatingPoint>(
        predicted: Tensor<Scalar>,
        expected: Tensor<Scalar>
    ) -> Tensor<Scalar>

    Parameters

    predicted

    Predicted outputs from a neural network.

    expected

    Expected values, i.e. targets, that correspond to the correct output.

  • Returns the mean squared error between predictions and expectations.

    Declaration

    @differentiable
    public func meanSquaredError<Scalar: TensorFlowFloatingPoint>(
        predicted: Tensor<Scalar>,
        expected: Tensor<Scalar>
    ) -> Tensor<Scalar>

    Parameters

    predicted

    Predicted outputs from a neural network.

    expected

    Expected values, i.e. targets, that correspond to the correct output.

  • Returns the mean squared logarithmic error between predictions and expectations.

    Note

    Negative tensor entries will be clamped at 0 to avoid undefined logarithmic behavior, as log(_:) is undefined for negative reals.

    Declaration

    @differentiable
    public func meanSquaredLogarithmicError<Scalar: TensorFlowFloatingPoint>(
        predicted: Tensor<Scalar>,
        expected: Tensor<Scalar>
    ) -> Tensor<Scalar>

    Parameters

    predicted

    Predicted outputs from a neural network.

    expected

    Expected values, i.e. targets, that correspond to the correct output.

  • Returns the mean absolute percentage error between predictions and expectations.

    Declaration

    @differentiable
    public func meanAbsolutePercentageError<Scalar: TensorFlowFloatingPoint>(
        predicted: Tensor<Scalar>,
        expected: Tensor<Scalar>
    ) -> Tensor<Scalar>

    Parameters

    predicted

    Predicted outputs from a neural network.

    expected

    Expected values, i.e. targets, that correspond to the correct output.

  • Returns the hinge loss between predictions and expectations.

    Declaration

    @differentiable
    public func hingeLoss<Scalar: TensorFlowFloatingPoint>(
        predicted: Tensor<Scalar>,
        expected: Tensor<Scalar>,
        reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = _mean
    ) -> Tensor<Scalar>

    Parameters

    predicted

    Predicted outputs from a neural network.

    expected

    Expected values, i.e. targets, that correspond to the correct output.

    reduction

    Reduction to apply on the computed element-wise loss values.

  • Returns the squared hinge loss between predictions and expectations.

    Declaration

    @differentiable
    public func squaredHingeLoss<Scalar: TensorFlowFloatingPoint>(
        predicted: Tensor<Scalar>,
        expected: Tensor<Scalar>,
        reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = _mean
    ) -> Tensor<Scalar>

    Parameters

    predicted

    Predicted outputs from a neural network.

    expected

    Expected values, i.e. targets, that correspond to the correct output.

    reduction

    Reduction to apply on the computed element-wise loss values.

  • Returns the categorical hinge loss between predictions and expectations.

    Declaration

    @differentiable
    public func categoricalHingeLoss<Scalar: TensorFlowFloatingPoint>(
        predicted: Tensor<Scalar>,
        expected: Tensor<Scalar>,
        reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = _mean
    ) -> Tensor<Scalar>

    Parameters

    predicted

    Predicted outputs from a neural network.

    expected

    Expected values, i.e. targets, that correspond to the correct output.

    reduction

    Reduction to apply on the computed element-wise loss values.

  • Returns the logarithm of the hyperbolic cosine of the error between predictions and expectations.

    Declaration

    @differentiable
    public func logCoshLoss<Scalar: TensorFlowFloatingPoint>(
        predicted: Tensor<Scalar>,
        expected: Tensor<Scalar>,
        reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = _mean
    ) -> Tensor<Scalar>

    Parameters

    predicted

    Predicted outputs from a neural network.

    expected

    Expected values, i.e. targets, that correspond to the correct output.

    reduction

    Reduction to apply on the computed element-wise loss values.

  • Returns the Poisson loss between predictions and expectations.

    Declaration

    @differentiable
    public func poissonLoss<Scalar: TensorFlowFloatingPoint>(
        predicted: Tensor<Scalar>,
        expected: Tensor<Scalar>,
        reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = _mean
    ) -> Tensor<Scalar>

    Parameters

    predicted

    Predicted outputs from a neural network.

    expected

    Expected values, i.e. targets, that correspond to the correct output.

    reduction

    Reduction to apply on the computed element-wise loss values.

  • Returns the Kullback-Leibler divergence (KL divergence) between expectations and predictions. Given two distributions p and q, KL divergence computes p * log(p / q).

    Declaration

    @differentiable
    public func kullbackLeiblerDivergence<Scalar: TensorFlowFloatingPoint>(
        predicted: Tensor<Scalar>,
        expected: Tensor<Scalar>,
        reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = { $0.sum() }
    ) -> Tensor<Scalar>

    Parameters

    predicted

    Predicted outputs from a neural network.

    expected

    Expected values, i.e. targets, that correspond to the correct output.

    reduction

    Reduction to apply on the computed element-wise loss values.

  • Returns the softmax cross entropy (categorical cross entropy) between logits and labels.

    Declaration

    @differentiable
    public func softmaxCrossEntropy<Scalar: TensorFlowFloatingPoint>(
        logits: Tensor<Scalar>,
        labels: Tensor<Int32>,
        reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = _mean
    ) -> Tensor<Scalar>

    Parameters

    logits

    One-hot encoded outputs from a neural network.

    labels

    Indices (zero-indexed) of the correct outputs.

    reduction

    Reduction to apply on the computed element-wise loss values.

  • Returns the softmax cross entropy (categorical cross entropy) between logits and labels.

    Declaration

    @differentiable
    public func softmaxCrossEntropy<Scalar: TensorFlowFloatingPoint>(
        logits: Tensor<Scalar>,
        probabilities: Tensor<Scalar>,
        reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = _mean
    ) -> Tensor<Scalar>

    Parameters

    logits

    Unscaled log probabilities from a neural network.

    probabilities

    Probability values that correspond to the correct output. Each row must be a valid probability distribution.

    reduction

    Reduction to apply on the computed element-wise loss values.

  • Returns the sigmoid cross entropy (binary cross entropy) between logits and labels.

    The reduction is applied over all elements. If reduction over the batch size is intended instead, please consider scaling the loss accordingly.

    Declaration

    @differentiable
    public func sigmoidCrossEntropy<Scalar: TensorFlowFloatingPoint>(
        logits: Tensor<Scalar>,
        labels: Tensor<Scalar>,
        reduction: @differentiable (Tensor<Scalar>) -> Tensor<Scalar> = _mean
    ) -> Tensor<Scalar>

    Parameters

    logits

    The unscaled output of a neural network.

    labels

    Integer values that correspond to the correct output.

    reduction

    Reduction to apply on the computed element-wise loss values.

  • Returns a 2-D convolution with the specified input, filter, strides, and padding.

    Precondition

    input must have rank 4.

    Precondition

    filter must have rank 4.

    Declaration

    @differentiable
    public func conv2D<Scalar: TensorFlowFloatingPoint>(
        _ input: Tensor<Scalar>,
        filter: Tensor<Scalar>,
        strides: (Int, Int, Int, Int) = (1, 1, 1, 1),
        padding: Padding = .valid,
        dilations: (Int, Int, Int, Int) = (1, 1, 1, 1)
    ) -> Tensor<Scalar>

    Parameters

    input

    The input.

    filter

    The convolution filter.

    strides

    The strides of the sliding filter for each dimension of the input.

    padding

    The padding for the operation.

    dilations

    The dilation factor for each dimension of the input.

  • Returns a 3-D convolution with the specified input, filter, strides, and padding.

    Precondition

    input must have rank 5.

    Precondition

    filter must have rank 5.

    Declaration

    @differentiable
    public func conv3D<Scalar: TensorFlowFloatingPoint>(
        _ input: Tensor<Scalar>,
        filter: Tensor<Scalar>,
        strides: (Int, Int, Int, Int, Int) = (1, 1, 1, 1, 1),
        padding: Padding = .valid
    ) -> Tensor<Scalar>

    Parameters

    input

    The input.

    filter

    The convolution filter.

    strides

    The strides of the sliding filter for each dimension of the input.

    padding

    The padding for the operation.

  • Returns a 2-D depthwise convolution with the specified input, filter, strides, and padding.

    Precondition

    input must have rank 4.

    Precondition

    filter must have rank 4.

    Declaration

    @differentiable
    public func depthwiseConv2D<Scalar: TensorFlowFloatingPoint>(
        _ input: Tensor<Scalar>,
        filter: Tensor<Scalar>,
        strides: (Int, Int, Int, Int),
        padding: Padding
    ) -> Tensor<Scalar>

    Parameters

    input

    The input.

    filter

    The depthwise convolution filter.

    strides

    The strides of the sliding filter for each dimension of the input.

    padding

    The padding for the operation.

  • Returns a 2-D max pooling, with the specified filter sizes, strides, and padding.

    Declaration

    @differentiable
    public func maxPool2D<Scalar: TensorFlowFloatingPoint>(
        _ input: Tensor<Scalar>,
        filterSize: (Int, Int, Int, Int),
        strides: (Int, Int, Int, Int),
        padding: Padding
    ) -> Tensor<Scalar>

    Parameters

    input

    The input.

    filterSize

    The dimensions of the pooling kernel.

    strides

    The strides of the sliding filter for each dimension of the input.

    padding

    The padding for the operation.

  • Returns a 3-D max pooling, with the specified filter sizes, strides, and padding.

    Declaration

    @differentiable
    public func maxPool3D<Scalar: TensorFlowFloatingPoint>(
        _ input: Tensor<Scalar>,
        filterSize: (Int, Int, Int, Int, Int),
        strides: (Int, Int, Int, Int, Int),
        padding: Padding
    ) -> Tensor<Scalar>

    Parameters

    input

    The input.

    filterSize

    The dimensions of the pooling kernel.

    strides

    The strides of the sliding filter for each dimension of the input.

    padding

    The padding for the operation.

  • Returns a 2-D average pooling, with the specified filter sizes, strides, and padding.

    Declaration

    @differentiable
    public func avgPool2D<Scalar: TensorFlowFloatingPoint>(
        _ input: Tensor<Scalar>,
        filterSize: (Int, Int, Int, Int),
        strides: (Int, Int, Int, Int),
        padding: Padding
    ) -> Tensor<Scalar>

    Parameters

    input

    The input.

    filterSize

    The dimensions of the pooling kernel.

    strides

    The strides of the sliding filter for each dimension of the input.

    padding

    The padding for the operation.

  • Returns a 3-D average pooling, with the specified filter sizes, strides, and padding.

    Declaration

    @differentiable
    public func avgPool3D<Scalar: TensorFlowFloatingPoint>(
        _ input: Tensor<Scalar>,
        filterSize: (Int, Int, Int, Int, Int),
        strides: (Int, Int, Int, Int, Int),
        padding: Padding
    ) -> Tensor<Scalar>

    Parameters

    input

    The input.

    filterSize

    The dimensions of the pooling kernel.

    strides

    The strides of the sliding filter for each dimension of the input.

    padding

    The padding for the operation.