public class Adam<Model: Layer, Scalar: TensorFlowFloatingPoint>: Optimizer
    where Model.AllDifferentiableVariables == Model.CotangentVector

Adam optimizer.

Implements the Adam optimization algorithm, which maintains exponentially decayed running estimates of the first and second moments of the gradients and uses them to adapt the step size for each model parameter.

Reference: "Adam: A Method for Stochastic Optimization" (Kingma and Ba, 2014)
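For orientation, a standard formulation of the Adam step from the paper above is sketched below; the exact handling of bias correction and decay in this implementation may differ. Here g_t is the gradient at step t, and the learning rate and the beta1, beta2, and epsilon coefficients correspond to the properties documented below:

\[
\begin{aligned}
m_t &= \beta_1\, m_{t-1} + (1 - \beta_1)\, g_t \\
v_t &= \beta_2\, v_{t-1} + (1 - \beta_2)\, g_t^2 \\
\hat{m}_t &= m_t / (1 - \beta_1^t), \qquad \hat{v}_t = v_t / (1 - \beta_2^t) \\
\theta_t &= \theta_{t-1} - \text{learningRate} \cdot \hat{m}_t / (\sqrt{\hat{v}_t} + \epsilon)
\end{aligned}
\]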

  • The learning rate.

    Declaration

    public let learningRate: Scalar
  • The exponential decay rate used for the first moment (mean) estimates of the gradients.

    Declaration

    public var beta1: Scalar
  • The exponential decay rate used for the second moment (uncentered variance) estimates of the gradients.

    Declaration

    public var beta2: Scalar
  • A small scalar added to the denominator to improve numerical stability.

    Declaration

    public let epsilon: Scalar
  • The learning rate decay applied per optimization step.

    Declaration

    public let decay: Scalar
  • Creates an instance with the given hyperparameters (see the usage sketch at the end of this page).

    Declaration

    public init(
        learningRate: Scalar = 1e-3,
        beta1: Scalar = 0.9,
        beta2: Scalar = 0.999,
        epsilon: Scalar = 1e-8,
        decay: Scalar = 0
    )
  • Creates an instance for optimizing the given model, with the given hyperparameters. The scalarType argument determines the Scalar generic parameter.

    Declaration

    public convenience init(
        for _: __shared Model,
        learningRate: Scalar = 1e-3,
        beta1: Scalar = 0.9,
        beta2: Scalar = 0.999,
        epsilon: Scalar = 1e-8,
        decay: Scalar = 0,
        scalarType: Scalar.Type
    )
  • Updates the given model variables along the given gradient direction according to the Adam update rule (see the usage sketch below).

    Declaration

    public func update(_ model: inout Model.AllDifferentiableVariables,
                       along direction: Model.AllDifferentiableVariables)
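The following is a minimal usage sketch, not part of the generated API surface. It assumes a hypothetical MyModel type conforming to Layer with Float parameters, plus input and label tensors x and y; helper names such as applied(to:), softmaxCrossEntropy(logits:labels:), gradient(at:in:), and allDifferentiableVariables reflect the Swift for TensorFlow toolchain this page targets and may differ in other versions.

    import TensorFlow

    var model = MyModel()  // hypothetical Layer-conforming model

    // Designated initializer: generic arguments must be stated explicitly
    // because no model value is passed.
    let _ = Adam<MyModel, Float>(learningRate: 2e-4, beta1: 0.5)

    // Convenience initializer: Model and Scalar are inferred from the model
    // argument and scalarType.
    let optimizer = Adam(for: model, learningRate: 1e-3, scalarType: Float.self)

    // One training step: differentiate the loss with respect to the model,
    // then let the optimizer apply the Adam rule to the parameters in place.
    let grads = gradient(at: model) { model -> Tensor<Float> in
        let logits = model.applied(to: x)
        return softmaxCrossEntropy(logits: logits, labels: y)
    }
    optimizer.update(&model.allDifferentiableVariables, along: grads)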