public class SGD<Model: Layer, Scalar: TensorFlowFloatingPoint>: Optimizer
    where Model.AllDifferentiableVariables == Model.CotangentVector

Stochastic gradient descent (SGD) optimizer.

An optimizer that implements stochastic gradient descent, with support for momentum, learning rate decay, and Nesterov momentum.
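The hyperparameters listed below combine into a single momentum-SGD update rule. As a minimal sketch of one common formulation of that rule, written for a single scalar parameter in plain Swift (the actual implementation operates on tensor-valued parameters; the function and its names are illustrative, not part of this API):

    func sgdStep(
        parameter: inout Float,
        gradient: Float,
        velocity: inout Float,
        step: Int,
        learningRate: Float,
        momentum: Float,
        decay: Float,
        nesterov: Bool
    ) {
        // Decay the learning rate over time: lr_t = learningRate / (1 + decay * t).
        let lr = learningRate / (1 + decay * Float(step))
        // Accumulate a velocity from past gradients; momentum damps oscillations.
        velocity = momentum * velocity - lr * gradient
        if nesterov {
            // Nesterov momentum looks one step ahead along the velocity direction.
            parameter += momentum * velocity - lr * gradient
        } else {
            parameter += velocity
        }
    }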

  • The learning rate.

    Declaration

    public let learningRate: Scalar
  • The momentum factor. It accelerates stochastic gradient descent in the relevant direction and dampens oscillations.

    Declaration

    public let momentum: Scalar
  • The learning rate decay.

    Declaration

    public let decay: Scalar
  • Use Nesterov momentum if true.

    Declaration

    public let nesterov: Bool
  • Creates an instance with the given hyperparameters.

    Declaration

    public init(
        learningRate: Scalar = 0.01,
        momentum: Scalar = 0,
        decay: Scalar = 0,
        nesterov: Bool = false
    )
  • Creates an instance for the given model. The model value is used only to infer Model; scalarType determines Scalar explicitly.

    Declaration

    public convenience init(
        for _: __shared Model,
        learningRate: Scalar = 0.01,
        momentum: Scalar = 0,
        decay: Scalar = 0,
        nesterov: Bool = false,
        scalarType: Scalar.Type
    )
  • Updates the model's differentiable variables along the given direction.

    Declaration

    public func update(_ model: inout Model.AllDifferentiableVariables,
                       along direction: Model.CotangentVector)
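As a usage sketch in the style of the contemporaneous Swift for TensorFlow API: the toy model, the random data, and the loop count below are illustrative assumptions, not part of this class.

    import TensorFlow

    // A toy classifier; any Layer whose AllDifferentiableVariables equals
    // its CotangentVector satisfies the class constraint.
    struct IrisModel: Layer {
        var layer1 = Dense<Float>(inputSize: 4, outputSize: 10, activation: relu)
        var layer2 = Dense<Float>(inputSize: 10, outputSize: 3, activation: identity)

        @differentiable
        func applied(to input: Tensor<Float>, in context: Context) -> Tensor<Float> {
            return input.sequenced(in: context, through: layer1, layer2)
        }
    }

    var model = IrisModel()
    let optimizer = SGD<IrisModel, Float>(learningRate: 0.02, momentum: 0.9)
    let context = Context(learningPhase: .training)
    let x = Tensor<Float>(randomNormal: [32, 4])
    let y = Tensor<Int32>(zeros: [32])

    for _ in 0..<100 {
        // Differentiate the loss with respect to the model's variables...
        let grads = model.gradient { model -> Tensor<Float> in
            let logits = model.applied(to: x, in: context)
            return softmaxCrossEntropy(logits: logits, labels: y)
        }
        // ...then take one SGD step along the resulting cotangent vector.
        optimizer.update(&model.allDifferentiableVariables, along: grads)
    }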