Module: tfm.optimization.optimizer_factory

Optimizer factory class.

Classes

class OptimizerFactory: Optimizer factory class.
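
Example: the factory is driven by an OptimizationConfig whose 'type' fields select entries from the LR_CLS, OPTIMIZERS_CLS, and WARMUP_CLS dictionaries below. A minimal end-to-end sketch, following the usage documented in the factory's source (assumes the TF Model Garden official package is installed):

from official.modeling import optimization

params = {
    'optimizer': {'type': 'sgd', 'sgd': {'momentum': 0.9}},
    'learning_rate': {'type': 'stepwise',
                      'stepwise': {'boundaries': [10000, 20000],
                                   'values': [0.1, 0.01, 0.001]}},
    'warmup': {'type': 'linear',
               'linear': {'warmup_steps': 500,
                          'warmup_learning_rate': 0.01}},
}
opt_config = optimization.OptimizationConfig(params)
opt_factory = optimization.OptimizerFactory(opt_config)
lr = opt_factory.build_learning_rate()       # decay schedule, wrapped with warmup
optimizer = opt_factory.build_optimizer(lr)  # a keras SGD using that schedule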

Functions

register_optimizer_cls(...): Registers a custom optimizer class.
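
A hedged sketch of registering a custom optimizer class. The key 'my_momentum_sgd' and the MyMomentumSGD class are hypothetical; registering a key that already exists in OPTIMIZERS_CLS raises a ValueError, and (per the function's docstring) a matching config dataclass must still be defined before OptimizerFactory can build the new optimizer from a config:

import tensorflow as tf
from official.modeling.optimization import optimizer_factory

class MyMomentumSGD(tf.keras.optimizers.SGD):
  """Hypothetical optimizer subclass, used only for illustration."""

# Adds the class to OPTIMIZERS_CLS under a new 'type' string.
optimizer_factory.register_optimizer_cls('my_momentum_sgd', MyMomentumSGD)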

LR_CLS

{
 'cosine': <class 'official.modeling.optimization.lr_schedule.CosineDecayWithOffset'>,
 'exponential': <class 'official.modeling.optimization.lr_schedule.ExponentialDecayWithOffset'>,
 'polynomial': <class 'official.modeling.optimization.lr_schedule.PolynomialDecayWithOffset'>,
 'power': <class 'official.modeling.optimization.lr_schedule.DirectPowerDecay'>,
 'power_linear': <class 'official.modeling.optimization.lr_schedule.PowerAndLinearDecay'>,
 'power_with_offset': <class 'official.modeling.optimization.lr_schedule.PowerDecayWithOffset'>,
 'step_cosine_with_offset': <class 'official.modeling.optimization.lr_schedule.StepCosineDecayWithOffset'>,
 'stepwise': <class 'official.modeling.optimization.lr_schedule.PiecewiseConstantDecayWithOffset'>
}
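
Each key is the 'type' string accepted in the learning_rate config; build_learning_rate looks up the class here and constructs it from the matching sub-config. A minimal sketch selecting the cosine schedule (the field names under 'cosine' are assumptions, mirroring tf.keras.optimizers.schedules.CosineDecay):

from official.modeling import optimization

opt_config = optimization.OptimizationConfig({
    'optimizer': {'type': 'sgd'},
    'learning_rate': {'type': 'cosine',
                      'cosine': {'initial_learning_rate': 0.1,
                                 'decay_steps': 10000}},
})
# 'cosine' resolves to lr_schedule.CosineDecayWithOffset via LR_CLS.
lr = optimization.OptimizerFactory(opt_config).build_learning_rate()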

OPTIMIZERS_CLS

{
 'adafactor': 'Unimplemented',
 'adagrad': <class 'keras.optimizers.optimizer_v2.adagrad.Adagrad'>,
 'adam': <class 'keras.optimizers.optimizer_v2.adam.Adam'>,
 'adamw': <class 'official.nlp.optimization.AdamWeightDecay'>,
 'lamb': <class 'tensorflow_addons.optimizers.lamb.LAMB'>,
 'lars': <class 'official.modeling.optimization.lars_optimizer.LARS'>,
 'rmsprop': <class 'keras.optimizers.optimizer_v2.rmsprop.RMSprop'>,
 'sgd': <class 'keras.optimizers.optimizer_v2.gradient_descent.SGD'>,
 'slide': 'Unimplemented'
}
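
The optimizer config's 'type' string is looked up in this dictionary; note that 'adafactor' and 'slide' are placeholder entries with no implementation behind them. A sketch selecting AdamWeightDecay (the 'weight_decay_rate' field name is an assumption):

from official.modeling import optimization

opt_config = optimization.OptimizationConfig({
    'optimizer': {'type': 'adamw',
                  'adamw': {'weight_decay_rate': 0.01}},
    'learning_rate': {'type': 'stepwise',
                      'stepwise': {'boundaries': [10000],
                                   'values': [1e-4, 1e-5]}},
})
opt_factory = optimization.OptimizerFactory(opt_config)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())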

WARMUP_CLS

{
 'linear': <class 'official.modeling.optimization.lr_schedule.LinearWarmup'>,
 'polynomial': <class 'official.modeling.optimization.lr_schedule.PolynomialWarmUp'>
}
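
When a 'warmup' section is present, build_learning_rate wraps the base schedule so the warmup class controls the first warmup_steps steps before handing over to the decay schedule. A sketch using polynomial warmup (the 'power' field name is an assumption based on PolynomialWarmUp; the cosine fields are assumed as above):

from official.modeling import optimization

opt_config = optimization.OptimizationConfig({
    'optimizer': {'type': 'sgd', 'sgd': {'momentum': 0.9}},
    'learning_rate': {'type': 'cosine',
                      'cosine': {'initial_learning_rate': 0.1,
                                 'decay_steps': 10000}},
    'warmup': {'type': 'polynomial',
               'polynomial': {'warmup_steps': 1000, 'power': 1.0}},
})
# Polynomial ramp for the first 1000 steps, cosine decay afterwards.
lr = optimization.OptimizerFactory(opt_config).build_learning_rate()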