# Copyright (C) 2019-2022, François-Guillaume Fernandez.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0> for full license details.

from typing import Callable, Optional

import torch
from torch.optim import Adam

from . import functional as F
class AdaBelief(Adam):
    """Implements the AdaBelief optimizer from `"AdaBelief Optimizer: Adapting Stepsizes by the
    Belief in Observed Gradients" <https://arxiv.org/pdf/2010.07468.pdf>`_.

    AdaBelief reuses Adam's hyperparameters and state layout (so this class only overrides
    ``step``), but rescales updates by the belief in the gradient, i.e. the EMA of the squared
    *deviation* of the gradient from its EMA. The actual math lives in ``F.adabelief``.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups
        lr (float, optional): learning rate
        betas (Tuple[float, float], optional): coefficients used for running averages
            (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve numerical stability
            (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (bool, optional): whether to use the AMSGrad variant (default: False)
    """

    @torch.no_grad()
    def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model and returns the loss.

        Returns:
            the loss value returned by ``closure``, or ``None`` when no closure is given.
        """
        loss = None
        if closure is not None:
            # Gradients must be enabled for the closure despite the surrounding no_grad decorator
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            # Buckets passed as flat lists to the functional implementation
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            max_exp_avg_sqs = []
            state_steps = []

            for p in group["params"]:
                if p.grad is not None:
                    params_with_grad.append(p)
                    if p.grad.is_sparse:
                        raise RuntimeError(f"{self.__class__.__name__} does not support sparse gradients")
                    grads.append(p.grad)

                    state = self.state[p]
                    # Lazy state initialization
                    if len(state) == 0:
                        state["step"] = 0
                        # Exponential moving average of gradient values
                        state["exp_avg"] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state["exp_avg_sq"] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        if group["amsgrad"]:
                            # Maintains max of all exp. moving avg. of sq. grad. values
                            state["max_exp_avg_sq"] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state["exp_avg"])
                    exp_avg_sqs.append(state["exp_avg_sq"])
                    if group["amsgrad"]:
                        max_exp_avg_sqs.append(state["max_exp_avg_sq"])

                    # Update the steps for each param group update
                    state["step"] += 1
                    # Record the step after step update
                    state_steps.append(state["step"])

            beta1, beta2 = group["betas"]
            F.adabelief(
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                max_exp_avg_sqs,
                state_steps,
                group["amsgrad"],
                beta1,
                beta2,
                group["lr"],
                group["weight_decay"],
                group["eps"],
            )
        return loss