Source code for holocron.optim.lars

# Copyright (C) 2019-2022, François-Guillaume Fernandez.

# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0> for full license details.

from typing import Callable, Dict, Iterable, Optional, Tuple

import torch
from torch.optim.optimizer import Optimizer


class Lars(Optimizer):
    r"""Implements the LARS optimizer from `"Large batch training of convolutional networks"
    <https://arxiv.org/pdf/1708.03888.pdf>`_.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups
        lr (float, optional): learning rate
        momentum (float, optional): momentum factor (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        nesterov (bool, optional): enables Nesterov momentum (default: False)
        scale_clip (tuple, optional): the lower and upper bounds for the weight norm in local LR of LARS
    """

    def __init__(
        self,
        params: Iterable[torch.nn.Parameter],  # type: ignore[name-defined]
        lr: float = 1e-3,
        momentum: float = 0.0,
        dampening: float = 0.0,
        weight_decay: float = 0.0,
        nesterov: bool = False,
        scale_clip: Optional[Tuple[float, float]] = None,
    ) -> None:
        if not isinstance(lr, float) or lr < 0.0:
            raise ValueError(f"Invalid learning rate: {lr}")
        if momentum < 0.0:
            raise ValueError(f"Invalid momentum value: {momentum}")
        if weight_decay < 0.0:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(Lars, self).__init__(params, defaults)

        # LARS arguments
        self.scale_clip = scale_clip
        if self.scale_clip is None:
            self.scale_clip = (0.0, 10.0)

    def __setstate__(self, state: Dict[str, torch.Tensor]):
        super(Lars, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault("nesterov", False)

    @torch.no_grad()
    def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            weight_decay = group["weight_decay"]
            momentum = group["momentum"]
            dampening = group["dampening"]
            nesterov = group["nesterov"]

            for p in group["params"]:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if weight_decay != 0:
                    d_p.add_(p.data, alpha=weight_decay)
                if momentum != 0:
                    param_state = self.state[p]
                    if "momentum_buffer" not in param_state:
                        buf = param_state["momentum_buffer"] = torch.clone(d_p).detach()
                    else:
                        buf = param_state["momentum_buffer"]
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf

                # LARS
                p_norm = p.data.pow(2).sum().sqrt()
                update_norm = d_p.pow(2).sum().sqrt()

                # Compute the local LR
                if p_norm == 0 or update_norm == 0:
                    local_lr = 1
                else:
                    local_lr = p_norm / update_norm

                p.data.add_(d_p, alpha=-group["lr"] * local_lr)

        return loss
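The optimizer follows the standard torch.optim interface, so it drops in wherever torch.optim.SGD would be used; the layer-wise trust ratio is computed inside step() and needs no extra calls. Below is a minimal usage sketch, assuming holocron is installed and exposes the class as holocron.optim.Lars (the model, data, and hyperparameter values are illustrative only):

# Minimal usage sketch: train one step with LARS on a toy model.
# Assumes `holocron` is installed and `Lars` is importable from holocron.optim.
import torch
from torch import nn

from holocron.optim import Lars

model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 2))
# Hyperparameters are illustrative; LARS is typically paired with large batches
# and a warmup schedule in practice.
optimizer = Lars(model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
criterion = nn.CrossEntropyLoss()

x, target = torch.rand(8, 16), torch.randint(0, 2, (8,))

optimizer.zero_grad()
loss = criterion(model(x), target)
loss.backward()
optimizer.step()  # applies the per-layer LR scaling internally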