"""Adam optimizer implementation."""

from tensorflow.python.eager import def_function
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend_config
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import gen_training_ops
from tensorflow.python.util.tf_export import keras_export


@keras_export('keras.optimizers.Adam')
class Adam(optimizer_v2.OptimizerV2):
  r"""Optimizer that implements the Adam algorithm.

  Adam optimization is a stochastic gradient descent method that is based on
  adaptive estimation of first-order and second-order moments.

  According to
  [Kingma et al., 2014](http://arxiv.org/abs/1412.6980),
  the method is "*computationally
  efficient, has little memory requirement, invariant to diagonal rescaling of
  gradients, and is well suited for problems that are large in terms of
  data/parameters*".

  Args:
    learning_rate: A `Tensor`, floating point value, or a schedule that is a
      `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable
      that takes no arguments and returns the actual value to use. The
      learning rate. Defaults to 0.001.
    beta_1: A float value or a constant float tensor, or a callable
      that takes no arguments and returns the actual value to use. The
      exponential decay rate for the 1st moment estimates. Defaults to 0.9.
    beta_2: A float value or a constant float tensor, or a callable
      that takes no arguments and returns the actual value to use. The
      exponential decay rate for the 2nd moment estimates. Defaults to 0.999.
    epsilon: A small constant for numerical stability. This epsilon is
      "epsilon hat" in the Kingma and Ba paper (in the formula just before
      Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to
      1e-7.
    amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm from
      the paper "On the Convergence of Adam and beyond". Defaults to `False`.
    name: Optional name for the operations created when applying gradients.
      Defaults to `"Adam"`.
    **kwargs: Keyword arguments. Allowed to be `"clipnorm"` or `"clipvalue"`.
      `"clipnorm"` (float) clips gradients by norm; `"clipvalue"` (float) clips
      gradients by value (see the example below).
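
  For example, to clip each gradient to a maximum norm of 1.0 before the
  update is applied (the clipping threshold is illustrative):

  >>> opt = tf.keras.optimizers.Adam(learning_rate=0.001, clipnorm=1.0)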

  Usage:

  >>> opt = tf.keras.optimizers.Adam(learning_rate=0.1)
  >>> var1 = tf.Variable(10.0)
  >>> loss = lambda: (var1 ** 2)/2.0       # d(loss)/d(var1) == var1
  >>> step_count = opt.minimize(loss, [var1]).numpy()
  >>> # The first step is `-learning_rate*sign(grad)`
  >>> var1.numpy()
  9.9

  Reference:
    - [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
    - [Reddi et al., 2018](
        https://openreview.net/pdf?id=ryQu7f-RZ) for `amsgrad`.

  Notes:

  The default value of 1e-7 for epsilon might not be a good default in
  general. For example, when training an Inception network on ImageNet, a
  current good choice is 1.0 or 0.1. Note that since Adam uses the
  formulation just before Section 2.1 of the Kingma and Ba paper rather than
  the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon
  hat" in the paper.

  The sparse implementation of this algorithm (used when the gradient is an
  IndexedSlices object, typically because of `tf.gather` or an embedding
  lookup in the forward pass) does apply momentum to variable slices even if
  they were not used in the forward pass (meaning they have a gradient equal
  to zero). Momentum decay (beta1) is also applied to the entire momentum
  accumulator. This means that the sparse behavior is equivalent to the dense
  behavior (in contrast to some momentum implementations which ignore momentum
  unless a variable slice was actually used).
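
  A minimal sketch of such a sparse update (the gradient of a `tf.gather`
  lookup is an `IndexedSlices`; the variable shape and indices are
  illustrative):

  >>> emb = tf.Variable(tf.ones([4, 2]))
  >>> opt = tf.keras.optimizers.Adam(learning_rate=0.1)
  >>> sparse_loss = lambda: tf.reduce_sum(tf.gather(emb, [0, 2]))
  >>> step_count = opt.minimize(sparse_loss, [emb])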
  """

  _HAS_AGGREGATE_GRAD = True

  def __init__(self,
               learning_rate=0.001,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=1e-7,
               amsgrad=False,
               name='Adam',
               **kwargs):
    super(Adam, self).__init__(name, **kwargs)
    self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
    self._set_hyper('decay', self._initial_decay)
    self._set_hyper('beta_1', beta_1)
    self._set_hyper('beta_2', beta_2)
    self.epsilon = epsilon or backend_config.epsilon()
    self.amsgrad = amsgrad

  def _create_slots(self, var_list):
    # Create slots for the first and second moments, plus `vhat` when AMSGrad
    # is enabled.
    for var in var_list:
      self.add_slot(var, 'm')
    for var in var_list:
      self.add_slot(var, 'v')
    if self.amsgrad:
      for var in var_list:
        self.add_slot(var, 'vhat')

  def _prepare_local(self, var_device, var_dtype, apply_state):
    super(Adam, self)._prepare_local(var_device, var_dtype, apply_state)

    local_step = math_ops.cast(self.iterations + 1, var_dtype)
    beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
    beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))
    beta_1_power = math_ops.pow(beta_1_t, local_step)
    beta_2_power = math_ops.pow(beta_2_t, local_step)
    lr = (apply_state[(var_device, var_dtype)]['lr_t'] *
          (math_ops.sqrt(1 - beta_2_power) / (1 - beta_1_power)))
    apply_state[(var_device, var_dtype)].update(
        dict(
            lr=lr,
            epsilon=ops.convert_to_tensor_v2_with_dispatch(
                self.epsilon, var_dtype),
            beta_1_t=beta_1_t,
            beta_1_power=beta_1_power,
            one_minus_beta_1_t=1 - beta_1_t,
            beta_2_t=beta_2_t,
            beta_2_power=beta_2_power,
            one_minus_beta_2_t=1 - beta_2_t))

  def set_weights(self, weights):
    params = self.weights
    # If the weights are generated by a Keras V1 optimizer, they include vhats
    # even without amsgrad (3x + 1 variables instead of 2x + 1); filter the
    # vhats out for compatibility.
    num_vars = int((len(params) - 1) / 2)
    if len(weights) == 3 * num_vars + 1:
      weights = weights[:len(params)]
    super(Adam, self).set_weights(weights)

  def _resource_apply_dense(self, grad, var, apply_state=None):
    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = ((apply_state or {}).get((var_device, var_dtype)) or
                    self._fallback_apply_state(var_device, var_dtype))

    m = self.get_slot(var, 'm')
    v = self.get_slot(var, 'v')

    if not self.amsgrad:
      return gen_training_ops.ResourceApplyAdam(
          var=var.handle, m=m.handle, v=v.handle,
          beta1_power=coefficients['beta_1_power'],
          beta2_power=coefficients['beta_2_power'],
          lr=coefficients['lr_t'],
          beta1=coefficients['beta_1_t'],
          beta2=coefficients['beta_2_t'],
          epsilon=coefficients['epsilon'],
          grad=grad, use_locking=self._use_locking)
    vhat = self.get_slot(var, 'vhat')
    return gen_training_ops.ResourceApplyAdamWithAmsgrad(
        var=var.handle, m=m.handle, v=v.handle, vhat=vhat.handle,
        beta1_power=coefficients['beta_1_power'],
        beta2_power=coefficients['beta_2_power'],
        lr=coefficients['lr_t'],
        beta1=coefficients['beta_1_t'],
        beta2=coefficients['beta_2_t'],
        epsilon=coefficients['epsilon'],
        grad=grad, use_locking=self._use_locking)

  def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = ((apply_state or {}).get((var_device, var_dtype)) or
                    self._fallback_apply_state(var_device, var_dtype))

    # m_t = beta_1 * m + (1 - beta_1) * g_t
    m = self.get_slot(var, 'm')
    m_scaled_g_values = grad * coefficients['one_minus_beta_1_t']
    m_t = state_ops.assign(m, m * coefficients['beta_1_t'],
                           use_locking=self._use_locking)
    with ops.control_dependencies([m_t]):
      m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)

    # v_t = beta_2 * v + (1 - beta_2) * (g_t * g_t)
    v = self.get_slot(var, 'v')
    v_scaled_g_values = (grad * grad) * coefficients['one_minus_beta_2_t']
    v_t = state_ops.assign(v, v * coefficients['beta_2_t'],
                           use_locking=self._use_locking)
    with ops.control_dependencies([v_t]):
      v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)

    if not self.amsgrad:
      v_sqrt = math_ops.sqrt(v_t)
      var_update = state_ops.assign_sub(
          var, coefficients['lr'] * m_t / (v_sqrt + coefficients['epsilon']),
          use_locking=self._use_locking)
      return control_flow_ops.group(*[var_update, m_t, v_t])
    v_hat = self.get_slot(var, 'vhat')
    v_hat_t = math_ops.maximum(v_hat, v_t)
    with ops.control_dependencies([v_hat_t]):
      v_hat_t = state_ops.assign(v_hat, v_hat_t, use_locking=self._use_locking)
    v_hat_sqrt = math_ops.sqrt(v_hat_t)
    var_update = state_ops.assign_sub(
        var, coefficients['lr'] * m_t / (v_hat_sqrt + coefficients['epsilon']),
        use_locking=self._use_locking)
    return control_flow_ops.group(*[var_update, m_t, v_t, v_hat_t])

  def get_config(self):
    config = super(Adam, self).get_config()
    config.update({
        'learning_rate': self._serialize_hyperparameter('learning_rate'),
        'decay': self._serialize_hyperparameter('decay'),
        'beta_1': self._serialize_hyperparameter('beta_1'),
        'beta_2': self._serialize_hyperparameter('beta_2'),
        'epsilon': self.epsilon,
        'amsgrad': self.amsgrad,
    })
    return config


class NonFusedAdam(optimizer_v2.OptimizerV2):
  r"""Optimizer that implements the Adam algorithm without fused kernels.

  Adam optimization is a stochastic gradient descent method that is based on
  adaptive estimation of first-order and second-order moments.
  According to the paper
  [Adam: A Method for Stochastic Optimization. Kingma et al.,
  2014](http://arxiv.org/abs/1412.6980), the method is "*computationally
  efficient, has little memory requirement, invariant to diagonal rescaling of
  gradients, and is well suited for problems that are large in terms of
  data/parameters*".

  For AMSGrad see [On The Convergence Of Adam And Beyond.
  Reddi et al., 2018](https://openreview.net/pdf?id=ryQu7f-RZ).

  **If amsgrad = False**:

  initialize $m_0$ as 1st moment vector
  initialize $v_0$ as 2nd moment vector

  The update rule for $\theta$ with gradient $g$ uses an optimization
  described at the end of section 2 of the paper:

  $$lr_t = \mathrm{learning\_rate} *
    \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$
  $$m_t = \beta_1 * m_{t-1} + (1 - \beta_1) * g$$
  $$v_t = \beta_2 * v_{t-1} + (1 - \beta_2) * g^2$$
  $$\theta_t = \theta_{t-1} - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$
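
  A scalar sketch of a single update step with `amsgrad=False`, in plain
  Python with illustrative values ($\theta_0 = 1$, $g = 0.5$, $t = 1$):

  >>> lr, beta_1, beta_2, eps = 0.001, 0.9, 0.999, 1e-7
  >>> m, v, theta, g, t = 0.0, 0.0, 1.0, 0.5, 1
  >>> lr_t = lr * (1 - beta_2 ** t) ** 0.5 / (1 - beta_1 ** t)
  >>> m = beta_1 * m + (1 - beta_1) * g
  >>> v = beta_2 * v + (1 - beta_2) * g ** 2
  >>> theta = theta - lr_t * m / (v ** 0.5 + eps)
  >>> round(theta, 4)
  0.999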

  **If amsgrad = True**:

  initialize $m_0$ as 1st moment vector
  initialize $v_0$ as 2nd moment vector
  initialize $\hat{v}_0$ as 2nd moment vector

  The update rule for $\theta$ with gradient $g$ uses an optimization
  described at the end of section 2 of the paper:

  $$lr_t = \mathrm{learning\_rate} *
    \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$

  $$m_t = \beta_1 * m_{t-1} + (1 - \beta_1) * g$$
  $$v_t = \beta_2 * v_{t-1} + (1 - \beta_2) * g^2$$
  $$\hat{v}_t = \max(\hat{v}_{t-1}, v_t)$$
  $$\theta_t = \theta_{t-1} - lr_t * m_t / (\sqrt{\hat{v}_t} + \epsilon)$$
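
  Continuing the scalar sketch above, `amsgrad=True` divides by the running
  maximum of the second-moment estimate instead; on the very first step
  $\hat{v}_1 = v_1$, so the two variants coincide:

  >>> vhat = max(0.0, v)
  >>> round(1.0 - lr_t * m / (vhat ** 0.5 + eps), 4)
  0.999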

  The default value of 1e-7 for epsilon might not be a good default in
  general. For example, when training an Inception network on ImageNet, a
  current good choice is 1.0 or 0.1. Note that since Adam uses the
  formulation just before Section 2.1 of the Kingma and Ba paper rather than
  the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon
  hat" in the paper.

  The sparse implementation of this algorithm (used when the gradient is an
  IndexedSlices object, typically because of `tf.gather` or an embedding
  lookup in the forward pass) does apply momentum to variable slices even if
  they were not used in the forward pass (meaning they have a gradient equal
  to zero). Momentum decay (beta1) is also applied to the entire momentum
  accumulator. This means that the sparse behavior is equivalent to the dense
  behavior (in contrast to some momentum implementations which ignore momentum
  unless a variable slice was actually used).

  Usage:

  >>> opt = tf.keras.optimizers.Adam(learning_rate=0.1)
  >>> var1 = tf.Variable(10.0)
  >>> loss = lambda: (var1 ** 2)/2.0       # d(loss)/d(var1) == var1
  >>> step_count = opt.minimize(loss, [var1]).numpy()
  >>> # The first step is `-learning_rate*sign(grad)`
  >>> var1.numpy()
  9.9
  """

  _HAS_AGGREGATE_GRAD = True

  def __init__(self,
               learning_rate=0.001,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=1e-7,
               amsgrad=False,
               name='Adam',
               **kwargs):
    """Construct a new Adam optimizer.

    Args:
      learning_rate: A `Tensor`, floating point value, or a schedule that is a
        `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable that
        takes no arguments and returns the actual value to use. The learning
        rate. Defaults to 0.001.
      beta_1: A float value or a constant float tensor, or a callable that takes
        no arguments and returns the actual value to use. The exponential decay
        rate for the 1st moment estimates. Defaults to 0.9.
      beta_2: A float value or a constant float tensor, or a callable that takes
        no arguments and returns the actual value to use. The exponential decay
        rate for the 2nd moment estimates. Defaults to 0.999.
      epsilon: A small constant for numerical stability. This epsilon is
        "epsilon hat" in the Kingma and Ba paper (in the formula just before
        Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to
        1e-7.
      amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm from
        the paper "On the Convergence of Adam and beyond". Defaults to `False`.
      name: Optional name for the operations created when applying gradients.
        Defaults to "Adam".
      **kwargs: Keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
        `decay`}. `clipnorm` clips gradients by norm; `clipvalue` clips
        gradients by value. `decay` is included for backward compatibility to
        allow time-inverse decay of the learning rate. `lr` is included for
        backward compatibility; it is recommended to use `learning_rate`
        instead (see the example below).
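
    For example, a construction using the legacy aliases (values are
    illustrative): `NonFusedAdam(lr=0.01, decay=1e-4)` behaves the same as
    `NonFusedAdam(learning_rate=0.01, decay=1e-4)`.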
    """
    super(NonFusedAdam, self).__init__(name, **kwargs)
    self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
    self._set_hyper('decay', self._initial_decay)
    self._set_hyper('beta_1', beta_1)
    self._set_hyper('beta_2', beta_2)
    self.epsilon = epsilon or backend_config.epsilon()
    self.amsgrad = amsgrad

  def _create_slots(self, var_list):
    # Create slots for the first and second moments, plus `vhat` when AMSGrad
    # is enabled.
    for var in var_list:
      self.add_slot(var, 'm')
    for var in var_list:
      self.add_slot(var, 'v')
    if self.amsgrad:
      for var in var_list:
        self.add_slot(var, 'vhat')

  def _prepare_local(self, var_device, var_dtype, apply_state):
    super(NonFusedAdam, self)._prepare_local(var_device, var_dtype,
                                             apply_state)

    local_step = math_ops.cast(self.iterations + 1, var_dtype)
    beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
    beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))
    beta_1_power = math_ops.pow(beta_1_t, local_step)
    beta_2_power = math_ops.pow(beta_2_t, local_step)
    lr = (apply_state[(var_device, var_dtype)]['lr_t'] *
          (math_ops.sqrt(1 - beta_2_power) / (1 - beta_1_power)))
    apply_state[(var_device, var_dtype)].update(
        dict(
            lr=lr,
            epsilon=ops.convert_to_tensor_v2_with_dispatch(
                self.epsilon, var_dtype),
            beta_1_t=beta_1_t,
            beta_1_power=beta_1_power,
            one_minus_beta_1_t=1 - beta_1_t,
            beta_2_t=beta_2_t,
            beta_2_power=beta_2_power,
            one_minus_beta_2_t=1 - beta_2_t))

  def set_weights(self, weights):
    params = self.weights
    # If the weights are generated by a Keras V1 optimizer, they include vhats
    # even without amsgrad (3x + 1 variables instead of 2x + 1); filter the
    # vhats out for compatibility.
    num_vars = int((len(params) - 1) / 2)
    if len(weights) == 3 * num_vars + 1:
      weights = weights[:len(params)]
    super(NonFusedAdam, self).set_weights(weights)

  @def_function.function(jit_compile=True)
  def _resource_apply_dense(self, grad, var, apply_state=None):
    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = ((apply_state or {}).get((var_device, var_dtype)) or
                    self._fallback_apply_state(var_device, var_dtype))

    m = self.get_slot(var, 'm')
    v = self.get_slot(var, 'v')

    alpha = (
        coefficients['lr_t'] * math_ops.sqrt(1 - coefficients['beta_2_power'])
        / (1 - coefficients['beta_1_power']))
    m.assign_add((grad - m) * (1 - coefficients['beta_1_t']))
    v.assign_add((math_ops.square(grad) - v) * (1 - coefficients['beta_2_t']))
    if self.amsgrad:
      vhat = self.get_slot(var, 'vhat')
      vhat.assign(math_ops.maximum(vhat, v))
      v = vhat
    var.assign_sub((m * alpha) / (math_ops.sqrt(v) + coefficients['epsilon']))

  @def_function.function(jit_compile=True)
  def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
    var_device, var_dtype = var.device, var.dtype.base_dtype
    coefficients = ((apply_state or {}).get((var_device, var_dtype)) or
                    self._fallback_apply_state(var_device, var_dtype))

    # m_t = beta_1 * m + (1 - beta_1) * g_t
    m = self.get_slot(var, 'm')
    m_scaled_g_values = grad * coefficients['one_minus_beta_1_t']
    m.assign(m * coefficients['beta_1_t'])
    m.scatter_add(indexed_slices.IndexedSlices(m_scaled_g_values, indices))

    # v_t = beta_2 * v + (1 - beta_2) * (g_t * g_t)
    v = self.get_slot(var, 'v')
    v_scaled_g_values = (grad * grad) * coefficients['one_minus_beta_2_t']
    v.assign(v * coefficients['beta_2_t'])
    v.scatter_add(indexed_slices.IndexedSlices(v_scaled_g_values, indices))

    if not self.amsgrad:
      var.assign_sub(coefficients['lr'] * m /
                     (math_ops.sqrt(v) + coefficients['epsilon']))
    else:
      v_hat = self.get_slot(var, 'vhat')
      v_hat.assign(math_ops.maximum(v_hat, v))
      var.assign_sub(coefficients['lr'] * m /
                     (math_ops.sqrt(v_hat) + coefficients['epsilon']))

  def get_config(self):
    config = super(NonFusedAdam, self).get_config()
    config.update({
        'learning_rate': self._serialize_hyperparameter('learning_rate'),
        'decay': self._serialize_hyperparameter('decay'),
        'beta_1': self._serialize_hyperparameter('beta_1'),
        'beta_2': self._serialize_hyperparameter('beta_2'),
        'epsilon': self.epsilon,
        'amsgrad': self.amsgrad,
    })
    return config