"""Layers that act as activation functions."""

from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export


def get_globals():
  return globals()


@keras_export('keras.layers.LeakyReLU')
class LeakyReLU(Layer):
  """Leaky version of a Rectified Linear Unit.

  It allows a small gradient when the unit is not active:

  ```
    f(x) = alpha * x if x < 0
    f(x) = x if x >= 0
  ```

  Usage:

  >>> layer = tf.keras.layers.LeakyReLU()
  >>> output = layer([-3.0, -1.0, 0.0, 2.0])
  >>> list(output.numpy())
  [-0.9, -0.3, 0.0, 2.0]
  >>> layer = tf.keras.layers.LeakyReLU(alpha=0.1)
  >>> output = layer([-3.0, -1.0, 0.0, 2.0])
  >>> list(output.numpy())
  [-0.3, -0.1, 0.0, 2.0]

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the batch axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as the input.

  Args:
    alpha: Float >= 0. Negative slope coefficient. Defaults to 0.3.

  """

  def __init__(self, alpha=0.3, **kwargs):
    super(LeakyReLU, self).__init__(**kwargs)
    if alpha is None:
      raise ValueError('The alpha value of a Leaky ReLU layer '
                       'cannot be None, needs a float. Got %s' % alpha)
    self.supports_masking = True
    self.alpha = backend.cast_to_floatx(alpha)

  def call(self, inputs):
    return backend.relu(inputs, alpha=self.alpha)

  def get_config(self):
    config = {'alpha': float(self.alpha)}
    base_config = super(LeakyReLU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    return input_shape
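

# Hedged usage sketch, not part of the original module: the get_config
# pattern above stores `alpha` as a plain float, so a layer's configuration
# round-trips through `from_config` (inherited from `Layer`). The helper
# name below is hypothetical.
def _leaky_relu_config_round_trip_sketch():
  layer = LeakyReLU(alpha=0.1)
  config = layer.get_config()  # JSON-serializable dict
  # The restored layer re-casts `alpha` via backend.cast_to_floatx.
  return LeakyReLU.from_config(config).alpha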
   !	r   zkeras.layers.PReLUc                   sR   e Zd ZdZd fdd	Zejdd Zdd	 Z fd
dZ	ejdd Z
  ZS )PReLUa  Parametric Rectified Linear Unit.

  It follows:

  ```
    f(x) = alpha * x for x < 0
    f(x) = x for x >= 0
  ```

  where `alpha` is a learned array with the same shape as x.

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the samples axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as the input.

  Args:
    alpha_initializer: Initializer function for the weights.
    alpha_regularizer: Regularizer for the weights.
    alpha_constraint: Constraint for the weights.
    shared_axes: The axes along which to share learnable
      parameters for the activation function.
      For example, if the incoming feature maps
      are from a 2D convolution
      with output shape `(batch, height, width, channels)`,
      and you wish to share parameters across space
      so that each filter only has one set of parameters,
      set `shared_axes=[1, 2]`.
  """

  def __init__(self,
               alpha_initializer='zeros',
               alpha_regularizer=None,
               alpha_constraint=None,
               shared_axes=None,
               **kwargs):
    super(PReLU, self).__init__(**kwargs)
    self.supports_masking = True
    self.alpha_initializer = initializers.get(alpha_initializer)
    self.alpha_regularizer = regularizers.get(alpha_regularizer)
    self.alpha_constraint = constraints.get(alpha_constraint)
    if shared_axes is None:
      self.shared_axes = None
    elif not isinstance(shared_axes, (list, tuple)):
      self.shared_axes = [shared_axes]
    else:
      self.shared_axes = list(shared_axes)

  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    param_shape = list(input_shape[1:])
    if self.shared_axes is not None:
      for i in self.shared_axes:
        param_shape[i - 1] = 1
    self.alpha = self.add_weight(
        shape=param_shape,
        name='alpha',
        initializer=self.alpha_initializer,
        regularizer=self.alpha_regularizer,
        constraint=self.alpha_constraint)
    # Set input spec: all non-shared axes must keep their static size.
    axes = {}
    if self.shared_axes:
      for i in range(1, len(input_shape)):
        if i not in self.shared_axes:
          axes[i] = input_shape[i]
    self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
    self.built = True

  def call(self, inputs):
    pos = backend.relu(inputs)
    neg = -self.alpha * backend.relu(-inputs)
    return pos + neg

  def get_config(self):
    config = {
        'alpha_initializer': initializers.serialize(self.alpha_initializer),
        'alpha_regularizer': regularizers.serialize(self.alpha_regularizer),
        'alpha_constraint': constraints.serialize(self.alpha_constraint),
        'shared_axes': self.shared_axes
    }
    base_config = super(PReLU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    return input_shape


@keras_export('keras.layers.ELU')
class ELU(Layer):
  """Exponential Linear Unit.

  It follows:

  ```
    f(x) = alpha * (exp(x) - 1.) for x < 0
    f(x) = x for x >= 0
  ```

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the samples axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as the input.

  Args:
    alpha: Scale for the negative factor.
  """

  def __init__(self, alpha=1.0, **kwargs):
    super(ELU, self).__init__(**kwargs)
    if alpha is None:
      raise ValueError('Alpha of an ELU layer cannot be None, '
                       'requires a float. Got %s' % alpha)
    self.supports_masking = True
    self.alpha = backend.cast_to_floatx(alpha)

  def call(self, inputs):
    return backend.elu(inputs, self.alpha)

  def get_config(self):
    config = {'alpha': float(self.alpha)}
    base_config = super(ELU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    return input_shape
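

# Hedged usage sketch, not part of the original module: ELU follows
# alpha * (exp(x) - 1.) below zero and the identity above it. The helper
# name and sample values below are hypothetical.
def _elu_sketch():
  import numpy as np
  layer = ELU(alpha=1.0)
  out = layer(np.array([-1.0, 0.0, 2.0], dtype='float32'))
  # out[0] is exp(-1) - 1 (about -0.632); 0.0 and 2.0 pass through.
  return out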


@keras_export('keras.layers.ThresholdedReLU')
class ThresholdedReLU(Layer):
  """Thresholded Rectified Linear Unit.

  It follows:

  ```
    f(x) = x for x > theta
    f(x) = 0 otherwise
  ```

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the samples axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as the input.

  Args:
    theta: Float >= 0. Threshold location of activation.
  """

  def __init__(self, theta=1.0, **kwargs):
    super(ThresholdedReLU, self).__init__(**kwargs)
    if theta is None:
      raise ValueError('Theta of a Thresholded ReLU layer cannot be '
                       'None, requires a float. Got %s' % theta)
    if theta < 0:
      raise ValueError('The theta value of a Thresholded ReLU layer '
                       'should be >=0, got %s' % theta)
    self.supports_masking = True
    self.theta = backend.cast_to_floatx(theta)

  def call(self, inputs):
    theta = math_ops.cast(self.theta, inputs.dtype)
    return inputs * math_ops.cast(math_ops.greater(inputs, theta), inputs.dtype)

  def get_config(self):
    config = {'theta': float(self.theta)}
    base_config = super(ThresholdedReLU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    return input_shape
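

# Hedged usage sketch, not part of the original module: the comparison in
# call() is strict, so inputs equal to `theta` are zeroed as well. The
# helper name and sample values below are hypothetical.
def _thresholded_relu_sketch():
  import numpy as np
  layer = ThresholdedReLU(theta=1.5)
  # greater(x, 1.5) keeps only 2.0; 1.5 itself fails the strict comparison.
  return layer(np.array([1.0, 1.5, 2.0], dtype='float32'))  # [0., 0., 2.]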


def _large_compatible_negative(tensor_type):
  """Large negative number as Tensor.

  This function is necessary because the standard value for epsilon
  in this module (-1e9) cannot be represented using tf.float16.

  Args:
    tensor_type: a dtype to determine the type.

  Returns:
    a large negative number.
  """
  if tensor_type == dtypes.float16:
    return dtypes.float16.min
  return -1e9
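

# Hedged sketch, not part of the original module: -1e9 lies outside
# float16's finite range (roughly +/-65504), hence the clamp above. The
# helper name below is hypothetical.
def _large_negative_sketch():
  return (_large_compatible_negative(dtypes.float16),  # -65504.0
          _large_compatible_negative(dtypes.float32))  # -1e9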


@keras_export('keras.layers.Softmax')
class Softmax(Layer):
  """Softmax activation function.

  Example without mask:

  >>> inp = np.asarray([1., 2., 1.])
  >>> layer = tf.keras.layers.Softmax()
  >>> layer(inp).numpy()
  array([0.21194157, 0.5761169 , 0.21194157], dtype=float32)
  >>> mask = np.asarray([True, False, True], dtype=bool)
  >>> layer(inp, mask).numpy()
  array([0.5, 0. , 0.5], dtype=float32)

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the samples axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as the input.

  Args:
    axis: Integer, or list of Integers, axis along which the softmax
      normalization is applied.
  Call arguments:
    inputs: The inputs, or logits to the softmax layer.
    mask: A boolean mask of the same shape as `inputs`. Defaults to `None`. The
      mask specifies 1 to keep and 0 to mask.

  Returns:
    softmaxed output with the same shape as `inputs`.
  """

  def __init__(self, axis=-1, **kwargs):
    super(Softmax, self).__init__(**kwargs)
    self.supports_masking = True
    self.axis = axis

  def call(self, inputs, mask=None):
    if mask is not None:
      # Since mask == 1.0 means keep and mask == 0.0 means mask out, adding a
      # very large negative number to the masked positions makes them vanish
      # after the softmax.
      adder = (1.0 - math_ops.cast(mask, inputs.dtype)) * (
          _large_compatible_negative(inputs.dtype))
      inputs += adder
    if isinstance(self.axis, (tuple, list)):
      if len(self.axis) > 1:
        return math_ops.exp(inputs - math_ops.reduce_logsumexp(
            inputs, axis=self.axis, keepdims=True))
      else:
        return backend.softmax(inputs, axis=self.axis[0])
    return backend.softmax(inputs, axis=self.axis)

  def get_config(self):
    config = {'axis': self.axis}
    base_config = super(Softmax, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    return input_shape


@keras_export('keras.layers.ReLU')
class ReLU(Layer):
  """Rectified Linear Unit activation function.

  With default values, it returns element-wise `max(x, 0)`.

  Otherwise, it follows:

  ```
    f(x) = max_value if x >= max_value
    f(x) = x if threshold <= x < max_value
    f(x) = negative_slope * (x - threshold) otherwise
  ```

  Usage:

  >>> layer = tf.keras.layers.ReLU()
  >>> output = layer([-3.0, -1.0, 0.0, 2.0])
  >>> list(output.numpy())
  [0.0, 0.0, 0.0, 2.0]
  >>> layer = tf.keras.layers.ReLU(max_value=1.0)
  >>> output = layer([-3.0, -1.0, 0.0, 2.0])
  >>> list(output.numpy())
  [0.0, 0.0, 0.0, 1.0]
  >>> layer = tf.keras.layers.ReLU(negative_slope=1.0)
  >>> output = layer([-3.0, -1.0, 0.0, 2.0])
  >>> list(output.numpy())
  [-3.0, -1.0, 0.0, 2.0]
  >>> layer = tf.keras.layers.ReLU(threshold=1.5)
  >>> output = layer([-3.0, -1.0, 1.0, 2.0])
  >>> list(output.numpy())
  [0.0, 0.0, 0.0, 2.0]

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the batch axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as the input.

  Args:
    max_value: Float >= 0. Maximum activation value. Defaults to None, which
      means unlimited.
    negative_slope: Float >= 0. Negative slope coefficient. Defaults to 0.
    threshold: Float >= 0. Threshold value for thresholded activation.
      Defaults to 0.
  """

  def __init__(self, max_value=None, negative_slope=0, threshold=0, **kwargs):
    super(ReLU, self).__init__(**kwargs)
    if max_value is not None and max_value < 0.:
      raise ValueError('max_value of a ReLU layer cannot be a negative '
                       'value. Got: %s' % max_value)
    if negative_slope is None or negative_slope < 0.:
      raise ValueError('negative_slope of a ReLU layer cannot be a negative '
                       'value. Got: %s' % negative_slope)
    if threshold is None or threshold < 0.:
      raise ValueError('threshold of a ReLU layer cannot be a negative '
                       'value. Got: %s' % threshold)

    self.supports_masking = True
    if max_value is not None:
      max_value = backend.cast_to_floatx(max_value)
    self.max_value = max_value
    self.negative_slope = backend.cast_to_floatx(negative_slope)
    self.threshold = backend.cast_to_floatx(threshold)

  def call(self, inputs):
    # `alpha` is the backend relu's name for the negative slope.
    return backend.relu(
        inputs,
        alpha=self.negative_slope,
        max_value=self.max_value,
        threshold=self.threshold)

  def get_config(self):
    config = {
        'max_value': self.max_value,
        'negative_slope': self.negative_slope,
        'threshold': self.threshold
    }
    base_config = super(ReLU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    return input_shape