"""Fully connected RNN layer."""

import tensorflow.compat.v2 as tf

from keras import activations
from keras import backend
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.engine import base_layer
from keras.engine.input_spec import InputSpec
from keras.layers.rnn import rnn_utils
from keras.layers.rnn.base_rnn import RNN
from keras.layers.rnn.dropout_rnn_cell_mixin import DropoutRNNCellMixin
from keras.utils import tf_utils

from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export


@keras_export("keras.layers.SimpleRNNCell")
class SimpleRNNCell(DropoutRNNCellMixin, base_layer.BaseRandomLayer):
    """Cell class for SimpleRNN.

    See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
    for details about the usage of RNN API.

    This class processes one step within the whole time sequence input, whereas
    `tf.keras.layers.SimpleRNN` processes the whole sequence.
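
    At each step, the cell computes `output = activation(dot(inputs, kernel)
    + bias + dot(state, recurrent_kernel))`, and returns that output as both
    the step output and the new state.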

    Args:
      units: Positive integer, dimensionality of the output space.
      activation: Activation function to use.
        Default: hyperbolic tangent (`tanh`).
        If you pass `None`, no activation is applied
        (ie. "linear" activation: `a(x) = x`).
      use_bias: Boolean (default `True`), whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix,
        used for the linear transformation of the inputs. Default:
        `glorot_uniform`.
      recurrent_initializer: Initializer for the `recurrent_kernel`
        weights matrix, used for the linear transformation of the recurrent
        state.  Default: `orthogonal`.
      bias_initializer: Initializer for the bias vector. Default: `zeros`.
      kernel_regularizer: Regularizer function applied to the `kernel` weights
        matrix. Default: `None`.
      recurrent_regularizer: Regularizer function applied to the
        `recurrent_kernel` weights matrix. Default: `None`.
      bias_regularizer: Regularizer function applied to the bias vector.
        Default: `None`.
      kernel_constraint: Constraint function applied to the `kernel` weights
        matrix. Default: `None`.
      recurrent_constraint: Constraint function applied to the
        `recurrent_kernel` weights matrix. Default: `None`.
      bias_constraint: Constraint function applied to the bias vector. Default:
        `None`.
      dropout: Float between 0 and 1. Fraction of the units to drop for the
        linear transformation of the inputs. Default: 0.
      recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
        for the linear transformation of the recurrent state. Default: 0.

    Call arguments:
      inputs: A 2D tensor, with shape `[batch, feature]`.
      states: A 2D tensor, with shape `[batch, units]`, which is the state
        from the previous time step. For timestep 0, the initial state
        provided by the user will be fed to the cell.
      training: Python boolean indicating whether the layer should behave in
        training mode or in inference mode. Only relevant when `dropout` or
        `recurrent_dropout` is used.

    Examples:

    ```python
    inputs = np.random.random([32, 10, 8]).astype(np.float32)
    rnn = tf.keras.layers.RNN(tf.keras.layers.SimpleRNNCell(4))

    output = rnn(inputs)  # The output has shape `[32, 4]`.

    rnn = tf.keras.layers.RNN(
        tf.keras.layers.SimpleRNNCell(4),
        return_sequences=True,
        return_state=True)

    # whole_sequence_output has shape `[32, 10, 4]`.
    # final_state has shape `[32, 4]`.
    whole_sequence_output, final_state = rnn(inputs)
    ```
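
    The cell can also be stepped manually; a minimal sketch (shapes follow
    the example above):

    ```python
    cell = tf.keras.layers.SimpleRNNCell(4)
    state = cell.get_initial_state(batch_size=32, dtype=tf.float32)
    # One step over the first timestep; output has shape `[32, 4]`.
    output, state = cell(inputs[:, 0, :], state)
    ```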
    ÚtanhTÚglorot_uniformÚ
orthogonalÚzerosNç        c                s  |dkrt d|› dƒ‚tjj ¡ r4| dd¡| _n| dd¡| _tƒ jf |Ž || _	t
 |¡| _|| _t |¡| _t |¡| _t |¡| _t |¡| _t |¡| _t |	¡| _t |
¡| _t |¡| _t |¡| _tdtd|ƒƒ| _tdtd|ƒƒ| _| j	| _| j	| _d S )	Nr   zQReceived an invalid value for argument `units`, expected a positive integer, got Ú.Úenable_caching_deviceTFg      ð?g        ) Ú
ValueErrorÚtfÚcompatZv1Z#executing_eagerly_outside_functionsÚpopZ_enable_caching_deviceÚsuperÚ__init__Úunitsr   ÚgetÚ
activationÚuse_biasr   Úkernel_initializerÚrecurrent_initializerÚbias_initializerr   Úkernel_regularizerÚrecurrent_regularizerÚbias_regularizerr   Úkernel_constraintÚrecurrent_constraintÚbias_constraintÚminÚmaxÚdropoutÚrecurrent_dropoutZ
state_sizeZoutput_size)Úselfr   r   r    r!   r"   r#   r$   r%   r&   r'   r(   r)   r,   r-   Úkwargs)Ú	__class__© úM/var/www/html/venv/lib/python3.7/site-packages/keras/layers/rnn/simple_rnn.pyr   j   s2    
zSimpleRNNCell.__init__c                sž   t ƒ  |¡ t | ¡}| j|d | jfd| j| j| j|d| _	| j| j| jfd| j
| j| j|d| _| jrŽ| j| jfd| j| j| j|d| _nd | _d| _d S )NéÿÿÿÿÚkernel)ÚshapeÚnameZinitializerZregularizerÚ
constraintÚcaching_deviceÚrecurrent_kernelÚbiasT)r   Úbuildr	   r8   Z
add_weightr   r!   r$   r'   r4   r"   r%   r(   r9   r    r#   r&   r)   r:   Zbuilt)r.   Zinput_shapeZdefault_caching_device)r0   r1   r2   r;       s4    



zSimpleRNNCell.buildc       
      C   sÆ   t j |¡r|d n|}|  ||¡}|  ||¡}|d k	rLt || | j¡}nt || j¡}| jd k	rrt 	|| j¡}|d k	r‚|| }|t || j
¡ }| jd k	r¨|  |¡}t j |¡rº|gn|}	||	fS )Nr   )r   ÚnestZ	is_nestedZget_dropout_mask_for_cellZ#get_recurrent_dropout_mask_for_cellr   Údotr4   r:   Zbias_addr9   r   )
r.   ÚinputsZstatesÚtrainingZprev_outputZdp_maskZrec_dp_maskÚhÚoutputÚ	new_stater1   r1   r2   ÚcallÁ   s     


zSimpleRNNCell.callc             C   s   t  | |||¡S )N)r	   Z#generate_zero_filled_state_for_cell)r.   r>   Z
batch_sizeÚdtyper1   r1   r2   Úget_initial_stateØ   s    zSimpleRNNCell.get_initial_statec                s°   | j t | j¡| jt | j¡t | j¡t | j¡t	 | j
¡t	 | j¡t	 | j¡t | j¡t | j¡t | j¡| j| jdœ}| t | ¡¡ tƒ  ¡ }tt| ¡ ƒt| ¡ ƒ ƒS )N)r   r   r    r!   r"   r#   r$   r%   r&   r'   r(   r)   r,   r-   )r   r   Ú	serializer   r    r   r!   r"   r#   r   r$   r%   r&   r   r'   r(   r)   r,   r-   Úupdater	   Ú config_for_enable_caching_devicer   Ú
get_configÚdictÚlistÚitems)r.   ÚconfigÚbase_config)r0   r1   r2   rI   Ý   s,    






zSimpleRNNCell.get_config)r   Tr   r   r   NNNNNNr   r   )N)NNN)Ú__name__Ú
__module__Ú__qualname__Ú__doc__r   r   Zshape_type_conversionr;   rC   rE   rI   Ú__classcell__r1   r1   )r0   r2   r   %   s$   C            '!

r   zkeras.layers.SimpleRNNc                   sð   e Zd ZdZd.‡ fd
d„	Zd/‡ fdd„	Zedd„ ƒZedd„ ƒZedd„ ƒZ	edd„ ƒZ
edd„ ƒZedd„ ƒZedd„ ƒZedd„ ƒZedd„ ƒZed d!„ ƒZed"d#„ ƒZed$d%„ ƒZed&d'„ ƒZed(d)„ ƒZ‡ fd*d+„Zed,d-„ ƒZ‡  ZS )0Ú	SimpleRNNa6  Fully-connected RNN where the output is to be fed back to input.

    See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
    for details about the usage of RNN API.

    Args:
      units: Positive integer, dimensionality of the output space.
      activation: Activation function to use.
        Default: hyperbolic tangent (`tanh`).
        If you pass `None`, no activation is applied
        (i.e. "linear" activation: `a(x) = x`).
      use_bias: Boolean (default `True`), whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix,
        used for the linear transformation of the inputs. Default:
        `glorot_uniform`.
      recurrent_initializer: Initializer for the `recurrent_kernel`
        weights matrix, used for the linear transformation of the recurrent
        state.  Default: `orthogonal`.
      bias_initializer: Initializer for the bias vector. Default: `zeros`.
      kernel_regularizer: Regularizer function applied to the `kernel` weights
        matrix. Default: `None`.
      recurrent_regularizer: Regularizer function applied to the
        `recurrent_kernel` weights matrix. Default: `None`.
      bias_regularizer: Regularizer function applied to the bias vector.
        Default: `None`.
      activity_regularizer: Regularizer function applied to the output of the
        layer (its "activation"). Default: `None`.
      kernel_constraint: Constraint function applied to the `kernel` weights
        matrix. Default: `None`.
      recurrent_constraint: Constraint function applied to the
        `recurrent_kernel` weights matrix.  Default: `None`.
      bias_constraint: Constraint function applied to the bias vector. Default:
        `None`.
      dropout: Float between 0 and 1.
        Fraction of the units to drop for the linear transformation of the
        inputs. Default: 0.
      recurrent_dropout: Float between 0 and 1.
        Fraction of the units to drop for the linear transformation of the
        recurrent state. Default: 0.
      return_sequences: Boolean. Whether to return the last output
        in the output sequence, or the full sequence. Default: `False`.
      return_state: Boolean. Whether to return the last state
        in addition to the output. Default: `False`.
      go_backwards: Boolean (default False).
        If True, process the input sequence backwards and return the
        reversed sequence.
      stateful: Boolean (default False). If True, the last state
        for each sample at index i in a batch will be used as initial
        state for the sample of index i in the following batch.
      unroll: Boolean (default False).
        If True, the network will be unrolled,
        else a symbolic loop will be used.
        Unrolling can speed up an RNN,
        although it tends to be more memory-intensive.
        Unrolling is only suitable for short sequences.

    Call arguments:
      inputs: A 3D tensor, with shape `[batch, timesteps, feature]`.
      mask: Binary tensor of shape `[batch, timesteps]` indicating whether
        a given timestep should be masked. An individual `True` entry indicates
        that the corresponding timestep should be utilized, while a `False`
        entry indicates that the corresponding timestep should be ignored.
      training: Python boolean indicating whether the layer should behave in
        training mode or in inference mode. This argument is passed to the cell
        when calling it. This is only relevant if `dropout` or
        `recurrent_dropout` is used.
      initial_state: List of initial state tensors to be passed to the first
        call of the cell.

    Examples:

    ```python
    inputs = np.random.random([32, 10, 8]).astype(np.float32)
    simple_rnn = tf.keras.layers.SimpleRNN(4)

    output = simple_rnn(inputs)  # The output has shape `[32, 4]`.

    simple_rnn = tf.keras.layers.SimpleRNN(
        4, return_sequences=True, return_state=True)

    # whole_sequence_output has shape `[32, 10, 4]`.
    # final_state has shape `[32, 4]`.
    whole_sequence_output, final_state = simple_rnn(inputs)
    ```
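
    The `mask` and `initial_state` call arguments can be passed as well;
    a minimal sketch:

    ```python
    simple_rnn = tf.keras.layers.SimpleRNN(4)
    mask = tf.ones([32, 10], dtype=tf.bool)  # keep every timestep
    output = simple_rnn(inputs, mask=mask, initial_state=[tf.zeros([32, 4])])
    ```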
    r   Tr   r   r   Nç        Fc                s´   d|kr|  d¡ t d¡ d|kr4d|  d¡i}ni }t|f||||||||	|||||| d¡| dd¡dœ|—Ž}tƒ j|f|||||dœ|—Ž t |
¡| _t	d	d
g| _
d S )NÚimplementationzhThe `implementation` argument in `SimpleRNN` has been deprecated. Please remove it from your layer call.r   rD   Ú	trainableT)r   r    r!   r"   r#   r$   r%   r&   r'   r(   r)   r,   r-   rD   rW   )Úreturn_sequencesÚreturn_stateÚgo_backwardsÚstatefulÚunrollé   )Úndim)r   ÚloggingÚwarningr   r   r   r   r   Úactivity_regularizerr   Z
input_spec)r.   r   r   r    r!   r"   r#   r$   r%   r&   ra   r'   r(   r)   r,   r-   rX   rY   rZ   r[   r\   r/   Zcell_kwargsÚcell)r0   r1   r2   r   U  sF    
zSimpleRNN.__init__c                s   t ƒ j||||dS )N)Úmaskr?   Úinitial_state)r   rC   )r.   r>   rc   r?   rd   )r0   r1   r2   rC   ™  s    zSimpleRNN.callc             C   s   | j jS )N)rb   r   )r.   r1   r1   r2   r   ž  s    zSimpleRNN.unitsc             C   s   | j jS )N)rb   r   )r.   r1   r1   r2   r   ¢  s    zSimpleRNN.activationc             C   s   | j jS )N)rb   r    )r.   r1   r1   r2   r    ¦  s    zSimpleRNN.use_biasc             C   s   | j jS )N)rb   r!   )r.   r1   r1   r2   r!   ª  s    zSimpleRNN.kernel_initializerc             C   s   | j jS )N)rb   r"   )r.   r1   r1   r2   r"   ®  s    zSimpleRNN.recurrent_initializerc             C   s   | j jS )N)rb   r#   )r.   r1   r1   r2   r#   ²  s    zSimpleRNN.bias_initializerc             C   s   | j jS )N)rb   r$   )r.   r1   r1   r2   r$   ¶  s    zSimpleRNN.kernel_regularizerc             C   s   | j jS )N)rb   r%   )r.   r1   r1   r2   r%   º  s    zSimpleRNN.recurrent_regularizerc             C   s   | j jS )N)rb   r&   )r.   r1   r1   r2   r&   ¾  s    zSimpleRNN.bias_regularizerc             C   s   | j jS )N)rb   r'   )r.   r1   r1   r2   r'   Â  s    zSimpleRNN.kernel_constraintc             C   s   | j jS )N)rb   r(   )r.   r1   r1   r2   r(   Æ  s    zSimpleRNN.recurrent_constraintc             C   s   | j jS )N)rb   r)   )r.   r1   r1   r2   r)   Ê  s    zSimpleRNN.bias_constraintc             C   s   | j jS )N)rb   r,   )r.   r1   r1   r2   r,   Î  s    zSimpleRNN.dropoutc             C   s   | j jS )N)rb   r-   )r.   r1   r1   r2   r-   Ò  s    zSimpleRNN.recurrent_dropoutc                sÂ   | j t | j¡| jt | j¡t | j¡t | j¡t	 | j
¡t	 | j¡t	 | j¡t	 | j¡t | j¡t | j¡t | j¡| j| jdœ}tƒ  ¡ }| t | j¡¡ |d= tt| ¡ ƒt| ¡ ƒ ƒS )N)r   r   r    r!   r"   r#   r$   r%   r&   ra   r'   r(   r)   r,   r-   rb   )r   r   rF   r   r    r   r!   r"   r#   r   r$   r%   r&   ra   r   r'   r(   r)   r,   r-   r   rI   rG   r	   rH   rb   rJ   rK   rL   )r.   rM   rN   )r0   r1   r2   rI   Ö  s2    






zSimpleRNN.get_configc             C   s   d|kr|  d¡ | f |ŽS )NrV   )r   )ÚclsrM   r1   r1   r2   Úfrom_configù  s    
zSimpleRNN.from_config)r   Tr   r   r   NNNNNNNrU   rU   FFFFF)NNN)rO   rP   rQ   rR   r   rC   Úpropertyr   r   r    r!   r"   r#   r$   r%   r&   r'   r(   r)   r,   r-   rI   Úclassmethodrf   rS   r1   r1   )r0   r2   rT   ý   sJ   V                  /#rT   )rR   Ztensorflow.compat.v2r   Zv2r   Zkerasr   r   r   r   r   Zkeras.enginer   Zkeras.engine.input_specr   Zkeras.layers.rnnr	   Zkeras.layers.rnn.base_rnnr
   Z'keras.layers.rnn.dropout_rnn_cell_mixinr   Zkeras.utilsr   Ztensorflow.python.platformr   r_   Z tensorflow.python.util.tf_exportr   ZBaseRandomLayerr   rT   r1   r1   r1   r2   Ú<module>   s&    X