"""Utilities used by convolution layers."""

import itertools

import numpy as np

from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend
from tensorflow.python.ops import array_ops


def convert_data_format(data_format, ndim):
  """Maps a Keras `data_format` and rank to the TF format string, e.g. `NHWC`."""
  if data_format == 'channels_last':
    if ndim == 3:
      return 'NWC'
    elif ndim == 4:
      return 'NHWC'
    elif ndim == 5:
      return 'NDHWC'
    else:
      raise ValueError('Input rank not supported:', ndim)
  elif data_format == 'channels_first':
    if ndim == 3:
      return 'NCW'
    elif ndim == 4:
      return 'NCHW'
    elif ndim == 5:
      return 'NCDHW'
    else:
      raise ValueError('Input rank not supported:', ndim)
  else:
    raise ValueError('Invalid data_format:', data_format)


def normalize_tuple(value, n, name):
  """Transforms a single integer or iterable of integers into an integer tuple.

  Args:
    value: The value to validate and convert. Could be an int, or any iterable
      of ints.
      ints.
    n: The size of the tuple to be returned.
    name: The name of the argument being validated, e.g. "strides" or
      "kernel_size". This is only used to format error messages.

  Returns:
    A tuple of n integers.
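
    For example (illustrative), a scalar is broadcast to the requested length
    and an iterable of the right length is passed through:

        >>> normalize_tuple(2, n=3, name='strides')
        (2, 2, 2)
        >>> normalize_tuple((1, 2), n=2, name='kernel_size')
        (1, 2)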

  Raises:
    ValueError: If something else than an int/long or iterable thereof was
      passed.
  """
  if isinstance(value, int):
    return (value,) * n
  else:
    try:
      value_tuple = tuple(value)
    except TypeError:
      raise ValueError('The `' + name + '` argument must be a tuple of ' +
                       str(n) + ' integers. Received: ' + str(value))
    if len(value_tuple) != n:
      raise ValueError('The `' + name + '` argument must be a tuple of ' +
                       str(n) + ' integers. Received: ' + str(value))
    for single_value in value_tuple:
      try:
        int(single_value)
      except (ValueError, TypeError):
        raise ValueError('The `' + name + '` argument must be a tuple of ' +
                         str(n) + ' integers. Received: ' + str(value) +
                         ' including element ' + str(single_value) +
                         ' of type ' + str(type(single_value)))
    return value_tuple


def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
  """Determines output length of a convolution given input length.

  Args:
      input_length: integer.
      filter_size: integer.
      padding: one of "same", "valid", "full", "causal".
      stride: integer.
      dilation: dilation rate, integer.
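
  For example (illustrative), with the default dilation rate a "valid"
  convolution over a length-10 input with a size-3 kernel at stride 1 yields
  10 - 3 + 1 = 8 output steps:

      >>> conv_output_length(10, filter_size=3, padding="valid", stride=1)
      8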

  Returns:
      The output length (integer).
  """
  if input_length is None:
    return None
  assert padding in {'same', 'valid', 'full', 'causal'}
  dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
  if padding in ['same', 'causal']:
    output_length = input_length
  elif padding == 'valid':
    output_length = input_length - dilated_filter_size + 1
  elif padding == 'full':
    output_length = input_length + dilated_filter_size - 1
  return (output_length + stride - 1) // stride


def conv_input_length(output_length, filter_size, padding, stride):
  """Determines input length of a convolution given output length.

  Args:
      output_length: integer.
      filter_size: integer.
      padding: one of "same", "valid", "full".
      stride: integer.
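
  For example (illustrative), this inverts the "valid" case above: an output of
  length 8 produced by a size-3 kernel at stride 1 maps back to an input of
  length 10:

      >>> conv_input_length(8, filter_size=3, padding="valid", stride=1)
      10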

  Returns:
      The input length (integer).
  """
  if output_length is None:
    return None
  assert padding in {'same', 'valid', 'full'}
  if padding == 'same':
    pad = filter_size // 2
  elif padding == 'valid':
    pad = 0
  elif padding == 'full':
    pad = filter_size - 1
  return (output_length - 1) * stride - 2 * pad + filter_size


def deconv_output_length(input_length,
                         filter_size,
                         padding,
                         output_padding=None,
                         stride=0,
                         dilation=1):
  """Determines output length of a transposed convolution given input length.

  Args:
      input_length: Integer.
      filter_size: Integer.
      padding: one of `"same"`, `"valid"`, `"full"`.
      output_padding: Integer, amount of padding along the output dimension. Can
        be set to `None` in which case the output length is inferred.
      stride: Integer.
      dilation: Integer.
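
  For example (illustrative), with `output_padding` left as `None`, a stride-2
  transposed convolution with a size-3 kernel and "valid" padding upsamples a
  length-4 input to 4 * 2 + max(3 - 2, 0) = 9:

      >>> deconv_output_length(4, filter_size=3, padding="valid", stride=2)
      9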

  Returns:
      The output length (integer).
  """
  assert padding in {'same', 'valid', 'full'}
  if input_length is None:
    return None

  # Get the dilated kernel size.
  filter_size = filter_size + (filter_size - 1) * (dilation - 1)

  # Infer length if output padding is None, else compute the exact length.
  if output_padding is None:
    if padding == 'valid':
      length = input_length * stride + max(filter_size - stride, 0)
    elif padding == 'full':
      length = input_length * stride - (stride + filter_size - 2)
    elif padding == 'same':
      length = input_length * stride
  else:
    if padding == 'same':
      pad = filter_size // 2
    elif padding == 'valid':
      pad = 0
    elif padding == 'full':
      pad = filter_size - 1

    length = ((input_length - 1) * stride + filter_size - 2 * pad +
              output_padding)
  return length


def normalize_data_format(value):
  """Validates a `data_format` string, falling back to the Keras default."""
  if value is None:
    value = backend.image_data_format()
  data_format = value.lower()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('The `data_format` argument must be one of '
                     '"channels_first", "channels_last". Received: ' +
                     str(value))
  return data_format


def normalize_padding(value):
  """Validates a `padding` value; lists/tuples (explicit padding) pass through."""
  if isinstance(value, (list, tuple)):
    return value
  padding = value.lower()
  if padding not in {'valid', 'same', 'causal'}:
    raise ValueError('The `padding` argument must be a list/tuple or one of '
                     '"valid", "same" (or "causal", only for `Conv1D`). '
                     'Received: ' + str(padding))
  return padding


def conv_kernel_mask(input_shape, kernel_shape, strides, padding):
  """Compute a mask representing the connectivity of a convolution operation.

  Assume a convolution with given parameters is applied to an input having N
  spatial dimensions with `input_shape = (d_in1, ..., d_inN)` to produce an
  output with shape `(d_out1, ..., d_outN)`. This method returns a boolean array
  of shape `(d_in1, ..., d_inN, d_out1, ..., d_outN)` with `True` entries
  indicating pairs of input and output locations that are connected by a weight.

  Example:

    >>> input_shape = (4,)
    >>> kernel_shape = (2,)
    >>> strides = (1,)
    >>> padding = "valid"
    >>> conv_kernel_mask(input_shape, kernel_shape, strides, padding)
    array([[ True, False, False],
           [ True,  True, False],
           [False,  True,  True],
           [False, False,  True]])

    where rows and columns correspond to inputs and outputs respectively.


  Args:
    input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
      input.
    kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
      receptive field.
    strides: tuple of size N, strides along each spatial dimension.
    padding: type of padding, string `"same"` or `"valid"`.
      `"valid"` means no padding. `"same"` results in padding evenly to
      the left/right or up/down of the input such that output has the same
      height/width dimension as the input.

  Returns:
    A boolean 2N-D `np.ndarray` of shape
    `(d_in1, ..., d_inN, d_out1, ..., d_outN)`, where `(d_out1, ..., d_outN)`
    is the spatial shape of the output. `True` entries in the mask represent
    pairs of input-output locations that are connected by a weight.

  Raises:
    ValueError: if `input_shape`, `kernel_shape` and `strides` don't have the
        same number of dimensions.
    NotImplementedError: if `padding` is not in {`"same"`, `"valid"`}.
  """
  if padding not in {'same', 'valid'}:
    raise NotImplementedError('Padding type %s not supported. '
                              'Only "valid" and "same" are implemented.' %
                              padding)

  in_dims = len(input_shape)
  if isinstance(kernel_shape, int):
    kernel_shape = (kernel_shape,) * in_dims
  if isinstance(strides, int):
    strides = (strides,) * in_dims

  kernel_dims = len(kernel_shape)
  stride_dims = len(strides)
  if kernel_dims != in_dims or stride_dims != in_dims:
    raise ValueError('Number of strides, input and kernel dimensions must all '
                     'match. Received: %d, %d, %d.' %
                     (stride_dims, in_dims, kernel_dims))

  output_shape = conv_output_shape(input_shape, kernel_shape, strides, padding)

  mask_shape = input_shape + output_shape
  mask = np.zeros(mask_shape, np.bool_)

  output_axes_ticks = [range(dim) for dim in output_shape]
  for output_position in itertools.product(*output_axes_ticks):
    input_axes_ticks = conv_connected_inputs(input_shape, kernel_shape,
                                             output_position, strides, padding)
    for input_position in itertools.product(*input_axes_ticks):
      mask[input_position + output_position] = True

  return mask


def conv_kernel_idxs(input_shape, kernel_shape, strides, padding, filters_in,
                     filters_out, data_format):
  """Yields output-input tuples of indices in a CNN layer.

  The generator iterates over all `(output_idx, input_idx)` tuples, where
    `output_idx` is an integer index in a flattened tensor representing a single
    output image of a convolutional layer that is connected (via the layer
    weights) to the respective single input image at `input_idx`

  Example:

    >>> input_shape = (2, 2)
    >>> kernel_shape = (2, 1)
    >>> strides = (1, 1)
    >>> padding = "valid"
    >>> filters_in = 1
    >>> filters_out = 1
    >>> data_format = "channels_last"
    >>> list(conv_kernel_idxs(input_shape, kernel_shape, strides, padding,
    ...                       filters_in, filters_out, data_format))
    [(0, 0), (0, 2), (1, 1), (1, 3)]

  Args:
    input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
      input.
    kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
      receptive field.
    strides: tuple of size N, strides along each spatial dimension.
    padding: type of padding, string `"same"` or `"valid"`.
      `"valid"` means no padding. `"same"` results in padding evenly to
      the left/right or up/down of the input such that output has the same
      height/width dimension as the input.
    filters_in: `int`, number of filters in the input to the layer.
    filters_out: `int`, number of filters in the output of the layer.
    data_format: string, "channels_first" or "channels_last".

  Yields:
    The next tuple `(output_idx, input_idx)`, where
    `output_idx` is an integer index in a flattened tensor representing a single
    output image of a convolutional layer that is connected (via the layer
    weights) to the respective single input image at `input_idx`.

  Raises:
      ValueError: if `data_format` is neither
      `"channels_last"` nor `"channels_first"`, or if number of strides, input,
      and kernel number of dimensions do not match.

      NotImplementedError: if `padding` is neither `"same"` nor `"valid"`.
  """
  if padding not in ('same', 'valid'):
    raise NotImplementedError('Padding type %s not supported. '
                              'Only "valid" and "same" are implemented.' %
                              padding)

  in_dims = len(input_shape)
  if isinstance(kernel_shape, int):
    kernel_shape = (kernel_shape,) * in_dims
  if isinstance(strides, int):
    strides = (strides,) * in_dims

  kernel_dims = len(kernel_shape)
  stride_dims = len(strides)
  if kernel_dims != in_dims or stride_dims != in_dims:
    raise ValueError('Number of strides, input and kernel dimensions must all '
                     'match. Received: %d, %d, %d.' %
                     (stride_dims, in_dims, kernel_dims))

  output_shape = conv_output_shape(input_shape, kernel_shape, strides, padding)
  output_axes_ticks = [range(dim) for dim in output_shape]

  if data_format == 'channels_first':
    concat_idxs = lambda spatial_idx, filter_idx: (filter_idx,) + spatial_idx
  elif data_format == 'channels_last':
    concat_idxs = lambda spatial_idx, filter_idx: spatial_idx + (filter_idx,)
  else:
    raise ValueError('Data format %s not recognized. '
                     '`data_format` must be "channels_first" or '
                     '"channels_last".' % data_format)

  for output_position in itertools.product(*output_axes_ticks):
    input_axes_ticks = conv_connected_inputs(input_shape, kernel_shape,
                                             output_position, strides, padding)
    for input_position in itertools.product(*input_axes_ticks):
      for f_in in range(filters_in):
        for f_out in range(filters_out):
          out_idx = np.ravel_multi_index(
              multi_index=concat_idxs(output_position, f_out),
              dims=concat_idxs(output_shape, filters_out))
          in_idx = np.ravel_multi_index(
              multi_index=concat_idxs(input_position, f_in),
              dims=concat_idxs(input_shape, filters_in))
          yield (out_idx, in_idx)


def conv_connected_inputs(input_shape, kernel_shape, output_position, strides,
                          padding):
  """Return locations of the input connected to an output position.

  Assume a convolution with given parameters is applied to an input having N
  spatial dimensions with `input_shape = (d_in1, ..., d_inN)`. This method
  returns N ranges specifying the input region that was convolved with the
  kernel to produce the output at position
  `output_position = (p_out1, ..., p_outN)`.

  Example:

    >>> input_shape = (4, 4)
    >>> kernel_shape = (2, 1)
    >>> output_position = (1, 1)
    >>> strides = (1, 1)
    >>> padding = "valid"
    >>> conv_connected_inputs(input_shape, kernel_shape, output_position,
    ...                       strides, padding)
    [range(1, 3), range(1, 2)]

  Args:
    input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
      input.
    kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
      receptive field.
    output_position: tuple of size N: `(p_out1, ..., p_outN)`, a single position
      in the output of the convolution.
    strides: tuple of size N, strides along each spatial dimension.
    padding: type of padding, string `"same"` or `"valid"`.
      `"valid"` means no padding. `"same"` results in padding evenly to
      the left/right or up/down of the input such that output has the same
      height/width dimension as the input.

  Returns:
    N ranges `[[p_in_left1, ..., p_in_right1], ...,
              [p_in_leftN, ..., p_in_rightN]]` specifying the region in the
    input connected to output_position.
  """
  ranges = []

  ndims = len(input_shape)
  for d in range(ndims):
    left_shift = int(kernel_shape[d] / 2)
    right_shift = kernel_shape[d] - left_shift

    center = output_position[d] * strides[d]

    if padding == 'valid':
      center += left_shift

    start = max(0, center - left_shift)
    end = min(input_shape[d], center + right_shift)

    ranges.append(range(start, end))

  return ranges


def conv_output_shape(input_shape, kernel_shape, strides, padding):
  """Return the output shape of an N-D convolution.

  Forces dimensions where input is empty (size 0) to remain empty.

  Args:
    input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
      input.
    kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
      receptive field.
    strides: tuple of size N, strides along each spatial dimension.
    padding: type of padding, string `"same"` or `"valid"`.
      `"valid"` means no padding. `"same"` results in padding evenly to
      the left/right or up/down of the input such that output has the same
      height/width dimension as the input.
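
  For example (illustrative), a 2x2 kernel slid at stride 1 with "valid"
  padding over a 4x4 input produces a 3x3 output:

      >>> conv_output_shape((4, 4), (2, 2), (1, 1), "valid")
      (3, 3)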

  Returns:
    tuple of size N: `(d_out1, ..., d_outN)`, spatial shape of the output.
  """
  dims = range(len(kernel_shape))
  output_shape = [
      conv_output_length(input_shape[d], kernel_shape[d], padding, strides[d])
      for d in dims
  ]
  output_shape = tuple(
      [0 if input_shape[d] == 0 else output_shape[d] for d in dims])
  return output_shape


def squeeze_batch_dims(inp, op, inner_rank):
  """Returns `unsqueeze_batch(op(squeeze_batch(inp)))`.

  Where `squeeze_batch` reshapes `inp` to shape
  `[prod(inp.shape[:-inner_rank])] + inp.shape[-inner_rank:]`
  and `unsqueeze_batch` does the reverse reshape but on the output.

  Args:
    inp: A tensor with dims `batch_shape + inner_shape` where `inner_shape`
      is length `inner_rank`.
    op: A callable that takes a single input tensor and returns a single
      output tensor.
    inner_rank: A python integer.
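
  For example (illustrative, assuming eager execution so shapes are fully
  defined), an elementwise `op` keeps the inner shape, and the result is
  reshaped back to the original batch dimensions:

      >>> x = array_ops.ones((2, 3, 4, 5))
      >>> squeeze_batch_dims(x, lambda t: t * 2, inner_rank=2).shape
      TensorShape([2, 3, 4, 5])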

  Returns:
    `unsqueeze_batch_op(squeeze_batch(inp))`.
  """
  with ops.name_scope_v2('squeeze_batch_dims'):
    shape = inp.shape

    inner_shape = shape[-inner_rank:]
    if not inner_shape.is_fully_defined():
      inner_shape = array_ops.shape(inp)[-inner_rank:]

    batch_shape = shape[:-inner_rank]
    if not batch_shape.is_fully_defined():
      batch_shape = array_ops.shape(inp)[:-inner_rank]

    if isinstance(inner_shape, tensor_shape.TensorShape):
      inp_reshaped = array_ops.reshape(inp, [-1] + inner_shape.as_list())
    else:
      inp_reshaped = array_ops.reshape(inp,
                                       array_ops.concat(([-1], inner_shape),
                                                        axis=-1))

    out_reshaped = op(inp_reshaped)

    out_inner_shape = out_reshaped.shape[-inner_rank:]
    if not out_inner_shape.is_fully_defined():
      out_inner_shape = array_ops.shape(out_reshaped)[-inner_rank:]

    out = array_ops.reshape(
        out_reshaped,
        array_ops.concat((batch_shape, out_inner_shape), axis=-1))

    out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:])
    return out