"""Python wrappers for reader Datasets."""
import collections
import csv
import functools
import gzip

import numpy as np

from tensorflow.python import tf2
from tensorflow.python.data.experimental.ops import error_ops
from tensorflow.python.data.experimental.ops import parsing_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import options as options_lib
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.data.util import convert
from tensorflow.python.data.util import nest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import gfile
from tensorflow.python.util.tf_export import tf_export

_ACCEPTABLE_CSV_TYPES = (dtypes.float32, dtypes.float64, dtypes.int32,
                         dtypes.int64, dtypes.string)


def _is_valid_int32(str_val):
  try:
    # Checks equality to prevent int32 overflow.
    return dtypes.int32.as_numpy_dtype(str_val) == dtypes.int64.as_numpy_dtype(
        str_val)
  except (ValueError, OverflowError):
    return False


def _is_valid_int64(str_val):
  try:
    dtypes.int64.as_numpy_dtype(str_val)
    return True
  except (ValueError, OverflowError):
    return False


def _is_valid_float(str_val, float_dtype):
  try:
    return float_dtype.as_numpy_dtype(str_val) < np.inf
  except ValueError:
    return False


def _infer_type(str_val, na_value, prev_type):
  """Given a string, infers its tensor type.

  Infers the type of a value by picking the least 'permissive' type possible,
  while still allowing the previous type inference for this column to be valid.

  Args:
    str_val: String value to infer the type of.
    na_value: Additional string to recognize as a NA/NaN CSV value.
    prev_type: Type previously inferred based on values of this column that
      we've seen up till now.
  Returns:
    Inferred dtype.
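
  For example (a sketch of the intended behavior, not a public API):
  `_infer_type("3", "", None)` yields `tf.int32`, while a subsequent
  `_infer_type("3.5", "", tf.int32)` widens the column's inferred type to
  `tf.float32`.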
  """
  if str_val in ("", na_value):
    # If the field is null, it gives no extra information about its type.
    return prev_type

  type_list = [
      dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64, dtypes.string
  ]  # List of types to try, ordered from least permissive to most.

  type_functions = [
      _is_valid_int32,
      _is_valid_int64,
      lambda str_val: _is_valid_float(str_val, dtypes.float32),
      lambda str_val: _is_valid_float(str_val, dtypes.float64),
      lambda str_val: True,
  ]  # Corresponding type validation functions.

  for i in range(len(type_list)):
    validation_fn = type_functions[i]
    if validation_fn(str_val) and (prev_type is None or
                                   prev_type in type_list[:i + 1]):
      return type_list[i]


def _next_csv_row(filenames, num_cols, field_delim, use_quote_delim, header,
                  file_io_fn):
  """Generator that yields rows of CSV file(s) in order."""
  for fn in filenames:
    with file_io_fn(fn) as f:
      rdr = csv.reader(
          f,
          delimiter=field_delim,
          quoting=csv.QUOTE_MINIMAL if use_quote_delim else csv.QUOTE_NONE)
      row_num = 1
      if header:  # Skip the header line.
        next(rdr)
        row_num += 1

      for csv_row in rdr:
        if len(csv_row) != num_cols:
          raise ValueError(
              f"Problem inferring types: CSV row {row_num} has {len(csv_row)} "
              f"number of fields. Expected: {num_cols}.")
        row_num += 1
        yield csv_row
def _infer_column_defaults(filenames, num_cols, field_delim, use_quote_delim,
                           na_value, header, num_rows_for_inference,
                           select_columns, file_io_fn):
  """Infers column types from the first N valid CSV records of files."""
  if select_columns is None:
    select_columns = range(num_cols)
  inferred_types = [None] * len(select_columns)

  for i, csv_row in enumerate(
      _next_csv_row(filenames, num_cols, field_delim, use_quote_delim, header,
                    file_io_fn)):
    if num_rows_for_inference is not None and i >= num_rows_for_inference:
      break

    for j, col_index in enumerate(select_columns):
      inferred_types[j] = _infer_type(csv_row[col_index], na_value,
                                      inferred_types[j])

  # Replace None's with a default type.
  inferred_types = [t or dtypes.string for t in inferred_types]
  # Default to 0 or '' for null values.
  return [
      constant_op.constant([0 if t is not dtypes.string else ""], dtype=t)
      for t in inferred_types
  ]


def _infer_column_names(filenames, field_delim, use_quote_delim, file_io_fn):
  """Infers column names from first rows of files."""
  csv_kwargs = {
      "delimiter": field_delim,
      "quoting": csv.QUOTE_MINIMAL if use_quote_delim else csv.QUOTE_NONE
  }
  with file_io_fn(filenames[0]) as f:
    try:
      column_names = next(csv.reader(f, **csv_kwargs))
    except StopIteration:
      raise ValueError(f"Failed when reading the header line of "
                       f"{filenames[0]}. Is it an empty file?")

  for name in filenames[1:]:
    with file_io_fn(name) as f:
      try:
        if next(csv.reader(f, **csv_kwargs)) != column_names:
          raise ValueError(
              "All input CSV files should have the same column names in the "
              f"header row. File {name} has different column names.")
      except StopIteration:
        raise ValueError(f"Failed when reading the header line of "
                         f"{name}. Is it an empty file?")
  return column_names
def _get_sorted_col_indices(select_columns, column_names):
  """Transforms select_columns argument into sorted column indices."""
  names_to_indices = {n: i for i, n in enumerate(column_names)}
  num_cols = len(column_names)

  results = []
  for v in select_columns:
    # If the value is already an int, check that it is a valid index.
    if isinstance(v, int):
      if v < 0 or v >= num_cols:
        raise ValueError(
            f"Column index {v} specified in `select_columns` should be > 0 "
            f"and <= {num_cols}, which is the number of columns.")
      results.append(v)
    # Otherwise, check that it is a valid column name and convert it to the
    # relevant column index.
    elif v not in names_to_indices:
      raise ValueError(
          f"Column {v} specified in `select_columns` must be of one of the "
          f"columns: {names_to_indices.keys()}.")
    else:
      results.append(names_to_indices[v])

  # Sort and ensure there are no duplicates.
  results = sorted(set(results))
  if len(results) != len(select_columns):
    sorted_names = sorted(select_columns)
    duplicate_columns = set(
        [a for a, b in zip(sorted_names[:-1], sorted_names[1:]) if a == b])
    raise ValueError("The `select_columns` argument contains duplicate "
                     f"columns: {duplicate_columns}.")
  return results


def _maybe_shuffle_and_repeat(dataset, num_epochs, shuffle,
                              shuffle_buffer_size, shuffle_seed):
  """Optionally shuffle and repeat dataset, as requested."""
  if shuffle:
    dataset = dataset.shuffle(shuffle_buffer_size, shuffle_seed)
  if num_epochs != 1:
    dataset = dataset.repeat(num_epochs)
  return dataset
re   TFc             C   s   |dkrd}|	dkr|}	|dkr&t j}t jj| ||d}tj||d}|dkrRd}t|||||}|
pl|dk}
|dkr|j||
d}n|j||	d}|j||
d}|dkr|S |j	|d	S dS )
a.  Reads and optionally parses TFRecord files into a dataset.

  Provides common functionality such as batching, optional parsing, shuffling,
  and performant defaults.

  Args:
    file_pattern: List of files or patterns of TFRecord file paths.
      See `tf.io.gfile.glob` for pattern rules.
    batch_size: An int representing the number of records to combine
      in a single batch.
    parser_fn: (Optional.) A function accepting string input to parse
      and process the record contents. This function must map records
      to components of a fixed shape, so they may be batched. By
      default, uses the record contents unmodified.
    num_epochs: (Optional.) An int specifying the number of times this
      dataset is repeated.  If None (the default), cycles through the
      dataset forever.
    shuffle: (Optional.) A bool that indicates whether the input
      should be shuffled. Defaults to `True`.
    shuffle_buffer_size: (Optional.) Buffer size to use for
      shuffling. A large buffer size ensures better shuffling, but
      increases memory usage and startup time.
    shuffle_seed: (Optional.) Randomization seed to use for shuffling.
    prefetch_buffer_size: (Optional.) An int specifying the number of
      feature batches to prefetch for performance improvement.
      Defaults to auto-tune. Set to 0 to disable prefetching.
    num_parallel_reads: (Optional.) Number of threads used to read
      records from files. By default or if set to a value >1, the
      results will be interleaved. Defaults to `24`.
    num_parallel_parser_calls: (Optional.) Number of parallel
      records to parse in parallel. Defaults to `batch_size`.
    drop_final_batch: (Optional.) Whether the last batch should be
      dropped in case its size is smaller than `batch_size`; the
      default behavior is not to drop the smaller batch.

  Returns:
    A dataset, where each element matches the output of `parser_fn`
    except it will have an additional leading `batch-size` dimension,
    or a `batch_size`-length 1-D tensor of strings if `parser_fn` is
    unspecified.
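
  Example (a minimal sketch; the file pattern and the feature name
  "feature" below are hypothetical placeholders, not part of this API):

  ```python
  def parse(record):
    # Parses one serialized tf.train.Example; "feature" is a made-up key.
    return tf.io.parse_single_example(
        record, {"feature": tf.io.FixedLenFeature([], tf.int64)})

  dataset = make_tf_record_dataset(
      "/path/to/data-*.tfrecord",  # hypothetical file pattern
      batch_size=32,
      parser_fn=parse,
      num_epochs=1)
  ```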
  """
  if num_parallel_reads is None:
    # A fixed constant rather than an auto-tuned value, so that the mixing of
    # records from different files stays stable across runs.
    num_parallel_reads = 24

  if num_parallel_parser_calls is None:
    num_parallel_parser_calls = batch_size

  if prefetch_buffer_size is None:
    prefetch_buffer_size = dataset_ops.AUTOTUNE

  files = dataset_ops.Dataset.list_files(
      file_pattern, shuffle=shuffle, seed=shuffle_seed)

  dataset = core_readers.TFRecordDataset(
      files, num_parallel_reads=num_parallel_reads)

  if shuffle_buffer_size is None:
    shuffle_buffer_size = 10000
  dataset = _maybe_shuffle_and_repeat(dataset, num_epochs, shuffle,
                                      shuffle_buffer_size, shuffle_seed)

  # Setting `drop_final_batch=True` when `num_epochs is None` improves shape
  # inference, because it makes the batch dimension static.
  drop_final_batch = drop_final_batch or num_epochs is None

  if parser_fn is None:
    dataset = dataset.batch(batch_size, drop_remainder=drop_final_batch)
  else:
    dataset = dataset.map(
        parser_fn, num_parallel_calls=num_parallel_parser_calls)
    dataset = dataset.batch(batch_size, drop_remainder=drop_final_batch)

  if prefetch_buffer_size == 0:
    return dataset
  else:
    return dataset.prefetch(buffer_size=prefetch_buffer_size)


@tf_export("data.experimental.make_csv_dataset", v1=[])
def make_csv_dataset_v2(
    file_pattern,
    batch_size,
    column_names=None,
    column_defaults=None,
    label_name=None,
    select_columns=None,
    field_delim=",",
    use_quote_delim=True,
    na_value="",
    header=True,
    num_epochs=None,
    shuffle=True,
    shuffle_buffer_size=10000,
    shuffle_seed=None,
    prefetch_buffer_size=None,
    num_parallel_reads=None,
    sloppy=False,
    num_rows_for_inference=100,
    compression_type=None,
    ignore_errors=False,
    encoding="utf-8",
):
  """Reads CSV files into a dataset.

  Reads CSV files into a dataset, where each element of the dataset is a
  (features, labels) tuple that corresponds to a batch of CSV rows. The features
  dictionary maps feature column names to `Tensor`s containing the corresponding
  feature data, and labels is a `Tensor` containing the batch's label data.

  By default, the first rows of the CSV files are expected to be headers listing
  the column names. If the first rows are not headers, set `header=False` and
  provide the column names with the `column_names` argument.

  By default, the dataset is repeated indefinitely, reshuffling the order each
  time. This behavior can be modified by setting the `num_epochs` and `shuffle`
  arguments.

  For example, suppose you have a CSV file containing

  | Feature_A | Feature_B |
  | --------- | --------- |
  | 1         | "a"       |
  | 2         | "b"       |
  | 3         | "c"       |
  | 4         | "d"       |

  ```
  # No label column specified
  dataset = tf.data.experimental.make_csv_dataset(filename, batch_size=2)
  iterator = dataset.as_numpy_iterator()
  print(dict(next(iterator)))
  # prints a dictionary of batched features:
  # OrderedDict([('Feature_A', array([1, 4], dtype=int32)),
  #              ('Feature_B', array([b'a', b'd'], dtype=object))])
  ```

  ```
  # Set Feature_B as label column
  dataset = tf.data.experimental.make_csv_dataset(
      filename, batch_size=2, label_name="Feature_B")
  iterator = dataset.as_numpy_iterator()
  print(next(iterator))
  # prints (features, labels) tuple:
  # (OrderedDict([('Feature_A', array([1, 2], dtype=int32))]),
  #  array([b'a', b'b'], dtype=object))
  ```

  See the
  [Load CSV data guide](https://www.tensorflow.org/tutorials/load_data/csv) for
  more examples of using `make_csv_dataset` to read CSV data.

  Args:
    file_pattern: List of files or patterns of file paths containing CSV
      records. See `tf.io.gfile.glob` for pattern rules.
    batch_size: An int representing the number of records to combine
      in a single batch.
    column_names: An optional list of strings that corresponds to the CSV
      columns, in order. One per column of the input record. If this is not
      provided, infers the column names from the first row of the records.
      These names will be the keys of the features dict of each dataset element.
    column_defaults: An optional list of default values for the CSV fields. One
      item per selected column of the input record. Each item in the list is
      either a valid CSV dtype (float32, float64, int32, int64, or string), or a
      `Tensor` with one of the aforementioned types. The tensor can either be
      a scalar default value (if the column is optional), or an empty tensor (if
      the column is required). If a dtype is provided instead of a tensor, the
      column is also treated as required. If this list is not provided, tries
      to infer types based on reading the first num_rows_for_inference rows of
      files specified, and assumes all columns are optional, defaulting to `0`
      for numeric values and `""` for string values. If both this and
      `select_columns` are specified, these must have the same lengths, and
      `column_defaults` is assumed to be sorted in order of increasing column
      index.
    label_name: An optional string corresponding to the label column. If
      provided, the data for this column is returned as a separate `Tensor` from
      the features dictionary, so that the dataset complies with the format
      expected by a `tf.Estimator.train` or `tf.Estimator.evaluate` input
      function.
    select_columns: An optional list of integer indices or string column
      names, that specifies a subset of columns of CSV data to select. If
      column names are provided, these must correspond to names provided in
      `column_names` or inferred from the file header lines. When this argument
      is specified, only a subset of CSV columns will be parsed and returned,
      corresponding to the columns specified. Using this results in faster
      parsing and lower memory usage. If both this and `column_defaults` are
      specified, these must have the same lengths, and `column_defaults` is
      assumed to be sorted in order of increasing column index.
    field_delim: An optional `string`. Defaults to `","`. Char delimiter to
      separate fields in a record.
    use_quote_delim: An optional bool. Defaults to `True`. If false, treats
      double quotation marks as regular characters inside of the string fields.
    na_value: Additional string to recognize as NA/NaN.
    header: A bool that indicates whether the first rows of provided CSV files
      correspond to header lines with column names, and should not be included
      in the data.
    num_epochs: An int specifying the number of times this dataset is repeated.
      If None, cycles through the dataset forever.
    shuffle: A bool that indicates whether the input should be shuffled.
    shuffle_buffer_size: Buffer size to use for shuffling. A large buffer size
      ensures better shuffling, but increases memory usage and startup time.
    shuffle_seed: Randomization seed to use for shuffling.
    prefetch_buffer_size: An int specifying the number of feature
      batches to prefetch for performance improvement. Recommended value is the
      number of batches consumed per training step. Defaults to auto-tune.
    num_parallel_reads: Number of threads used to read CSV records from files.
      If >1, the results will be interleaved. Defaults to `1`.
    sloppy: If `True`, reading performance will be improved at
      the cost of non-deterministic ordering. If `False`, the order of elements
      produced is deterministic prior to shuffling (elements are still
      randomized if `shuffle=True`. Note that if the seed is set, then order
      of elements after shuffling is deterministic). Defaults to `False`.
    num_rows_for_inference: Number of rows of a file to use for type inference
      if record_defaults is not provided. If None, reads all the rows of all
      the files. Defaults to 100.
    compression_type: (Optional.) A `tf.string` scalar evaluating to one of
      `""` (no compression), `"ZLIB"`, or `"GZIP"`. Defaults to no compression.
    ignore_errors: (Optional.) If `True`, ignores errors with CSV file parsing,
      such as malformed data or empty lines, and moves on to the next valid
      CSV record. Otherwise, the dataset raises an error and stops processing
      when encountering any invalid records. Defaults to `False`.
    encoding: Encoding to use when reading. Defaults to `UTF-8`.

  Returns:
    A dataset, where each element is a (features, labels) tuple that corresponds
    to a batch of `batch_size` CSV rows. The features dictionary maps feature
    column names to `Tensor`s containing the corresponding column data, and
    labels is a `Tensor` containing the column data for the label column
    specified by `label_name`.

  Raises:
    ValueError: If any of the arguments is malformed.
  """
  if num_parallel_reads is None:
    num_parallel_reads = 1

  if prefetch_buffer_size is None:
    prefetch_buffer_size = dataset_ops.AUTOTUNE

  # Create dataset of all matching filenames.
  filenames = _get_file_names(file_pattern, False)
  dataset = dataset_ops.Dataset.from_tensor_slices(filenames)
  if shuffle:
    dataset = dataset.shuffle(len(filenames), shuffle_seed)

  # Clean arguments; figure out column names and defaults.
  if column_names is None or column_defaults is None:
    # Find out which io function to use to open the files.
    file_io_fn = lambda filename: file_io.FileIO(  # pylint: disable=g-long-lambda
        filename, "r", encoding=encoding)
    if compression_type is not None:
      compression_type_value = tensor_util.constant_value(compression_type)
      if compression_type_value is None:
        raise ValueError(
            f"Received unknown `compression_type` {compression_type}. "
            "Expected: GZIP, ZLIB or '' (empty string).")
      if compression_type_value == "GZIP":
        file_io_fn = lambda filename: gzip.open(  # pylint: disable=g-long-lambda
            filename, "rt", encoding=encoding)
      elif compression_type_value == "ZLIB":
        raise ValueError(
            f"`compression_type` {compression_type_value} is not supported "
            "for probing columns.")
      elif compression_type_value != "":
        raise ValueError(
            f"Received unknown `compression_type` {compression_type}. "
            "Expected: GZIP, ZLIB or '' (empty string).")
  if column_names is None:
    if not header:
      raise ValueError("Expected `column_names` or `header` arguments. "
                       "Neither is provided.")
    # If column names are not provided, infer them from the header lines.
    column_names = _infer_column_names(filenames, field_delim, use_quote_delim,
                                       file_io_fn)
  if len(column_names) != len(set(column_names)):
    sorted_names = sorted(column_names)
    duplicate_columns = set(
        [a for a, b in zip(sorted_names[:-1], sorted_names[1:]) if a == b])
    raise ValueError(
        "Either `column_names` argument or CSV header row contains duplicate "
        f"column names: {duplicate_columns}.")

  if select_columns is not None:
    select_columns = _get_sorted_col_indices(select_columns, column_names)

  if column_defaults is not None:
    # If `column_defaults` values are tensors, they must be of type int32,
    # int64, float32, float64 or string.
    column_defaults = [
        constant_op.constant([], dtype=x)
        if not tensor_util.is_tf_type(x) and x in _ACCEPTABLE_CSV_TYPES else x
        for x in column_defaults
    ]
  else:
    # If `column_defaults` is not provided, infer it from the first records.
    column_defaults = _infer_column_defaults(filenames, len(column_names),
                                             field_delim, use_quote_delim,
                                             na_value, header,
                                             num_rows_for_inference,
                                             select_columns, file_io_fn)

  if select_columns is not None and len(column_defaults) != len(
      select_columns):
    raise ValueError(
        "If specified, `column_defaults` and `select_columns` must have the "
        f"same length: `column_defaults` has length {len(column_defaults)}, "
        f"`select_columns` has length {len(select_columns)}.")
  if select_columns is not None and len(select_columns) < len(column_names):
    # Pick the relevant subset of column names.
    column_names = [column_names[i] for i in select_columns]

  if label_name is not None and label_name not in column_names:
    raise ValueError("`label_name` provided must be one of the columns: "
                     f"{column_names}. Received: {label_name}.")

  def filename_to_dataset(filename):
    dataset = CsvDataset(
        filename,
        record_defaults=column_defaults,
        field_delim=field_delim,
        use_quote_delim=use_quote_delim,
        na_value=na_value,
        select_cols=select_columns,
        header=header,
        compression_type=compression_type)
    if ignore_errors:
      dataset = dataset.apply(error_ops.ignore_errors())
    return dataset

  def map_fn(*columns):
    """Organizes columns into a features dictionary.

    Args:
      *columns: list of `Tensor`s corresponding to one csv record.
    Returns:
      An OrderedDict of feature names to values for that particular record. If
      label_name is provided, extracts the label feature to be returned as the
      second element of the tuple.
    N)collectionsOrderedDictrY   pop)columnsfeatureslabel)rK   
label_namer   r   map_fn?  s
    

z#make_csv_dataset_v2.<locals>.map_fn)rj   c          	      s   t j|  dd d dS )Nr&   )cycle_lengthblock_lengthsloppybuffer_output_elementsprefetch_input_elements)ro   ParallelInterleaveDataset)ra   )r   rh   r   r   r   apply_fnW  s    z%make_csv_dataset_v2.<locals>.apply_fn)ru   ri   )use_inter_op_parallelism)r   rl   _get_file_namesrm   Zfrom_tensor_slicesr_   r)   r   Zconstant_valuer   rM   rX   rW   rY   r^   rI   
interleaveoptions_libOptionsdeterministicwith_optionsr   re   rq   
MapDatasetrs   )rt   ru   rK   r   r   rG   r7   r8   r*   r9   rb   r_   rc   rd   rv   rh   r   rF   r   r   r   r5   ra   r:   Zcompression_type_valuer\   r]   r   r   r   r   )r   rK   r   r   r7   r   r9   r   r   r*   rh   rG   r   r8   r   make_csv_dataset_v2E  s     











r   c             C   s6   t t| |||||||||	|
||||||||||S )N)r   DatasetV1Adapterr   )rt   ru   rK   r   r   rG   r7   r8   r*   r9   rb   r_   rc   rd   rv   rh   r   rF   r   r   r   r   r   r   make_csv_dataset_v1u  s    r   i  @ zdata.experimental.CsvDatasetc                   s.   e Zd ZdZd fdd	Zed	d
 Z  ZS )CsvDatasetV2a  A Dataset comprising lines from one or more CSV files.

  The `tf.data.experimental.CsvDataset` class provides a minimal CSV Dataset
  interface. There is also a richer `tf.data.experimental.make_csv_dataset`
  function which provides additional convenience features such as column header
  parsing, column type-inference, automatic shuffling, and file interleaving.

  The elements of this dataset correspond to records from the file(s).
  RFC 4180 format is expected for CSV files
  (https://tools.ietf.org/html/rfc4180)
  Note that we allow leading and trailing spaces for int or float fields.

  For example, suppose we have a file 'my_file0.csv' with four CSV columns of
  different data types:

  >>> with open('/tmp/my_file0.csv', 'w') as f:
  ...   f.write('abcdefg,4.28E10,5.55E6,12\n')
  ...   f.write('hijklmn,-5.3E14,,2\n')

  We can construct a CsvDataset from it as follows:

  >>> dataset = tf.data.experimental.CsvDataset(
  ...   "/tmp/my_file0.csv",
  ...   [tf.float32,  # Required field, use dtype or empty tensor
  ...    tf.constant([0.0], dtype=tf.float32),  # Optional field, default to 0.0
  ...    tf.int32,  # Required field, use dtype or empty tensor
  ...   ],
  ...   select_cols=[1,2,3]  # Only parse last three columns
  ... )

  The expected output of its iterations is:

  >>> for element in dataset.as_numpy_iterator():
  ...   print(element)
  (4.28e10, 5.55e6, 12)
  (-5.3e14, 0.0, 2)

  See
  https://www.tensorflow.org/tutorials/load_data/csv#tfdataexperimentalcsvdataset
  for more in-depth example usage.
  """

  def __init__(self,
               filenames,
               record_defaults,
               compression_type=None,
               buffer_size=None,
               header=False,
               field_delim=",",
               use_quote_delim=True,
               na_value="",
               select_cols=None,
               exclude_cols=None):
    """Creates a `CsvDataset` by reading and decoding CSV files.

    Args:
      filenames: A `tf.string` tensor containing one or more filenames.
      record_defaults: A list of default values for the CSV fields. Each item in
        the list is either a valid CSV `DType` (float32, float64, int32, int64,
        string), or a `Tensor` object with one of the above types. One per
        column of CSV data, with either a scalar `Tensor` default value for the
        column if it is optional, or `DType` or empty `Tensor` if required. If
        both this and `select_columns` are specified, these must have the same
        lengths, and `column_defaults` is assumed to be sorted in order of
        increasing column index. If both this and 'exclude_cols' are specified,
        the sum of lengths of record_defaults and exclude_cols should equal
        the total number of columns in the CSV file.
      compression_type: (Optional.) A `tf.string` scalar evaluating to one of
        `""` (no compression), `"ZLIB"`, or `"GZIP"`. Defaults to no
        compression.
      buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes
        to buffer while reading files. Defaults to 4MB.
      header: (Optional.) A `tf.bool` scalar indicating whether the CSV file(s)
        have header line(s) that should be skipped when parsing. Defaults to
        `False`.
      field_delim: (Optional.) A `tf.string` scalar containing the delimiter
        character that separates fields in a record. Defaults to `","`.
      use_quote_delim: (Optional.) A `tf.bool` scalar. If `False`, treats
        double quotation marks as regular characters inside of string fields
        (ignoring RFC 4180, Section 2, Bullet 5). Defaults to `True`.
      na_value: (Optional.) A `tf.string` scalar indicating a value that will
        be treated as NA/NaN.
      select_cols: (Optional.) A sorted list of column indices to select from
        the input data. If specified, only this subset of columns will be
        parsed. Defaults to parsing all columns. At most one of `select_cols`
        and `exclude_cols` can be specified.
      exclude_cols: (Optional.) A sorted list of column indices to exclude from
        the input data. If specified, only the complement of this set of column
        will be parsed. Defaults to parsing all columns. At most one of
        `select_cols` and `exclude_cols` can be specified.

    Raises:
       InvalidArgumentError: If exclude_cols is not None and
           len(exclude_cols) + len(record_defaults) does not match the total
           number of columns in the file(s)


    """
    self._filenames = ops.convert_to_tensor(
        filenames, dtype=dtypes.string, name="filenames")
    self._compression_type = convert.optional_param_to_tensor(
        "compression_type",
        compression_type,
        argument_default="",
        argument_dtype=dtypes.string)
    record_defaults = [
        constant_op.constant([], dtype=x)
        if not tensor_util.is_tf_type(x) and x in _ACCEPTABLE_CSV_TYPES else x
        for x in record_defaults
    ]
    self._record_defaults = ops.convert_n_to_tensor(
        record_defaults, name="record_defaults")
    self._buffer_size = convert.optional_param_to_tensor(
        "buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)
    self._header = ops.convert_to_tensor(
        header, dtype=dtypes.bool, name="header")
    self._field_delim = ops.convert_to_tensor(
        field_delim, dtype=dtypes.string, name="field_delim")
    self._use_quote_delim = ops.convert_to_tensor(
        use_quote_delim, dtype=dtypes.bool, name="use_quote_delim")
    self._na_value = ops.convert_to_tensor(
        na_value, dtype=dtypes.string, name="na_value")
    self._select_cols = convert.optional_param_to_tensor(
        "select_cols",
        select_cols,
        argument_default=[],
        argument_dtype=dtypes.int64)
    self._exclude_cols = convert.optional_param_to_tensor(
        "exclude_cols",
        exclude_cols,
        argument_default=[],
        argument_dtype=dtypes.int64)
    self._element_spec = tuple(
        tensor_spec.TensorSpec([], d.dtype) for d in self._record_defaults)
    variant_tensor = gen_experimental_dataset_ops.csv_dataset_v2(
        filenames=self._filenames,
        record_defaults=self._record_defaults,
        buffer_size=self._buffer_size,
        header=self._header,
        output_shapes=self._flat_shapes,
        field_delim=self._field_delim,
        use_quote_delim=self._use_quote_delim,
        na_value=self._na_value,
        select_cols=self._select_cols,
        exclude_cols=self._exclude_cols,
        compression_type=self._compression_type)
    super(CsvDatasetV2, self).__init__(variant_tensor)

  @property
  def element_spec(self):
    return self._element_spec


@tf_export(v1=["data.experimental.CsvDataset"])
class CsvDatasetV1(dataset_ops.DatasetV1Adapter):
  """A Dataset comprising lines from one or more CSV files."""

  @functools.wraps(CsvDatasetV2.__init__)
  def __init__(self,
               filenames,
               record_defaults,
               compression_type=None,
               buffer_size=None,
               header=False,
               field_delim=",",
               use_quote_delim=True,
               na_value="",
               select_cols=None):
    """Creates a `CsvDataset` by reading and decoding CSV files.

    The elements of this dataset correspond to records from the file(s).
    RFC 4180 format is expected for CSV files
    (https://tools.ietf.org/html/rfc4180)
    Note that we allow leading and trailing spaces for int or float fields.


    For example, suppose we have a file 'my_file0.csv' with four CSV columns of
    different data types:
    ```
    abcdefg,4.28E10,5.55E6,12
    hijklmn,-5.3E14,,2
    ```

    We can construct a CsvDataset from it as follows:

    ```python
     dataset = tf.data.experimental.CsvDataset(
        "my_file*.csv",
        [tf.float32,  # Required field, use dtype or empty tensor
         tf.constant([0.0], dtype=tf.float32),  # Optional field, default to 0.0
         tf.int32,  # Required field, use dtype or empty tensor
         ],
        select_cols=[1,2,3]  # Only parse last three columns
    )
    ```

    The expected output of its iterations is:

    ```python
    for element in dataset:
      print(element)

    >> (4.28e10, 5.55e6, 12)
    >> (-5.3e14, 0.0, 2)
    ```

    Args:
      filenames: A `tf.string` tensor containing one or more filenames.
      record_defaults: A list of default values for the CSV fields. Each item in
        the list is either a valid CSV `DType` (float32, float64, int32, int64,
        string), or a `Tensor` object with one of the above types. One per
        column of CSV data, with either a scalar `Tensor` default value for the
        column if it is optional, or `DType` or empty `Tensor` if required. If
        both this and `select_columns` are specified, these must have the same
        lengths, and `column_defaults` is assumed to be sorted in order of
        increasing column index. If both this and 'exclude_cols' are specified,
        the sum of lengths of record_defaults and exclude_cols should equal the
        total number of columns in the CSV file.
      compression_type: (Optional.) A `tf.string` scalar evaluating to one of
        `""` (no compression), `"ZLIB"`, or `"GZIP"`. Defaults to no
        compression.
      buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes
        to buffer while reading files. Defaults to 4MB.
      header: (Optional.) A `tf.bool` scalar indicating whether the CSV file(s)
        have header line(s) that should be skipped when parsing. Defaults to
        `False`.
      field_delim: (Optional.) A `tf.string` scalar containing the delimiter
        character that separates fields in a record. Defaults to `","`.
      use_quote_delim: (Optional.) A `tf.bool` scalar. If `False`, treats double
        quotation marks as regular characters inside of string fields (ignoring
        RFC 4180, Section 2, Bullet 5). Defaults to `True`.
      na_value: (Optional.) A `tf.string` scalar indicating a value that will be
        treated as NA/NaN.
      select_cols: (Optional.) A sorted list of column indices to select from
        the input data. If specified, only this subset of columns will be
        parsed. Defaults to parsing all columns. At most one of `select_cols`
        and `exclude_cols` can be specified.
    N)r   r   r   r   )r   r5   r   r   rk   r9   r7   r8   r*   r   wrapped)r   r   r   r   <  s    QzCsvDatasetV1.__init__)NNFrz   Tr!   N)	r   r   r   r   	functoolswrapsr   r   r   r   r   )r   r   r   8  s         r   z/data.experimental.make_batched_features_datasetc                sz  dkrt jdkrd|dkr&d}|
dkr4tj}
tjj| ||	d}ttrdtt	j
rdtddkrpg tjkr|jfddd}t } |_||}nfd	d
}||}t|tjtjfkrtj|dd dd}t|||||	}|j||p|dkd}|tj||d} rl |krZtd  d|  d| fdd}||
}|S )a  Returns a `Dataset` of feature dictionaries from `Example` protos.

  If the `label_key` argument is provided, returns a `Dataset` of
  `(features, label)` tuples comprising feature dictionaries and labels.

  Example:

  ```
  serialized_examples = [
    features {
      feature { key: "age" value { int64_list { value: [ 0 ] } } }
      feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
      feature { key: "kws" value { bytes_list { value: [ "code", "art" ] } } }
    },
    features {
      feature { key: "age" value { int64_list { value: [] } } }
      feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
      feature { key: "kws" value { bytes_list { value: [ "sports" ] } } }
    }
  ]
  ```

  We can use arguments:

  ```
  features: {
    "age": FixedLenFeature([], dtype=tf.int64, default_value=-1),
    "gender": FixedLenFeature([], dtype=tf.string),
    "kws": VarLenFeature(dtype=tf.string),
  }
  ```

  And the expected output is:

  ```python
  {
    "age": [[0], [-1]],
    "gender": [["f"], ["f"]],
    "kws": SparseTensor(
      indices=[[0, 0], [0, 1], [1, 0]],
      values=["code", "art", "sports"],
      dense_shape=[2, 2]),
  }
  ```
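
  A minimal end-to-end sketch of calling this API with the `features` above
  (the file pattern is a hypothetical placeholder):

  ```
  dataset = tf.data.experimental.make_batched_features_dataset(
      file_pattern="/path/to/examples-*.tfrecord",  # hypothetical path
      batch_size=2,
      features={
          "age": tf.io.FixedLenFeature([], dtype=tf.int64, default_value=-1),
          "gender": tf.io.FixedLenFeature([], dtype=tf.string),
          "kws": tf.io.VarLenFeature(dtype=tf.string),
      })
  ```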

  Args:
    file_pattern: List of files or patterns of file paths containing
      `Example` records. See `tf.io.gfile.glob` for pattern rules.
    batch_size: An int representing the number of records to combine
      in a single batch.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
      `VarLenFeature` values. See `tf.io.parse_example`.
    reader: A function or class that can be
      called with a `filenames` tensor and (optional) `reader_args` and returns
      a `Dataset` of `Example` tensors. Defaults to `tf.data.TFRecordDataset`.
    label_key: (Optional) A string corresponding to the key labels are stored in
      `tf.Examples`. If provided, it must be one of the `features` key,
      otherwise results in `ValueError`.
    reader_args: Additional arguments to pass to the reader class.
    num_epochs: Integer specifying the number of times to read through the
      dataset. If None, cycles through the dataset forever. Defaults to `None`.
    shuffle: A boolean, indicates whether the input should be shuffled. Defaults
      to `True`.
    shuffle_buffer_size: Buffer size of the ShuffleDataset. A large capacity
      ensures better shuffling but would increase memory usage and startup time.
    shuffle_seed: Randomization seed to use for shuffling.
    prefetch_buffer_size: Number of feature batches to prefetch in order to
      improve performance. Recommended value is the number of batches consumed
      per training step. Defaults to auto-tune.
    reader_num_threads: Number of threads used to read `Example` records. If >1,
      the results will be interleaved. Defaults to `1`.
    parser_num_threads: Number of threads to use for parsing `Example` tensors
      into a dictionary of `Feature` tensors. Defaults to `2`.
    sloppy_ordering: If `True`, reading performance will be improved at
      the cost of non-deterministic ordering. If `False`, the order of elements
      produced is deterministic prior to shuffling (elements are still
      randomized if `shuffle=True`. Note that if the seed is set, then order
      of elements after shuffling is deterministic). Defaults to `False`.
    drop_final_batch: If `True`, and the batch size does not evenly divide the
      input dataset size, the final smaller batch will be dropped. Defaults to
      `False`.

  Returns:
    A dataset of `dict` elements, (or a tuple of `dict` elements and label).
    Each `dict` maps feature keys to `Tensor` or `SparseTensor` objects.

  Raises:
    TypeError: If `reader` is of the wrong type.
    ValueError: If `label_key` is not one of the `features` keys.
  """
  if reader is None:
    reader = core_readers.TFRecordDataset

  if reader_num_threads is None:
    reader_num_threads = 1
  if parser_num_threads is None:
    parser_num_threads = 2
  if prefetch_buffer_size is None:
    prefetch_buffer_size = dataset_ops.AUTOTUNE

  # Create dataset of all matching filenames.
  dataset = dataset_ops.Dataset.list_files(
      file_pattern, shuffle=shuffle, seed=shuffle_seed)

  if isinstance(reader, type) and issubclass(reader, io_ops.ReaderBase):
    raise TypeError("The `reader` argument must return a `Dataset` object. "
                    "`tf.ReaderBase` subclasses are not supported. For "
                    "example, pass `tf.data.TFRecordDataset` instead of "
                    "`tf.TFRecordReader`.")

  if reader_args is None:
    reader_args = []

  if reader_num_threads == dataset_ops.AUTOTUNE:
    dataset = dataset.interleave(
        lambda filename: reader(filename, *reader_args),
        num_parallel_calls=reader_num_threads)
    options = options_lib.Options()
    options.deterministic = not sloppy_ordering
    dataset = dataset.with_options(options)
  else:

    def apply_fn(dataset):
      return core_readers.ParallelInterleaveDataset(
          dataset,
          lambda filename: reader(filename, *reader_args),
          cycle_length=reader_num_threads,
          block_length=1,
          sloppy=sloppy_ordering,
          buffer_output_elements=None,
          prefetch_input_elements=None)

    dataset = dataset.apply(apply_fn)

  # Extract values if the `Example` tensors are stored as key-value tuples.
  if dataset_ops.get_legacy_output_types(dataset) == (dtypes.string,
                                                      dtypes.string):
    dataset = dataset_ops.MapDataset(
        dataset, lambda _, v: v, use_inter_op_parallelism=False)

  # Apply dataset repeat and shuffle transformations.
  dataset = _maybe_shuffle_and_repeat(dataset, num_epochs, shuffle,
                                      shuffle_buffer_size, shuffle_seed)

  # Setting `drop_remainder=True` when `num_epochs is None` improves shape
  # inference, because it makes the batch dimension static.
  dataset = dataset.batch(
      batch_size, drop_remainder=drop_final_batch or num_epochs is None)

  # Parse `Example` tensors to a dictionary of `Feature` tensors.
  dataset = dataset.apply(
      parsing_ops.parse_example_dataset(
          features, num_parallel_calls=parser_num_threads))

  if label_key:
    if label_key not in features:
      raise ValueError(
          f"The `label_key` provided ({label_key}) must be one of the "
          f"`features` keys: {features.keys()}.")
    dataset = dataset.map(lambda x: (x, x.pop(label_key)))

  dataset = dataset.prefetch(prefetch_buffer_size)
  return dataset
||||S )N)r   r   r   )rt   ru   r   r1   r   r   rb   r_   rc   rd   rv   r   r   r   rw   r   r   r    make_batched_features_dataset_v1K  s
    r   c             C   sl   t | tr:| stdg }x,| D ]}|t| q W ntt| }|s\td|  d|sht|}|S )aO  Parse list of file names from pattern, optionally shuffled.

  Args:
    file_pattern: File glob pattern, or list of glob patterns.
    shuffle: Whether to shuffle the order of file names.

  Returns:
    List of file names matching `file_pattern`.

  Raises:
    ValueError: If `file_pattern` is empty, or pattern matches no files.
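
  For example, a hypothetical call `_get_file_names(["/data/*.csv"], False)`
  returns the sorted list of paths matching the glob.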
  """
  if isinstance(file_pattern, list):
    if not file_pattern:
      raise ValueError("Argument `file_pattern` should not be empty.")
    file_names = []
    for entry in file_pattern:
      file_names.extend(gfile.Glob(entry))
  else:
    file_names = list(gfile.Glob(file_pattern))

  if not file_names:
    raise ValueError(f"No files match `file_pattern` {file_pattern}.")

  # Sort files so the order is deterministic when not shuffling.
  if not shuffle:
    file_names = sorted(file_names)
  return file_names
r   zdata.experimental.SqlDatasetc                   s,   e Zd ZdZ fddZedd Z  ZS )SqlDatasetV2a  A `Dataset` consisting of the results from a SQL query.

  `SqlDataset` allows a user to read data from the result set of a SQL query.
  For example:

  ```python
  dataset = tf.data.experimental.SqlDataset("sqlite", "/foo/bar.sqlite3",
                                            "SELECT name, age FROM people",
                                            (tf.string, tf.int32))
  # Prints the rows of the result set of the above query.
  for element in dataset:
    print(element)
  ```
  """

  def __init__(self, driver_name, data_source_name, query, output_types):
    """Creates a `SqlDataset`.

    Args:
      driver_name: A 0-D `tf.string` tensor containing the database type.
        Currently, the only supported value is 'sqlite'.
      data_source_name: A 0-D `tf.string` tensor containing a connection string
        to connect to the database.
      query: A 0-D `tf.string` tensor containing the SQL query to execute.
      output_types: A tuple of `tf.DType` objects representing the types of the
        columns returned by `query`.
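        For example, `(tf.string, tf.int32)` would match the two-column
        query shown in the class docstring (one possible choice, not the
        only one).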
    """
    self._driver_name = ops.convert_to_tensor(
        driver_name, dtype=dtypes.string, name="driver_name")
    self._data_source_name = ops.convert_to_tensor(
        data_source_name, dtype=dtypes.string, name="data_source_name")
    self._query = ops.convert_to_tensor(
        query, dtype=dtypes.string, name="query")
    self._element_spec = nest.map_structure(
        lambda dtype: tensor_spec.TensorSpec([], dtype), output_types)
    variant_tensor = gen_experimental_dataset_ops.sql_dataset(
        self._driver_name, self._data_source_name, self._query,
        **self._flat_structure)
    super(SqlDatasetV2, self).__init__(variant_tensor)

  @property
  def element_spec(self):
    return self._element_spec


@tf_export(v1=["data.experimental.SqlDataset"])
class SqlDatasetV1(dataset_ops.DatasetV1Adapter):
  """A `Dataset` consisting of the results from a SQL query."""

  @functools.wraps(SqlDatasetV2.__init__)
  def __init__(self, driver_name, data_source_name, query, output_types):
    wrapped = SqlDatasetV2(driver_name, data_source_name, query, output_types)
    super(SqlDatasetV1, self).__init__(wrapped)


if tf2.enabled():
  CsvDataset = CsvDatasetV2
  SqlDataset = SqlDatasetV2
  make_batched_features_dataset = make_batched_features_dataset_v2
  make_csv_dataset = make_csv_dataset_v2
else:
  CsvDataset = CsvDatasetV1
  SqlDataset = SqlDatasetV1
  make_batched_features_dataset = make_batched_features_dataset_v1
  make_csv_dataset = make_csv_dataset_v1